drivers/crypto/atmel-aes.c
/*
 * Cryptographic API.
 *
 * Support for ATMEL AES HW acceleration.
 *
 * Copyright (c) 2012 Eukréa Electromatique - ATMEL
 * Author: Nicolas Royer <nicolas@eukrea.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * Some ideas are from omap-aes.c driver.
 */


#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/hw_random.h>
#include <linux/platform_device.h>

#include <linux/device.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/of_device.h>
#include <linux/delay.h>
#include <linux/crypto.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <linux/platform_data/crypto-atmel.h>
#include <dt-bindings/dma/at91.h>
#include "atmel-aes-regs.h"

#define ATMEL_AES_PRIORITY      300

#define CFB8_BLOCK_SIZE         1
#define CFB16_BLOCK_SIZE        2
#define CFB32_BLOCK_SIZE        4
#define CFB64_BLOCK_SIZE        8

/* AES flags */
/* Reserve bits [18:16] [14:12] [0] for mode (same as for AES_MR) */
#define AES_FLAGS_ENCRYPT       AES_MR_CYPHER_ENC
#define AES_FLAGS_OPMODE_MASK   (AES_MR_OPMOD_MASK | AES_MR_CFBS_MASK)
#define AES_FLAGS_ECB           AES_MR_OPMOD_ECB
#define AES_FLAGS_CBC           AES_MR_OPMOD_CBC
#define AES_FLAGS_OFB           AES_MR_OPMOD_OFB
#define AES_FLAGS_CFB128        (AES_MR_OPMOD_CFB | AES_MR_CFBS_128b)
#define AES_FLAGS_CFB64         (AES_MR_OPMOD_CFB | AES_MR_CFBS_64b)
#define AES_FLAGS_CFB32         (AES_MR_OPMOD_CFB | AES_MR_CFBS_32b)
#define AES_FLAGS_CFB16         (AES_MR_OPMOD_CFB | AES_MR_CFBS_16b)
#define AES_FLAGS_CFB8          (AES_MR_OPMOD_CFB | AES_MR_CFBS_8b)
#define AES_FLAGS_CTR           AES_MR_OPMOD_CTR

#define AES_FLAGS_MODE_MASK     (AES_FLAGS_OPMODE_MASK |        \
                                 AES_FLAGS_ENCRYPT)

#define AES_FLAGS_INIT          BIT(2)
#define AES_FLAGS_BUSY          BIT(3)
#define AES_FLAGS_FAST          BIT(5)

#define AES_FLAGS_PERSISTENT    (AES_FLAGS_INIT | AES_FLAGS_BUSY)

#define ATMEL_AES_QUEUE_LENGTH  50

#define ATMEL_AES_DMA_THRESHOLD         16


struct atmel_aes_caps {
        bool    has_dualbuff;
        bool    has_cfb64;
        u32     max_burst_size;
};

struct atmel_aes_dev;


typedef int (*atmel_aes_fn_t)(struct atmel_aes_dev *);


struct atmel_aes_base_ctx {
        struct atmel_aes_dev *dd;
        atmel_aes_fn_t  start;

        int             keylen;
        u32             key[AES_KEYSIZE_256 / sizeof(u32)];

        u16             block_size;
};

struct atmel_aes_ctx {
        struct atmel_aes_base_ctx       base;
};

struct atmel_aes_reqctx {
        unsigned long mode;
};

struct atmel_aes_dma {
        struct dma_chan                 *chan;
        struct dma_slave_config dma_conf;
};

struct atmel_aes_dev {
        struct list_head        list;
        unsigned long           phys_base;
        void __iomem            *io_base;

        struct crypto_async_request     *areq;
        struct atmel_aes_base_ctx       *ctx;

        bool                    is_async;
        atmel_aes_fn_t          resume;

        struct device           *dev;
        struct clk              *iclk;
        int                     irq;

        unsigned long           flags;

        spinlock_t              lock;
        struct crypto_queue     queue;

        struct tasklet_struct   done_task;
        struct tasklet_struct   queue_task;

        size_t  total;

        struct scatterlist      *in_sg;
        unsigned int            nb_in_sg;
        size_t                  in_offset;
        struct scatterlist      *out_sg;
        unsigned int            nb_out_sg;
        size_t                  out_offset;

        size_t  bufcnt;
        size_t  buflen;
        size_t  dma_size;

        void    *buf_in;
        int     dma_in;
        dma_addr_t      dma_addr_in;
        struct atmel_aes_dma    dma_lch_in;

        void    *buf_out;
        int     dma_out;
        dma_addr_t      dma_addr_out;
        struct atmel_aes_dma    dma_lch_out;

        struct atmel_aes_caps   caps;

        u32     hw_version;
};

struct atmel_aes_drv {
        struct list_head        dev_list;
        spinlock_t              lock;
};

static struct atmel_aes_drv atmel_aes = {
        .dev_list = LIST_HEAD_INIT(atmel_aes.dev_list),
        .lock = __SPIN_LOCK_UNLOCKED(atmel_aes.lock),
};

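/*
 * Count how many scatterlist entries are needed to cover req->nbytes,
 * stopping early if the list runs out before the byte count does.
 */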
static int atmel_aes_sg_length(struct ablkcipher_request *req,
                        struct scatterlist *sg)
{
        unsigned int total = req->nbytes;
        int sg_nb;
        unsigned int len;
        struct scatterlist *sg_list;

        sg_nb = 0;
        sg_list = sg;

        while (total) {
                len = min(sg_list->length, total);

                sg_nb++;
                total -= len;

                sg_list = sg_next(sg_list);
                if (!sg_list)
                        total = 0;
        }

        return sg_nb;
}

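/*
 * Copy up to buflen bytes between a linear buffer and the scatterlist,
 * advancing *sg and *offset across entry boundaries.  @out selects the
 * direction (0: sg to buf, 1: buf to sg).  Returns the number of bytes
 * actually copied.
 */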
static int atmel_aes_sg_copy(struct scatterlist **sg, size_t *offset,
                        void *buf, size_t buflen, size_t total, int out)
{
        size_t count, off = 0;

        while (buflen && total) {
                count = min((*sg)->length - *offset, total);
                count = min(count, buflen);

                if (!count)
                        return off;

                scatterwalk_map_and_copy(buf + off, *sg, *offset, count, out);

                off += count;
                buflen -= count;
                *offset += count;
                total -= count;

                if (*offset == (*sg)->length) {
                        *sg = sg_next(*sg);
                        if (*sg)
                                *offset = 0;
                        else
                                total = 0;
                }
        }

        return off;
}

static inline u32 atmel_aes_read(struct atmel_aes_dev *dd, u32 offset)
{
        return readl_relaxed(dd->io_base + offset);
}

static inline void atmel_aes_write(struct atmel_aes_dev *dd,
                                        u32 offset, u32 value)
{
        writel_relaxed(value, dd->io_base + offset);
}

static void atmel_aes_read_n(struct atmel_aes_dev *dd, u32 offset,
                                        u32 *value, int count)
{
        for (; count--; value++, offset += 4)
                *value = atmel_aes_read(dd, offset);
}

static void atmel_aes_write_n(struct atmel_aes_dev *dd, u32 offset,
                              const u32 *value, int count)
{
        for (; count--; value++, offset += 4)
                atmel_aes_write(dd, offset, *value);
}

static struct atmel_aes_dev *atmel_aes_find_dev(struct atmel_aes_base_ctx *ctx)
{
        struct atmel_aes_dev *aes_dd = NULL;
        struct atmel_aes_dev *tmp;

        spin_lock_bh(&atmel_aes.lock);
        if (!ctx->dd) {
                list_for_each_entry(tmp, &atmel_aes.dev_list, list) {
                        aes_dd = tmp;
                        break;
                }
                ctx->dd = aes_dd;
        } else {
                aes_dd = ctx->dd;
        }

        spin_unlock_bh(&atmel_aes.lock);

        return aes_dd;
}

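/*
 * Enable the peripheral clock and, on first use, reset the engine.
 * CKEY is written with 0xE, which appears to be the key value the
 * datasheet requires before AES_MR writes are honoured.
 */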
static int atmel_aes_hw_init(struct atmel_aes_dev *dd)
{
        int err;

        err = clk_prepare_enable(dd->iclk);
        if (err)
                return err;

        if (!(dd->flags & AES_FLAGS_INIT)) {
                atmel_aes_write(dd, AES_CR, AES_CR_SWRST);
                atmel_aes_write(dd, AES_MR, 0xE << AES_MR_CKEY_OFFSET);
                dd->flags |= AES_FLAGS_INIT;
        }

        return 0;
}

static inline unsigned int atmel_aes_get_version(struct atmel_aes_dev *dd)
{
        return atmel_aes_read(dd, AES_HW_VERSION) & 0x00000fff;
}

static int atmel_aes_hw_version_init(struct atmel_aes_dev *dd)
{
        int err;

        err = atmel_aes_hw_init(dd);
        if (err)
                return err;

        dd->hw_version = atmel_aes_get_version(dd);

        dev_info(dd->dev, "version: 0x%x\n", dd->hw_version);

        clk_disable_unprepare(dd->iclk);
        return 0;
}

static inline void atmel_aes_set_mode(struct atmel_aes_dev *dd,
                                      const struct atmel_aes_reqctx *rctx)
{
        /* Clear all but persistent flags and set request flags. */
        dd->flags = (dd->flags & AES_FLAGS_PERSISTENT) | rctx->mode;
}

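/*
 * Finalize the current request: release the clock, clear the BUSY flag,
 * complete the request if it was handled asynchronously, and kick the
 * queue tasklet so a pending request can start.
 */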
static inline int atmel_aes_complete(struct atmel_aes_dev *dd, int err)
{
        clk_disable_unprepare(dd->iclk);
        dd->flags &= ~AES_FLAGS_BUSY;

        if (dd->is_async)
                dd->areq->complete(dd->areq, err);

        tasklet_schedule(&dd->queue_task);

        return err;
}

static void atmel_aes_dma_callback(void *data)
{
        struct atmel_aes_dev *dd = data;

        dd->is_async = true;
        (void)dd->resume(dd);
}

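/*
 * Program both DMA channels for one chunk of @length bytes.  Bus width
 * and burst size follow from the operation's block size; the output
 * channel's completion callback resumes processing.
 */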
static int atmel_aes_crypt_dma(struct atmel_aes_dev *dd,
                dma_addr_t dma_addr_in, dma_addr_t dma_addr_out, int length)
{
        struct scatterlist sg[2];
        struct dma_async_tx_descriptor  *in_desc, *out_desc;
        enum dma_slave_buswidth addr_width;
        u32 maxburst;

        switch (dd->ctx->block_size) {
        case CFB8_BLOCK_SIZE:
                addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
                maxburst = 1;
                break;

        case CFB16_BLOCK_SIZE:
                addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
                maxburst = 1;
                break;

        case CFB32_BLOCK_SIZE:
        case CFB64_BLOCK_SIZE:
                addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
                maxburst = 1;
                break;

        case AES_BLOCK_SIZE:
                addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
                maxburst = dd->caps.max_burst_size;
                break;

        default:
                return -EINVAL;
        }

        dd->dma_size = length;

        dma_sync_single_for_device(dd->dev, dma_addr_in, length,
                                   DMA_TO_DEVICE);
        dma_sync_single_for_device(dd->dev, dma_addr_out, length,
                                   DMA_FROM_DEVICE);

        dd->dma_lch_in.dma_conf.dst_addr_width = addr_width;
        dd->dma_lch_in.dma_conf.src_maxburst = maxburst;
        dd->dma_lch_in.dma_conf.dst_maxburst = maxburst;

        dd->dma_lch_out.dma_conf.src_addr_width = addr_width;
        dd->dma_lch_out.dma_conf.src_maxburst = maxburst;
        dd->dma_lch_out.dma_conf.dst_maxburst = maxburst;

        dmaengine_slave_config(dd->dma_lch_in.chan, &dd->dma_lch_in.dma_conf);
        dmaengine_slave_config(dd->dma_lch_out.chan, &dd->dma_lch_out.dma_conf);

        sg_init_table(&sg[0], 1);
        sg_dma_address(&sg[0]) = dma_addr_in;
        sg_dma_len(&sg[0]) = length;

        sg_init_table(&sg[1], 1);
        sg_dma_address(&sg[1]) = dma_addr_out;
        sg_dma_len(&sg[1]) = length;

        in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, &sg[0],
                                1, DMA_MEM_TO_DEV,
                                DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!in_desc)
                return -EINVAL;

        out_desc = dmaengine_prep_slave_sg(dd->dma_lch_out.chan, &sg[1],
                                1, DMA_DEV_TO_MEM,
                                DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!out_desc)
                return -EINVAL;

        out_desc->callback = atmel_aes_dma_callback;
        out_desc->callback_param = dd;

        dmaengine_submit(out_desc);
        dma_async_issue_pending(dd->dma_lch_out.chan);

        dmaengine_submit(in_desc);
        dma_async_issue_pending(dd->dma_lch_in.chan);

        return 0;
}

static int atmel_aes_cpu_complete(struct atmel_aes_dev *dd);

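/*
 * PIO fallback for small requests: gather the input scatterlist into the
 * bounce buffer and feed it to IDATAR word by word.  Completion is
 * driven by the DATRDY interrupt via atmel_aes_cpu_complete().
 */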
static int atmel_aes_crypt_cpu_start(struct atmel_aes_dev *dd)
{
        struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq);

        dma_sync_single_for_cpu(dd->dev, dd->dma_addr_in,
                                dd->dma_size, DMA_TO_DEVICE);
        dma_sync_single_for_cpu(dd->dev, dd->dma_addr_out,
                                dd->dma_size, DMA_FROM_DEVICE);

        /* use cache buffers */
        dd->nb_in_sg = atmel_aes_sg_length(req, dd->in_sg);
        if (!dd->nb_in_sg)
                return -EINVAL;

        dd->nb_out_sg = atmel_aes_sg_length(req, dd->out_sg);
        if (!dd->nb_out_sg)
                return -EINVAL;

        dd->bufcnt = sg_copy_to_buffer(dd->in_sg, dd->nb_in_sg,
                                        dd->buf_in, dd->total);

        if (!dd->bufcnt)
                return -EINVAL;

        dd->total -= dd->bufcnt;

        atmel_aes_write(dd, AES_IER, AES_INT_DATARDY);
        atmel_aes_write_n(dd, AES_IDATAR(0), (u32 *) dd->buf_in,
                                dd->bufcnt >> 2);

        dd->resume = atmel_aes_cpu_complete;
        return -EINPROGRESS;
}

static int atmel_aes_dma_complete(struct atmel_aes_dev *dd);

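/*
 * Start one DMA transfer.  When both scatterlists are suitably aligned
 * (the "fast" path) they are mapped directly; otherwise the data is
 * staged through the pre-mapped bounce buffers.
 */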
static int atmel_aes_crypt_dma_start(struct atmel_aes_dev *dd)
{
        int err, fast = 0, in, out;
        size_t count;
        dma_addr_t addr_in, addr_out;

        if ((!dd->in_offset) && (!dd->out_offset)) {
                /* check for alignment */
                in = IS_ALIGNED((u32)dd->in_sg->offset, sizeof(u32)) &&
                        IS_ALIGNED(dd->in_sg->length, dd->ctx->block_size);
                out = IS_ALIGNED((u32)dd->out_sg->offset, sizeof(u32)) &&
                        IS_ALIGNED(dd->out_sg->length, dd->ctx->block_size);
                fast = in && out;

                if (sg_dma_len(dd->in_sg) != sg_dma_len(dd->out_sg))
                        fast = 0;
        }

        if (fast) {
                count = min_t(size_t, dd->total, sg_dma_len(dd->in_sg));
                count = min_t(size_t, count, sg_dma_len(dd->out_sg));

                err = dma_map_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
                if (!err) {
                        dev_err(dd->dev, "dma_map_sg() error\n");
                        return -EINVAL;
                }

                err = dma_map_sg(dd->dev, dd->out_sg, 1,
                                DMA_FROM_DEVICE);
                if (!err) {
                        dev_err(dd->dev, "dma_map_sg() error\n");
                        dma_unmap_sg(dd->dev, dd->in_sg, 1,
                                DMA_TO_DEVICE);
                        return -EINVAL;
                }

                addr_in = sg_dma_address(dd->in_sg);
                addr_out = sg_dma_address(dd->out_sg);

                dd->flags |= AES_FLAGS_FAST;

        } else {
                dma_sync_single_for_cpu(dd->dev, dd->dma_addr_in,
                                        dd->dma_size, DMA_TO_DEVICE);

                /* use cache buffers */
                count = atmel_aes_sg_copy(&dd->in_sg, &dd->in_offset,
                                dd->buf_in, dd->buflen, dd->total, 0);

                addr_in = dd->dma_addr_in;
                addr_out = dd->dma_addr_out;

                dd->flags &= ~AES_FLAGS_FAST;
        }

        dd->total -= count;

        err = atmel_aes_crypt_dma(dd, addr_in, addr_out, count);

        if (err && (dd->flags & AES_FLAGS_FAST)) {
                dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
                dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
        }

        dd->resume = atmel_aes_dma_complete;
        return err ? : -EINPROGRESS;
}

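/*
 * Program the Mode Register (key size, opmode, start mode) before
 * loading the key and, for chaining modes, the IV: the hardware expects
 * this ordering.
 */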
static void atmel_aes_write_ctrl(struct atmel_aes_dev *dd, bool use_dma,
                                 const u32 *iv)
{
        u32 valmr = 0;

        /* MR register must be set before IV registers */
        if (dd->ctx->keylen == AES_KEYSIZE_128)
                valmr |= AES_MR_KEYSIZE_128;
        else if (dd->ctx->keylen == AES_KEYSIZE_192)
                valmr |= AES_MR_KEYSIZE_192;
        else
                valmr |= AES_MR_KEYSIZE_256;

        valmr |= dd->flags & AES_FLAGS_MODE_MASK;

        if (use_dma) {
                valmr |= AES_MR_SMOD_IDATAR0;
                if (dd->caps.has_dualbuff)
                        valmr |= AES_MR_DUALBUFF;
        } else {
                valmr |= AES_MR_SMOD_AUTO;
        }

        atmel_aes_write(dd, AES_MR, valmr);

        atmel_aes_write_n(dd, AES_KEYWR(0), dd->ctx->key,
                                                dd->ctx->keylen >> 2);

        if (iv && (valmr & AES_MR_OPMOD_MASK) != AES_MR_OPMOD_ECB)
                atmel_aes_write_n(dd, AES_IVR(0), iv, 4);
}

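/*
 * Enqueue new_areq (if any) and, unless the engine is already busy,
 * dequeue the next request and hand it to the context's start() hook.
 * dd->is_async records whether completion must go through
 * areq->complete() rather than the caller's return value.
 */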
static int atmel_aes_handle_queue(struct atmel_aes_dev *dd,
                                  struct crypto_async_request *new_areq)
{
        struct crypto_async_request *areq, *backlog;
        struct atmel_aes_base_ctx *ctx;
        unsigned long flags;
        int err, ret = 0;

        spin_lock_irqsave(&dd->lock, flags);
        if (new_areq)
                ret = crypto_enqueue_request(&dd->queue, new_areq);
        if (dd->flags & AES_FLAGS_BUSY) {
                spin_unlock_irqrestore(&dd->lock, flags);
                return ret;
        }
        backlog = crypto_get_backlog(&dd->queue);
        areq = crypto_dequeue_request(&dd->queue);
        if (areq)
                dd->flags |= AES_FLAGS_BUSY;
        spin_unlock_irqrestore(&dd->lock, flags);

        if (!areq)
                return ret;

        if (backlog)
                backlog->complete(backlog, -EINPROGRESS);

        ctx = crypto_tfm_ctx(areq->tfm);

        dd->areq = areq;
        dd->ctx = ctx;
        dd->is_async = (areq != new_areq);

        err = ctx->start(dd);
        return (dd->is_async) ? ret : err;
}

static int atmel_aes_start(struct atmel_aes_dev *dd)
{
        struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq);
        struct atmel_aes_reqctx *rctx;
        bool use_dma;
        int err;

        /* assign new request to device */
        dd->total = req->nbytes;
        dd->in_offset = 0;
        dd->in_sg = req->src;
        dd->out_offset = 0;
        dd->out_sg = req->dst;

        rctx = ablkcipher_request_ctx(req);
        atmel_aes_set_mode(dd, rctx);

        err = atmel_aes_hw_init(dd);
        if (!err) {
                use_dma = (dd->total > ATMEL_AES_DMA_THRESHOLD);
                atmel_aes_write_ctrl(dd, use_dma, req->info);
                if (use_dma)
                        err = atmel_aes_crypt_dma_start(dd);
                else
                        err = atmel_aes_crypt_cpu_start(dd);
        }
        if (err && err != -EINPROGRESS) {
                /* aes_task will not finish it, so do it here */
                return atmel_aes_complete(dd, err);
        }

        return -EINPROGRESS;
}

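/*
 * Tear down after a DMA chunk: unmap the scatterlists on the fast path,
 * or copy the bounce buffer contents back into the output scatterlist.
 */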
static int atmel_aes_crypt_dma_stop(struct atmel_aes_dev *dd)
{
        int err = 0;
        size_t count;

        if (dd->flags & AES_FLAGS_FAST) {
                dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
                dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
        } else {
                dma_sync_single_for_cpu(dd->dev, dd->dma_addr_out,
                                        dd->dma_size, DMA_FROM_DEVICE);

                /* copy data */
                count = atmel_aes_sg_copy(&dd->out_sg, &dd->out_offset,
                                          dd->buf_out, dd->buflen,
                                          dd->dma_size, 1);
                if (count != dd->dma_size) {
                        err = -EINVAL;
                        pr_err("not all data converted: %zu\n", count);
                }
        }

        return err;
}

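/*
 * Allocate one page per direction for the slow-path bounce buffers and
 * map them for DMA once at probe time; buflen is rounded down to a
 * multiple of the AES block size.
 */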
static int atmel_aes_buff_init(struct atmel_aes_dev *dd)
{
        int err = -ENOMEM;

        dd->buf_in = (void *)__get_free_pages(GFP_KERNEL, 0);
        dd->buf_out = (void *)__get_free_pages(GFP_KERNEL, 0);
        dd->buflen = PAGE_SIZE;
        dd->buflen &= ~(AES_BLOCK_SIZE - 1);

        if (!dd->buf_in || !dd->buf_out) {
                dev_err(dd->dev, "unable to alloc pages.\n");
                goto err_alloc;
        }

        /* MAP here */
        dd->dma_addr_in = dma_map_single(dd->dev, dd->buf_in,
                                        dd->buflen, DMA_TO_DEVICE);
        if (dma_mapping_error(dd->dev, dd->dma_addr_in)) {
                dev_err(dd->dev, "dma %zd bytes error\n", dd->buflen);
                err = -EINVAL;
                goto err_map_in;
        }

        dd->dma_addr_out = dma_map_single(dd->dev, dd->buf_out,
                                        dd->buflen, DMA_FROM_DEVICE);
        if (dma_mapping_error(dd->dev, dd->dma_addr_out)) {
                dev_err(dd->dev, "dma %zd bytes error\n", dd->buflen);
                err = -EINVAL;
                goto err_map_out;
        }

        return 0;

err_map_out:
        dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen,
                DMA_TO_DEVICE);
err_map_in:
err_alloc:
        free_page((unsigned long)dd->buf_out);
        free_page((unsigned long)dd->buf_in);
        if (err)
                pr_err("error: %d\n", err);
        return err;
}

static void atmel_aes_buff_cleanup(struct atmel_aes_dev *dd)
{
        dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen,
                         DMA_FROM_DEVICE);
        dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen,
                DMA_TO_DEVICE);
        free_page((unsigned long)dd->buf_out);
        free_page((unsigned long)dd->buf_in);
}

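/*
 * Common entry point for all encrypt/decrypt operations: record the
 * block size implied by the requested mode, pick a device and queue
 * the request.
 */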
static int atmel_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
{
        struct atmel_aes_base_ctx *ctx = crypto_ablkcipher_ctx(
                        crypto_ablkcipher_reqtfm(req));
        struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req);
        struct atmel_aes_dev *dd;

        switch (mode & AES_FLAGS_OPMODE_MASK) {
        case AES_FLAGS_CFB8:
                ctx->block_size = CFB8_BLOCK_SIZE;
                break;

        case AES_FLAGS_CFB16:
                ctx->block_size = CFB16_BLOCK_SIZE;
                break;

        case AES_FLAGS_CFB32:
                ctx->block_size = CFB32_BLOCK_SIZE;
                break;

        case AES_FLAGS_CFB64:
                ctx->block_size = CFB64_BLOCK_SIZE;
                break;

        default:
                ctx->block_size = AES_BLOCK_SIZE;
                break;
        }

        dd = atmel_aes_find_dev(ctx);
        if (!dd)
                return -ENODEV;

        rctx->mode = mode;

        return atmel_aes_handle_queue(dd, &req->base);
}

static bool atmel_aes_filter(struct dma_chan *chan, void *slave)
{
        struct at_dma_slave     *sl = slave;

        if (sl && sl->dma_dev == chan->device->dev) {
                chan->private = sl;
                return true;
        } else {
                return false;
        }
}

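/*
 * Request the two DMA channels (DT "tx"/"rx" names, or the legacy
 * platform-data filter) and preset their slave configuration towards
 * the IDATAR/ODATAR registers.
 */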
static int atmel_aes_dma_init(struct atmel_aes_dev *dd,
        struct crypto_platform_data *pdata)
{
        int err = -ENOMEM;
        dma_cap_mask_t mask;

        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);

        /* Try to grab 2 DMA channels */
        dd->dma_lch_in.chan = dma_request_slave_channel_compat(mask,
                        atmel_aes_filter, &pdata->dma_slave->rxdata, dd->dev, "tx");
        if (!dd->dma_lch_in.chan)
                goto err_dma_in;

        dd->dma_lch_in.dma_conf.direction = DMA_MEM_TO_DEV;
        dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base +
                AES_IDATAR(0);
        dd->dma_lch_in.dma_conf.src_maxburst = dd->caps.max_burst_size;
        dd->dma_lch_in.dma_conf.src_addr_width =
                DMA_SLAVE_BUSWIDTH_4_BYTES;
        dd->dma_lch_in.dma_conf.dst_maxburst = dd->caps.max_burst_size;
        dd->dma_lch_in.dma_conf.dst_addr_width =
                DMA_SLAVE_BUSWIDTH_4_BYTES;
        dd->dma_lch_in.dma_conf.device_fc = false;

        dd->dma_lch_out.chan = dma_request_slave_channel_compat(mask,
                        atmel_aes_filter, &pdata->dma_slave->txdata, dd->dev, "rx");
        if (!dd->dma_lch_out.chan)
                goto err_dma_out;

        dd->dma_lch_out.dma_conf.direction = DMA_DEV_TO_MEM;
        dd->dma_lch_out.dma_conf.src_addr = dd->phys_base +
                AES_ODATAR(0);
        dd->dma_lch_out.dma_conf.src_maxburst = dd->caps.max_burst_size;
        dd->dma_lch_out.dma_conf.src_addr_width =
                DMA_SLAVE_BUSWIDTH_4_BYTES;
        dd->dma_lch_out.dma_conf.dst_maxburst = dd->caps.max_burst_size;
        dd->dma_lch_out.dma_conf.dst_addr_width =
                DMA_SLAVE_BUSWIDTH_4_BYTES;
        dd->dma_lch_out.dma_conf.device_fc = false;

        return 0;

err_dma_out:
        dma_release_channel(dd->dma_lch_in.chan);
err_dma_in:
        dev_warn(dd->dev, "no DMA channel available\n");
        return err;
}

static void atmel_aes_dma_cleanup(struct atmel_aes_dev *dd)
{
        dma_release_channel(dd->dma_lch_in.chan);
        dma_release_channel(dd->dma_lch_out.chan);
}

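/* Accept only the three AES key sizes and cache the key for later use. */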
static int atmel_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
                           unsigned int keylen)
{
        struct atmel_aes_base_ctx *ctx = crypto_ablkcipher_ctx(tfm);

        if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
                   keylen != AES_KEYSIZE_256) {
                crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
                return -EINVAL;
        }

        memcpy(ctx->key, key, keylen);
        ctx->keylen = keylen;

        return 0;
}

static int atmel_aes_ecb_encrypt(struct ablkcipher_request *req)
{
        return atmel_aes_crypt(req, AES_FLAGS_ECB | AES_FLAGS_ENCRYPT);
}

static int atmel_aes_ecb_decrypt(struct ablkcipher_request *req)
{
        return atmel_aes_crypt(req, AES_FLAGS_ECB);
}

static int atmel_aes_cbc_encrypt(struct ablkcipher_request *req)
{
        return atmel_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_CBC);
}

static int atmel_aes_cbc_decrypt(struct ablkcipher_request *req)
{
        return atmel_aes_crypt(req, AES_FLAGS_CBC);
}

static int atmel_aes_ofb_encrypt(struct ablkcipher_request *req)
{
        return atmel_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_OFB);
}

static int atmel_aes_ofb_decrypt(struct ablkcipher_request *req)
{
        return atmel_aes_crypt(req, AES_FLAGS_OFB);
}

static int atmel_aes_cfb_encrypt(struct ablkcipher_request *req)
{
        return atmel_aes_crypt(req, AES_FLAGS_CFB128 | AES_FLAGS_ENCRYPT);
}

static int atmel_aes_cfb_decrypt(struct ablkcipher_request *req)
{
        return atmel_aes_crypt(req, AES_FLAGS_CFB128);
}

static int atmel_aes_cfb64_encrypt(struct ablkcipher_request *req)
{
        return atmel_aes_crypt(req, AES_FLAGS_CFB64 | AES_FLAGS_ENCRYPT);
}

static int atmel_aes_cfb64_decrypt(struct ablkcipher_request *req)
{
        return atmel_aes_crypt(req, AES_FLAGS_CFB64);
}

static int atmel_aes_cfb32_encrypt(struct ablkcipher_request *req)
{
        return atmel_aes_crypt(req, AES_FLAGS_CFB32 | AES_FLAGS_ENCRYPT);
}

static int atmel_aes_cfb32_decrypt(struct ablkcipher_request *req)
{
        return atmel_aes_crypt(req, AES_FLAGS_CFB32);
}

static int atmel_aes_cfb16_encrypt(struct ablkcipher_request *req)
{
        return atmel_aes_crypt(req, AES_FLAGS_CFB16 | AES_FLAGS_ENCRYPT);
}

static int atmel_aes_cfb16_decrypt(struct ablkcipher_request *req)
{
        return atmel_aes_crypt(req, AES_FLAGS_CFB16);
}

static int atmel_aes_cfb8_encrypt(struct ablkcipher_request *req)
{
        return atmel_aes_crypt(req, AES_FLAGS_CFB8 | AES_FLAGS_ENCRYPT);
}

static int atmel_aes_cfb8_decrypt(struct ablkcipher_request *req)
{
        return atmel_aes_crypt(req, AES_FLAGS_CFB8);
}

static int atmel_aes_ctr_encrypt(struct ablkcipher_request *req)
{
        return atmel_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_CTR);
}

static int atmel_aes_ctr_decrypt(struct ablkcipher_request *req)
{
        return atmel_aes_crypt(req, AES_FLAGS_CTR);
}

static int atmel_aes_cra_init(struct crypto_tfm *tfm)
{
        struct atmel_aes_ctx *ctx = crypto_tfm_ctx(tfm);

        tfm->crt_ablkcipher.reqsize = sizeof(struct atmel_aes_reqctx);
        ctx->base.start = atmel_aes_start;

        return 0;
}

static void atmel_aes_cra_exit(struct crypto_tfm *tfm)
{
}

static struct crypto_alg aes_algs[] = {
{
        .cra_name               = "ecb(aes)",
        .cra_driver_name        = "atmel-ecb-aes",
        .cra_priority           = ATMEL_AES_PRIORITY,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct atmel_aes_ctx),
        .cra_alignmask          = 0xf,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_init               = atmel_aes_cra_init,
        .cra_exit               = atmel_aes_cra_exit,
        .cra_u.ablkcipher = {
                .min_keysize    = AES_MIN_KEY_SIZE,
                .max_keysize    = AES_MAX_KEY_SIZE,
                .setkey         = atmel_aes_setkey,
                .encrypt        = atmel_aes_ecb_encrypt,
                .decrypt        = atmel_aes_ecb_decrypt,
        }
},
{
        .cra_name               = "cbc(aes)",
        .cra_driver_name        = "atmel-cbc-aes",
        .cra_priority           = ATMEL_AES_PRIORITY,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct atmel_aes_ctx),
        .cra_alignmask          = 0xf,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_init               = atmel_aes_cra_init,
        .cra_exit               = atmel_aes_cra_exit,
        .cra_u.ablkcipher = {
                .min_keysize    = AES_MIN_KEY_SIZE,
                .max_keysize    = AES_MAX_KEY_SIZE,
                .ivsize         = AES_BLOCK_SIZE,
                .setkey         = atmel_aes_setkey,
                .encrypt        = atmel_aes_cbc_encrypt,
                .decrypt        = atmel_aes_cbc_decrypt,
        }
},
{
        .cra_name               = "ofb(aes)",
        .cra_driver_name        = "atmel-ofb-aes",
        .cra_priority           = ATMEL_AES_PRIORITY,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct atmel_aes_ctx),
        .cra_alignmask          = 0xf,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_init               = atmel_aes_cra_init,
        .cra_exit               = atmel_aes_cra_exit,
        .cra_u.ablkcipher = {
                .min_keysize    = AES_MIN_KEY_SIZE,
                .max_keysize    = AES_MAX_KEY_SIZE,
                .ivsize         = AES_BLOCK_SIZE,
                .setkey         = atmel_aes_setkey,
                .encrypt        = atmel_aes_ofb_encrypt,
                .decrypt        = atmel_aes_ofb_decrypt,
        }
},
{
        .cra_name               = "cfb(aes)",
        .cra_driver_name        = "atmel-cfb-aes",
        .cra_priority           = ATMEL_AES_PRIORITY,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct atmel_aes_ctx),
        .cra_alignmask          = 0xf,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_init               = atmel_aes_cra_init,
        .cra_exit               = atmel_aes_cra_exit,
        .cra_u.ablkcipher = {
                .min_keysize    = AES_MIN_KEY_SIZE,
                .max_keysize    = AES_MAX_KEY_SIZE,
                .ivsize         = AES_BLOCK_SIZE,
                .setkey         = atmel_aes_setkey,
                .encrypt        = atmel_aes_cfb_encrypt,
                .decrypt        = atmel_aes_cfb_decrypt,
        }
},
{
        .cra_name               = "cfb32(aes)",
        .cra_driver_name        = "atmel-cfb32-aes",
        .cra_priority           = ATMEL_AES_PRIORITY,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize          = CFB32_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct atmel_aes_ctx),
        .cra_alignmask          = 0x3,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_init               = atmel_aes_cra_init,
        .cra_exit               = atmel_aes_cra_exit,
        .cra_u.ablkcipher = {
                .min_keysize    = AES_MIN_KEY_SIZE,
                .max_keysize    = AES_MAX_KEY_SIZE,
                .ivsize         = AES_BLOCK_SIZE,
                .setkey         = atmel_aes_setkey,
                .encrypt        = atmel_aes_cfb32_encrypt,
                .decrypt        = atmel_aes_cfb32_decrypt,
        }
},
{
        .cra_name               = "cfb16(aes)",
        .cra_driver_name        = "atmel-cfb16-aes",
        .cra_priority           = ATMEL_AES_PRIORITY,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize          = CFB16_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct atmel_aes_ctx),
        .cra_alignmask          = 0x1,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_init               = atmel_aes_cra_init,
        .cra_exit               = atmel_aes_cra_exit,
        .cra_u.ablkcipher = {
                .min_keysize    = AES_MIN_KEY_SIZE,
                .max_keysize    = AES_MAX_KEY_SIZE,
                .ivsize         = AES_BLOCK_SIZE,
                .setkey         = atmel_aes_setkey,
                .encrypt        = atmel_aes_cfb16_encrypt,
                .decrypt        = atmel_aes_cfb16_decrypt,
        }
},
{
        .cra_name               = "cfb8(aes)",
        .cra_driver_name        = "atmel-cfb8-aes",
        .cra_priority           = ATMEL_AES_PRIORITY,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize          = CFB8_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct atmel_aes_ctx),
        .cra_alignmask          = 0x0,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_init               = atmel_aes_cra_init,
        .cra_exit               = atmel_aes_cra_exit,
        .cra_u.ablkcipher = {
                .min_keysize    = AES_MIN_KEY_SIZE,
                .max_keysize    = AES_MAX_KEY_SIZE,
                .ivsize         = AES_BLOCK_SIZE,
                .setkey         = atmel_aes_setkey,
                .encrypt        = atmel_aes_cfb8_encrypt,
                .decrypt        = atmel_aes_cfb8_decrypt,
        }
},
{
        .cra_name               = "ctr(aes)",
        .cra_driver_name        = "atmel-ctr-aes",
        .cra_priority           = ATMEL_AES_PRIORITY,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct atmel_aes_ctx),
        .cra_alignmask          = 0xf,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_init               = atmel_aes_cra_init,
        .cra_exit               = atmel_aes_cra_exit,
        .cra_u.ablkcipher = {
                .min_keysize    = AES_MIN_KEY_SIZE,
                .max_keysize    = AES_MAX_KEY_SIZE,
                .ivsize         = AES_BLOCK_SIZE,
                .setkey         = atmel_aes_setkey,
                .encrypt        = atmel_aes_ctr_encrypt,
                .decrypt        = atmel_aes_ctr_decrypt,
        }
},
};

static struct crypto_alg aes_cfb64_alg = {
        .cra_name               = "cfb64(aes)",
        .cra_driver_name        = "atmel-cfb64-aes",
        .cra_priority           = ATMEL_AES_PRIORITY,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize          = CFB64_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct atmel_aes_ctx),
        .cra_alignmask          = 0x7,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_init               = atmel_aes_cra_init,
        .cra_exit               = atmel_aes_cra_exit,
        .cra_u.ablkcipher = {
                .min_keysize    = AES_MIN_KEY_SIZE,
                .max_keysize    = AES_MAX_KEY_SIZE,
                .ivsize         = AES_BLOCK_SIZE,
                .setkey         = atmel_aes_setkey,
                .encrypt        = atmel_aes_cfb64_encrypt,
                .decrypt        = atmel_aes_cfb64_decrypt,
        }
};

static void atmel_aes_queue_task(unsigned long data)
{
        struct atmel_aes_dev *dd = (struct atmel_aes_dev *)data;

        atmel_aes_handle_queue(dd, NULL);
}

static void atmel_aes_done_task(unsigned long data)
{
        struct atmel_aes_dev *dd = (struct atmel_aes_dev *)data;

        dd->is_async = true;
        (void)dd->resume(dd);
}

static int atmel_aes_dma_complete(struct atmel_aes_dev *dd)
{
        int err;

        err = atmel_aes_crypt_dma_stop(dd);
        if (dd->total && !err) {
                if (dd->flags & AES_FLAGS_FAST) {
                        dd->in_sg = sg_next(dd->in_sg);
                        dd->out_sg = sg_next(dd->out_sg);
                        if (!dd->in_sg || !dd->out_sg)
                                err = -EINVAL;
                }
                if (!err)
                        err = atmel_aes_crypt_dma_start(dd);
                if (!err || err == -EINPROGRESS)
                        return -EINPROGRESS; /* DMA started. Not finished yet. */
        }

        return atmel_aes_complete(dd, err);
}

static int atmel_aes_cpu_complete(struct atmel_aes_dev *dd)
{
        int err;

        atmel_aes_read_n(dd, AES_ODATAR(0), (u32 *) dd->buf_out,
                         dd->bufcnt >> 2);

        if (sg_copy_from_buffer(dd->out_sg, dd->nb_out_sg,
                                dd->buf_out, dd->bufcnt))
                err = 0;
        else
                err = -EINVAL;

        return atmel_aes_complete(dd, err);
}

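/*
 * Interrupt handler: disable the pending sources and defer the actual
 * processing to the done tasklet.
 */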
static irqreturn_t atmel_aes_irq(int irq, void *dev_id)
{
        struct atmel_aes_dev *aes_dd = dev_id;
        u32 reg;

        reg = atmel_aes_read(aes_dd, AES_ISR);
        if (reg & atmel_aes_read(aes_dd, AES_IMR)) {
                atmel_aes_write(aes_dd, AES_IDR, reg);
                if (AES_FLAGS_BUSY & aes_dd->flags)
                        tasklet_schedule(&aes_dd->done_task);
                else
                        dev_warn(aes_dd->dev, "AES interrupt when no active requests.\n");
                return IRQ_HANDLED;
        }

        return IRQ_NONE;
}

static void atmel_aes_unregister_algs(struct atmel_aes_dev *dd)
{
        int i;

        if (dd->caps.has_cfb64)
                crypto_unregister_alg(&aes_cfb64_alg);

        for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
                crypto_unregister_alg(&aes_algs[i]);
}

static int atmel_aes_register_algs(struct atmel_aes_dev *dd)
{
        int err, i, j;

        for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
                err = crypto_register_alg(&aes_algs[i]);
                if (err)
                        goto err_aes_algs;
        }

        if (dd->caps.has_cfb64) {
                err = crypto_register_alg(&aes_cfb64_alg);
                if (err)
                        goto err_aes_cfb64_alg;
        }

        return 0;

err_aes_cfb64_alg:
        i = ARRAY_SIZE(aes_algs);
err_aes_algs:
        for (j = 0; j < i; j++)
                crypto_unregister_alg(&aes_algs[j]);

        return err;
}

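/*
 * Derive feature flags from the IP version read at probe time; unknown
 * versions fall back to the minimum capabilities.
 */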
static void atmel_aes_get_cap(struct atmel_aes_dev *dd)
{
        dd->caps.has_dualbuff = 0;
        dd->caps.has_cfb64 = 0;
        dd->caps.max_burst_size = 1;

        /* keep only major version number */
        switch (dd->hw_version & 0xff0) {
        case 0x500:
                dd->caps.has_dualbuff = 1;
                dd->caps.has_cfb64 = 1;
                dd->caps.max_burst_size = 4;
                break;
        case 0x200:
                dd->caps.has_dualbuff = 1;
                dd->caps.has_cfb64 = 1;
                dd->caps.max_burst_size = 4;
                break;
        case 0x130:
                dd->caps.has_dualbuff = 1;
                dd->caps.has_cfb64 = 1;
                dd->caps.max_burst_size = 4;
                break;
        case 0x120:
                break;
        default:
                dev_warn(dd->dev,
                                "Unmanaged aes version, set minimum capabilities\n");
                break;
        }
}

#if defined(CONFIG_OF)
static const struct of_device_id atmel_aes_dt_ids[] = {
        { .compatible = "atmel,at91sam9g46-aes" },
        { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, atmel_aes_dt_ids);

static struct crypto_platform_data *atmel_aes_of_init(struct platform_device *pdev)
{
        struct device_node *np = pdev->dev.of_node;
        struct crypto_platform_data *pdata;

        if (!np) {
                dev_err(&pdev->dev, "device node not found\n");
                return ERR_PTR(-EINVAL);
        }

        pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
        if (!pdata) {
                dev_err(&pdev->dev, "could not allocate memory for pdata\n");
                return ERR_PTR(-ENOMEM);
        }

        pdata->dma_slave = devm_kzalloc(&pdev->dev,
                                        sizeof(*(pdata->dma_slave)),
                                        GFP_KERNEL);
        if (!pdata->dma_slave) {
                dev_err(&pdev->dev, "could not allocate memory for dma_slave\n");
                devm_kfree(&pdev->dev, pdata);
                return ERR_PTR(-ENOMEM);
        }

        return pdata;
}
#else
static inline struct crypto_platform_data *atmel_aes_of_init(struct platform_device *pdev)
{
        return ERR_PTR(-EINVAL);
}
#endif

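/*
 * Probe: map registers, grab IRQ and clock, read the hardware version,
 * set up buffers and DMA, then register the supported algorithms.
 */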
static int atmel_aes_probe(struct platform_device *pdev)
{
        struct atmel_aes_dev *aes_dd;
        struct crypto_platform_data *pdata;
        struct device *dev = &pdev->dev;
        struct resource *aes_res;
        int err;

        pdata = pdev->dev.platform_data;
        if (!pdata) {
                pdata = atmel_aes_of_init(pdev);
                if (IS_ERR(pdata)) {
                        err = PTR_ERR(pdata);
                        goto aes_dd_err;
                }
        }

        if (!pdata->dma_slave) {
                err = -ENXIO;
                goto aes_dd_err;
        }

        aes_dd = devm_kzalloc(&pdev->dev, sizeof(*aes_dd), GFP_KERNEL);
        if (aes_dd == NULL) {
                dev_err(dev, "unable to alloc data struct.\n");
                err = -ENOMEM;
                goto aes_dd_err;
        }

        aes_dd->dev = dev;

        platform_set_drvdata(pdev, aes_dd);

        INIT_LIST_HEAD(&aes_dd->list);
        spin_lock_init(&aes_dd->lock);

        tasklet_init(&aes_dd->done_task, atmel_aes_done_task,
                                        (unsigned long)aes_dd);
        tasklet_init(&aes_dd->queue_task, atmel_aes_queue_task,
                                        (unsigned long)aes_dd);

        crypto_init_queue(&aes_dd->queue, ATMEL_AES_QUEUE_LENGTH);

        aes_dd->irq = -1;

        /* Get the base address */
        aes_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!aes_res) {
                dev_err(dev, "no MEM resource info\n");
                err = -ENODEV;
                goto res_err;
        }
        aes_dd->phys_base = aes_res->start;

        /* Get the IRQ */
        aes_dd->irq = platform_get_irq(pdev, 0);
        if (aes_dd->irq < 0) {
                dev_err(dev, "no IRQ resource info\n");
                err = aes_dd->irq;
                goto res_err;
        }

        err = devm_request_irq(&pdev->dev, aes_dd->irq, atmel_aes_irq,
                               IRQF_SHARED, "atmel-aes", aes_dd);
        if (err) {
                dev_err(dev, "unable to request aes irq.\n");
                goto res_err;
        }

        /* Initializing the clock */
        aes_dd->iclk = devm_clk_get(&pdev->dev, "aes_clk");
        if (IS_ERR(aes_dd->iclk)) {
                dev_err(dev, "clock initialization failed.\n");
                err = PTR_ERR(aes_dd->iclk);
                goto res_err;
        }

        aes_dd->io_base = devm_ioremap_resource(&pdev->dev, aes_res);
        if (IS_ERR(aes_dd->io_base)) {
                dev_err(dev, "can't ioremap\n");
                err = PTR_ERR(aes_dd->io_base);
                goto res_err;
        }

        err = atmel_aes_hw_version_init(aes_dd);
        if (err)
                goto res_err;

        atmel_aes_get_cap(aes_dd);

        err = atmel_aes_buff_init(aes_dd);
        if (err)
                goto err_aes_buff;

        err = atmel_aes_dma_init(aes_dd, pdata);
        if (err)
                goto err_aes_dma;

        spin_lock(&atmel_aes.lock);
        list_add_tail(&aes_dd->list, &atmel_aes.dev_list);
        spin_unlock(&atmel_aes.lock);

        err = atmel_aes_register_algs(aes_dd);
        if (err)
                goto err_algs;

        dev_info(dev, "Atmel AES - Using %s, %s for DMA transfers\n",
                        dma_chan_name(aes_dd->dma_lch_in.chan),
                        dma_chan_name(aes_dd->dma_lch_out.chan));

        return 0;

err_algs:
        spin_lock(&atmel_aes.lock);
        list_del(&aes_dd->list);
        spin_unlock(&atmel_aes.lock);
        atmel_aes_dma_cleanup(aes_dd);
err_aes_dma:
        atmel_aes_buff_cleanup(aes_dd);
err_aes_buff:
res_err:
        tasklet_kill(&aes_dd->done_task);
        tasklet_kill(&aes_dd->queue_task);
aes_dd_err:
        dev_err(dev, "initialization failed.\n");

        return err;
}

static int atmel_aes_remove(struct platform_device *pdev)
{
        struct atmel_aes_dev *aes_dd;

        aes_dd = platform_get_drvdata(pdev);
        if (!aes_dd)
                return -ENODEV;
        spin_lock(&atmel_aes.lock);
        list_del(&aes_dd->list);
        spin_unlock(&atmel_aes.lock);

        atmel_aes_unregister_algs(aes_dd);

        tasklet_kill(&aes_dd->done_task);
        tasklet_kill(&aes_dd->queue_task);

        atmel_aes_dma_cleanup(aes_dd);
        atmel_aes_buff_cleanup(aes_dd);

        return 0;
}

static struct platform_driver atmel_aes_driver = {
        .probe          = atmel_aes_probe,
        .remove         = atmel_aes_remove,
        .driver         = {
                .name   = "atmel_aes",
                .of_match_table = of_match_ptr(atmel_aes_dt_ids),
        },
};

module_platform_driver(atmel_aes_driver);

MODULE_DESCRIPTION("Atmel AES hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Nicolas Royer - Eukréa Electromatique");