drivers/crypto/caam/caamalg.c
/*
 * caam - Freescale FSL CAAM support for crypto API
 *
 * Copyright 2008-2011 Freescale Semiconductor, Inc.
 *
 * Based on talitos crypto API driver.
 *
 * relationship of job descriptors to shared descriptors (SteveC Dec 10 2008):
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->|  ShareDesc  |
 * | *(packet 1) |                     |   (PDB)     |
 * ---------------      |------------->|  (hashKey)  |
 *       .              |              | (cipherKey) |
 *       .              |    |-------->| (operation) |
 * ---------------      |    |         ---------------
 * | JobDesc #2  |------|    |
 * | *(packet 2) |           |
 * ---------------           |
 *       .                   |
 *       .                   |
 * ---------------           |
 * | JobDesc #3  |------------
 * | *(packet 3) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */
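
/*
 * To make the job descriptor diagram concrete: a minimal sketch (not
 * part of this driver, placeholder arguments) of how such a descriptor
 * is put together with the desc_constr.h helpers used throughout this
 * file.
 */
#if 0	/* illustrative only */
static void example_build_job_desc(u32 *desc, dma_addr_t sh_desc_dma,
				   int sh_desc_len, dma_addr_t in, u32 in_len,
				   dma_addr_t out, u32 out_len)
{
	/* Header + ShareDesc pointer */
	init_job_desc_shared(desc, sh_desc_dma, sh_desc_len,
			     HDR_SHARE_DEFER | HDR_REVERSE);
	/* SEQ_IN_PTR: where this packet's input is */
	append_seq_in_ptr(desc, in, in_len, 0);
	/* SEQ_OUT_PTR: where this packet's output goes */
	append_seq_out_ptr(desc, out, out_len, 0);
}
#endif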

#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"

/*
 * crypto alg
 */
#define CAAM_CRA_PRIORITY		3000
/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
#define CAAM_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + \
					 SHA512_DIGEST_SIZE * 2)
/* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
#define CAAM_MAX_IV_LENGTH		16

/* length of descriptors text */
#define DESC_AEAD_BASE			(4 * CAAM_CMD_SZ)
#define DESC_AEAD_ENC_LEN		(DESC_AEAD_BASE + 16 * CAAM_CMD_SZ)
#define DESC_AEAD_DEC_LEN		(DESC_AEAD_BASE + 21 * CAAM_CMD_SZ)
#define DESC_AEAD_GIVENC_LEN		(DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)

#define DESC_ABLKCIPHER_BASE		(3 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_ENC_LEN		(DESC_ABLKCIPHER_BASE + \
					 20 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_DEC_LEN		(DESC_ABLKCIPHER_BASE + \
					 15 * CAAM_CMD_SZ)

#define DESC_MAX_USED_BYTES		(DESC_AEAD_GIVENC_LEN + \
					 CAAM_MAX_KEY_SIZE)
#define DESC_MAX_USED_LEN		(DESC_MAX_USED_BYTES / CAAM_CMD_SZ)

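/*
 * Rough budget check (assuming CAAM_CMD_SZ == 4 bytes per command word):
 * the largest descriptor text above is DESC_AEAD_GIVENC_LEN =
 * (4 + 16 + 7) * 4 = 108 bytes, and DESC_MAX_USED_BYTES adds the 96-byte
 * CAAM_MAX_KEY_SIZE (32 + 64) for inline keys, i.e. 204 bytes, which is
 * comfortably inside the 64-word (256-byte) descriptor buffer that the
 * comments below refer to.
 */
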
#ifdef DEBUG
/* for print_hex_dumps with line references */
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif

/* Set DK bit in class 1 operation if shared */
static inline void append_dec_op1(u32 *desc, u32 type)
{
	u32 *jump_cmd, *uncond_jump_cmd;

	jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
	append_operation(desc, type | OP_ALG_AS_INITFINAL |
			 OP_ALG_DECRYPT);
	uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
	set_jump_tgt_here(desc, jump_cmd);
	append_operation(desc, type | OP_ALG_AS_INITFINAL |
			 OP_ALG_DECRYPT | OP_ALG_AAI_DK);
	set_jump_tgt_here(desc, uncond_jump_cmd);
}

/*
 * Wait for completion of class 1 key loading before allowing
 * error propagation
 */
static inline void append_dec_shr_done(u32 *desc)
{
	u32 *jump_cmd;

	jump_cmd = append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TEST_ALL);
	set_jump_tgt_here(desc, jump_cmd);
	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
}

/*
 * For aead functions, read payload and write payload,
 * both of which are specified in req->src and req->dst
 */
static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
{
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
			     KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
}

/*
 * For aead encrypt and decrypt, read iv for both classes
 */
static inline void aead_append_ld_iv(u32 *desc, int ivsize)
{
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_1_CCB | ivsize);
	append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO | ivsize);
}

/*
 * For ablkcipher encrypt and decrypt, read from req->src and
 * write to req->dst
 */
static inline void ablkcipher_append_src_dst(u32 *desc)
{
	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
			     KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
}

/*
 * Set when all data, including src (with assoc and iv) or dst (with iv
 * only), is contiguous
 */
#define GIV_SRC_CONTIG		1
#define GIV_DST_CONTIG		(1 << 1)

/*
 * per-session context
 */
struct caam_ctx {
	struct device *jrdev;
	u32 sh_desc_enc[DESC_MAX_USED_LEN];
	u32 sh_desc_dec[DESC_MAX_USED_LEN];
	u32 sh_desc_givenc[DESC_MAX_USED_LEN];
	dma_addr_t sh_desc_enc_dma;
	dma_addr_t sh_desc_dec_dma;
	dma_addr_t sh_desc_givenc_dma;
	u32 class1_alg_type;
	u32 class2_alg_type;
	u32 alg_op;
	u8 key[CAAM_MAX_KEY_SIZE];
	dma_addr_t key_dma;
	unsigned int enckeylen;
	unsigned int split_key_len;
	unsigned int split_key_pad_len;
	unsigned int authsize;
};

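/*
 * For AEAD algorithms, ctx->key holds both keys back to back, as laid
 * out by aead_setkey() and consumed by append_key_aead() below:
 *
 *   | MDHA split key | padding to split_key_pad_len | encryption key |
 *    <-split_key_len->                                <- enckeylen ->
 *
 * ctx->key_dma maps the whole region once; the class 1 (cipher) key is
 * then always found at ctx->key_dma + split_key_pad_len.
 */
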
static void append_key_aead(u32 *desc, struct caam_ctx *ctx,
			    int keys_fit_inline)
{
	if (keys_fit_inline) {
		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
				  ctx->split_key_len, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
		append_key_as_imm(desc, (void *)ctx->key +
				  ctx->split_key_pad_len, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	} else {
		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
		append_key(desc, ctx->key_dma + ctx->split_key_pad_len,
			   ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	}
}

static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
				  int keys_fit_inline)
{
	u32 *key_jump_cmd;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	append_key_aead(desc, ctx, keys_fit_inline);

	set_jump_tgt_here(desc, key_jump_cmd);

	/* Propagate errors from shared to job descriptor */
	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
}

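/*
 * Shape of the shared descriptor this helper produces, in the spirit of
 * the diagrams at the top of this file:
 *
 * ---------------------------
 * | SHR HDR (serial)        |
 * | JUMP if-shared ------.  |
 * | KEY (class 2, split) |  |
 * | KEY (class 1, enc)   |  |
 * | LOAD no-prop  <------'  |
 * ---------------------------
 *
 * When the descriptor is already shared, the keys are still loaded from
 * the earlier job, so the KEY commands can be skipped.
 */
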
static int aead_set_sh_desc(struct crypto_aead *aead)
{
	struct aead_tfm *tfm = &aead->base.crt_aead;
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 *key_jump_cmd, *jump_cmd;
	u32 geniv, moveiv;
	u32 *desc;

	if (!ctx->enckeylen || !ctx->authsize)
		return 0;

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_AEAD_ENC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;

	init_sh_desc_key_aead(desc, ctx, keys_fit_inline);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

	/* assoclen + cryptlen = seqinlen - ivsize */
	append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize);

	/* assoclen = (assoclen + cryptlen) - cryptlen */
	append_math_sub(desc, VARSEQINLEN, REG2, REG3, CAAM_CMD_SZ);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);
	aead_append_ld_iv(desc, tfm->ivsize);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_AEAD_DEC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_dec;

	/* aead_decrypt shared descriptor */
	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	append_key_aead(desc, ctx, keys_fit_inline);

	/* Only propagate error immediately if shared */
	jump_cmd = append_jump(desc, JUMP_TEST_ALL);
	set_jump_tgt_here(desc, key_jump_cmd);
	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
	set_jump_tgt_here(desc, jump_cmd);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* assoclen + cryptlen = seqinlen - ivsize - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
				ctx->authsize + tfm->ivsize);
	/* assoclen = (assoclen + cryptlen) - cryptlen */
	append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
	append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	aead_append_ld_iv(desc, tfm->ivsize);

	append_dec_op1(desc, ctx->class1_alg_type);

	/* Read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG);

	/* Load ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
	append_dec_shr_done(desc);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_AEAD_GIVENC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_givencrypt shared descriptor */
	desc = ctx->sh_desc_givenc;

	init_sh_desc_key_aead(desc, ctx, keys_fit_inline);

	/* Generate IV */
	geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
		NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
		NFIFOENTRY_PTYPE_RND | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
	append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	append_move(desc, MOVE_SRC_INFIFO |
		    MOVE_DEST_CLASS1CTX | (tfm->ivsize << MOVE_LEN_SHIFT));
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

	/* Copy generated IV from class 1 context to output FIFO */
	append_move(desc, MOVE_SRC_CLASS1CTX |
		    MOVE_DEST_OUTFIFO | (tfm->ivsize << MOVE_LEN_SHIFT));

	/* Return to encryption */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* ivsize + cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

	/* assoclen = seqinlen - (ivsize + cryptlen) */
	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	/* Copy iv from class 1 ctx to class 2 fifo */
	moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
		 NFIFOENTRY_DTYPE_MSG | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
	append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
	append_load_imm_u32(desc, tfm->ivsize, LDST_CLASS_2_CCB |
			    LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Will write ivsize + cryptlen */
	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* No need to reload iv */
	append_seq_fifo_load(desc, tfm->ivsize,
			     FIFOLD_CLASS_SKIP);

	/* Will read cryptlen */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
						 desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead givenc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

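/*
 * Worked example for the keys_fit_inline checks above, assuming 4-byte
 * command words and a DESC_JOB_IO_LEN of 32 bytes on this platform: for
 * authenc(hmac(sha256),cbc(aes)) with a 16-byte AES key, the split key
 * pads to 64 bytes, so DESC_AEAD_ENC_LEN (80) + 32 + 64 + 16 = 192 fits
 * in the 64-word (256-byte) descriptor buffer and the keys go inline.
 */
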
static int aead_setauthsize(struct crypto_aead *authenc,
			    unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	aead_set_sh_desc(authenc);

	return 0;
}

static u32 gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in,
			      u32 authkeylen)
{
	return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
			     ctx->split_key_pad_len, key_in, authkeylen,
			     ctx->alg_op);
}

static int aead_setkey(struct crypto_aead *aead,
		       const u8 *key, unsigned int keylen)
{
	/* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
	static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	struct rtattr *rta = (void *)key;
	struct crypto_authenc_key_param *param;
	unsigned int authkeylen;
	unsigned int enckeylen;
	int ret = 0;

	/*
	 * Validate the rtattr-encoded authenc() key blob before trusting
	 * its length fields (same checks as crypto/authenc.c)
	 */
	if (!RTA_OK(rta, keylen))
		goto badkey;

	if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
		goto badkey;

	if (RTA_PAYLOAD(rta) < sizeof(*param))
		goto badkey;

	param = RTA_DATA(rta);
	enckeylen = be32_to_cpu(param->enckeylen);

	key += RTA_ALIGN(rta->rta_len);
	keylen -= RTA_ALIGN(rta->rta_len);

	if (keylen < enckeylen)
		goto badkey;

	authkeylen = keylen - enckeylen;

	if (keylen > CAAM_MAX_KEY_SIZE)
		goto badkey;

	/* Pick class 2 key length from algorithm submask */
	ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
				      OP_ALG_ALGSEL_SHIFT] * 2;
	ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);

#ifdef DEBUG
	printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
	       keylen, enckeylen, authkeylen);
	printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
	       ctx->split_key_len, ctx->split_key_pad_len);
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	ret = gen_split_aead_key(ctx, key, authkeylen);
	if (ret)
		goto badkey;

	/* append encryption key after the padded auth split key */
	memcpy(ctx->key + ctx->split_key_pad_len, key + authkeylen, enckeylen);

	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
				      enckeylen, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
		       ctx->split_key_pad_len + enckeylen, 1);
#endif

	ctx->enckeylen = enckeylen;

	ret = aead_set_sh_desc(aead);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len +
				 enckeylen, DMA_TO_DEVICE);
	}

	return ret;
badkey:
	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

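/*
 * For reference, a sketch (hypothetical helper, error handling omitted)
 * of how a caller packs the rtattr-encoded authenc() key blob that
 * aead_setkey() parses above; the format comes from crypto/authenc.c.
 */
#if 0	/* illustrative only */
static int example_pack_authenc_key(u8 *blob, const u8 *authkey,
				    unsigned int authkeylen,
				    const u8 *enckey, unsigned int enckeylen)
{
	struct rtattr *rta = (void *)blob;
	struct crypto_authenc_key_param *param;

	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
	rta->rta_len = RTA_LENGTH(sizeof(*param));
	param = RTA_DATA(rta);
	param->enckeylen = cpu_to_be32(enckeylen);

	/* auth key first, then encryption key, after the aligned header */
	memcpy(blob + RTA_SPACE(sizeof(*param)), authkey, authkeylen);
	memcpy(blob + RTA_SPACE(sizeof(*param)) + authkeylen, enckey,
	       enckeylen);

	return RTA_SPACE(sizeof(*param)) + authkeylen + enckeylen;
}
#endif
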
static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
			     const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct ablkcipher_tfm *tfm = &ablkcipher->base.crt_ablkcipher;
	struct device *jrdev = ctx->jrdev;
	int ret = 0;
	u32 *key_jump_cmd, *jump_cmd;
	u32 *desc;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);
	ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}
	ctx->enckeylen = keylen;

	/* ablkcipher_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;
	init_sh_desc(desc, HDR_SHARE_SERIAL);
	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	/* Load class1 key only */
	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
			  ctx->enckeylen, CLASS_1 |
			  KEY_DEST_CLASS_REG);

	set_jump_tgt_here(desc, key_jump_cmd);

	/* Propagate errors from shared to job descriptor */
	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);

	/* Load iv */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_1_CCB | tfm->ivsize);

	/* Load operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Perform operation */
	ablkcipher_append_src_dst(desc);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ablkcipher enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif
	/* ablkcipher_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;

	init_sh_desc(desc, HDR_SHARE_SERIAL);
	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	/* Load class1 key only */
	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
			  ctx->enckeylen, CLASS_1 |
			  KEY_DEST_CLASS_REG);

	/* Only propagate error immediately if shared */
	jump_cmd = append_jump(desc, JUMP_TEST_ALL);
	set_jump_tgt_here(desc, key_jump_cmd);
	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
	set_jump_tgt_here(desc, jump_cmd);

	/* load IV */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_1_CCB | tfm->ivsize);

	/* Choose operation */
	append_dec_op1(desc, ctx->class1_alg_type);

	/* Perform operation */
	ablkcipher_append_src_dst(desc);

	/* Wait for key to load before allowing errors to propagate */
	append_dec_shr_done(desc);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ablkcipher dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return ret;
}

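/*
 * A sketch of how the crypto API of this era reaches the setkey/encrypt
 * path above for "cbc(aes)"; a real caller must wait for asynchronous
 * completion (-EINPROGRESS) before freeing the request.
 */
#if 0	/* illustrative only */
static int example_cbc_aes_encrypt(struct scatterlist *src,
				   struct scatterlist *dst,
				   unsigned int nbytes, u8 *key, u8 *iv)
{
	struct crypto_ablkcipher *tfm;
	struct ablkcipher_request *req;
	int ret;

	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_ablkcipher_setkey(tfm, key, 16);	/* AES-128 */
	if (!ret) {
		req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
		ablkcipher_request_set_crypt(req, src, dst, nbytes, iv);
		ret = crypto_ablkcipher_encrypt(req);
		ablkcipher_request_free(req);
	}

	crypto_free_ablkcipher(tfm);
	return ret;
}
#endif
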
/*
 * aead_edesc - s/w-extended aead descriptor
 * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
 * @assoc_chained: if associated data is chained
 * @src_nents: number of segments in input scatterlist
 * @src_chained: if source is chained
 * @dst_nents: number of segments in output scatterlist
 * @dst_chained: if destination is chained
 * @iv_dma: dma address of iv for checking continuity and link table
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @sec4_sg_dma: bus physical mapped address of h/w link table
 * @sec4_sg: pointer to h/w link table
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 */
struct aead_edesc {
	int assoc_nents;
	bool assoc_chained;
	int src_nents;
	bool src_chained;
	int dst_nents;
	bool dst_chained;
	dma_addr_t iv_dma;
	int sec4_sg_bytes;
	dma_addr_t sec4_sg_dma;
	struct sec4_sg_entry *sec4_sg;
	u32 hw_desc[0];
};

/*
 * ablkcipher_edesc - s/w-extended ablkcipher descriptor
 * @src_nents: number of segments in input scatterlist
 * @src_chained: if source is chained
 * @dst_nents: number of segments in output scatterlist
 * @dst_chained: if destination is chained
 * @iv_dma: dma address of iv for checking continuity and link table
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @sec4_sg_dma: bus physical mapped address of h/w link table
 * @sec4_sg: pointer to h/w link table
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 */
struct ablkcipher_edesc {
	int src_nents;
	bool src_chained;
	int dst_nents;
	bool dst_chained;
	dma_addr_t iv_dma;
	int sec4_sg_bytes;
	dma_addr_t sec4_sg_dma;
	struct sec4_sg_entry *sec4_sg;
	u32 hw_desc[0];
};

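/*
 * Both edesc flavors are allocated as a single block (see the
 * *_edesc_alloc() helpers below):
 *
 *   | struct ..._edesc | hw_desc[] (desc_bytes) | sec4_sg link table |
 *
 * so one kfree() in the completion callback releases the job descriptor
 * and its link tables together.
 */
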
static void caam_unmap(struct device *dev, struct scatterlist *src,
		       struct scatterlist *dst, int src_nents,
		       bool src_chained, int dst_nents, bool dst_chained,
		       dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma,
		       int sec4_sg_bytes)
{
	if (dst != src) {
		dma_unmap_sg_chained(dev, src, src_nents ? : 1, DMA_TO_DEVICE,
				     src_chained);
		dma_unmap_sg_chained(dev, dst, dst_nents ? : 1, DMA_FROM_DEVICE,
				     dst_chained);
	} else {
		dma_unmap_sg_chained(dev, src, src_nents ? : 1,
				     DMA_BIDIRECTIONAL, src_chained);
	}

	if (iv_dma)
		dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
	if (sec4_sg_bytes)
		dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
				 DMA_TO_DEVICE);
}

static void aead_unmap(struct device *dev,
		       struct aead_edesc *edesc,
		       struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	int ivsize = crypto_aead_ivsize(aead);

	dma_unmap_sg_chained(dev, req->assoc, edesc->assoc_nents,
			     DMA_TO_DEVICE, edesc->assoc_chained);

	caam_unmap(dev, req->src, req->dst,
		   edesc->src_nents, edesc->src_chained, edesc->dst_nents,
		   edesc->dst_chained, edesc->iv_dma, ivsize,
		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
}

static void ablkcipher_unmap(struct device *dev,
			     struct ablkcipher_edesc *edesc,
			     struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	caam_unmap(dev, req->src, req->dst,
		   edesc->src_nents, edesc->src_chained, edesc->dst_nents,
		   edesc->dst_chained, edesc->iv_dma, ivsize,
		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
}

static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
			      void *context)
{
	struct aead_request *req = context;
	struct aead_edesc *edesc;
#ifdef DEBUG
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ivsize = crypto_aead_ivsize(aead);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct aead_edesc *)((char *)desc -
		 offsetof(struct aead_edesc, hw_desc));

	if (err) {
		char tmp[CAAM_ERROR_STR_MAX];

		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
	}

	aead_unmap(jrdev, edesc, req);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "assoc  @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
		       req->assoclen, 1);
	print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src) - ivsize,
		       edesc->src_nents ? 100 : ivsize, 1);
	print_hex_dump(KERN_ERR, "dst    @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->src_nents ? 100 : req->cryptlen +
		       ctx->authsize + 4, 1);
#endif

	kfree(edesc);

	aead_request_complete(req, err);
}

static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
			      void *context)
{
	struct aead_request *req = context;
	struct aead_edesc *edesc;
#ifdef DEBUG
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ivsize = crypto_aead_ivsize(aead);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct aead_edesc *)((char *)desc -
		 offsetof(struct aead_edesc, hw_desc));

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
		       ivsize, 1);
	print_hex_dump(KERN_ERR, "dst    @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->dst),
		       req->cryptlen, 1);
#endif

	if (err) {
		char tmp[CAAM_ERROR_STR_MAX];

		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
	}

	aead_unmap(jrdev, edesc, req);

	/* verify hw auth check passed else return -EBADMSG */
	if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
		err = -EBADMSG;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "iphdrout@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4,
		       ((char *)sg_virt(req->assoc) - sizeof(struct iphdr)),
		       sizeof(struct iphdr) + req->assoclen +
		       ((req->cryptlen > 1500) ? 1500 : req->cryptlen) +
		       ctx->authsize + 36, 1);
	if (!err && edesc->sec4_sg_bytes) {
		struct scatterlist *sg = sg_last(req->src, edesc->src_nents);

		print_hex_dump(KERN_ERR, "sglastout@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(sg),
			       sg->length + ctx->authsize + 16, 1);
	}
#endif

	kfree(edesc);

	aead_request_complete(req, err);
}

static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
				    void *context)
{
	struct ablkcipher_request *req = context;
	struct ablkcipher_edesc *edesc;
#ifdef DEBUG
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ablkcipher_edesc *)((char *)desc -
		 offsetof(struct ablkcipher_edesc, hw_desc));

	if (err) {
		char tmp[CAAM_ERROR_STR_MAX];

		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       edesc->src_nents > 1 ? 100 : ivsize, 1);
	print_hex_dump(KERN_ERR, "dst    @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
#endif

	ablkcipher_unmap(jrdev, edesc, req);
	kfree(edesc);

	ablkcipher_request_complete(req, err);
}

static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
				    void *context)
{
	struct ablkcipher_request *req = context;
	struct ablkcipher_edesc *edesc;
#ifdef DEBUG
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ablkcipher_edesc *)((char *)desc -
		 offsetof(struct ablkcipher_edesc, hw_desc));
	if (err) {
		char tmp[CAAM_ERROR_STR_MAX];

		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       ivsize, 1);
	print_hex_dump(KERN_ERR, "dst    @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
#endif

	ablkcipher_unmap(jrdev, edesc, req);
	kfree(edesc);

	ablkcipher_request_complete(req, err);
}

/*
 * Fill in aead job descriptor
 */
static void init_aead_job(u32 *sh_desc, dma_addr_t ptr,
			  struct aead_edesc *edesc,
			  struct aead_request *req,
			  bool all_contig, bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ivsize = crypto_aead_ivsize(aead);
	int authsize = ctx->authsize;
	u32 *desc = edesc->hw_desc;
	u32 out_options = 0, in_options;
	dma_addr_t dst_dma, src_dma;
	int len, sec4_sg_index = 0;

#ifdef DEBUG
	debug("assoclen %d cryptlen %d authsize %d\n",
	      req->assoclen, req->cryptlen, authsize);
	print_hex_dump(KERN_ERR, "assoc  @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
		       req->assoclen, 1);
	print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
		       edesc->src_nents ? 100 : ivsize, 1);
	print_hex_dump(KERN_ERR, "src    @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->src_nents ? 100 : req->cryptlen, 1);
	print_hex_dump(KERN_ERR, "shrdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
		       desc_bytes(sh_desc), 1);
#endif

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (all_contig) {
		src_dma = sg_dma_address(req->assoc);
		in_options = 0;
	} else {
		src_dma = edesc->sec4_sg_dma;
		sec4_sg_index += (edesc->assoc_nents ? : 1) + 1 +
				 (edesc->src_nents ? : 1);
		in_options = LDST_SGF;
	}
	if (encrypt)
		append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
				  req->cryptlen - authsize, in_options);
	else
		append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
				  req->cryptlen, in_options);

	if (likely(req->src == req->dst)) {
		if (all_contig) {
			dst_dma = sg_dma_address(req->src);
		} else {
			dst_dma = src_dma + sizeof(struct sec4_sg_entry) *
				  ((edesc->assoc_nents ? : 1) + 1);
			out_options = LDST_SGF;
		}
	} else {
		if (!edesc->dst_nents) {
			dst_dma = sg_dma_address(req->dst);
		} else {
			dst_dma = edesc->sec4_sg_dma +
				  sec4_sg_index *
				  sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	}
	if (encrypt)
		append_seq_out_ptr(desc, dst_dma, req->cryptlen, out_options);
	else
		append_seq_out_ptr(desc, dst_dma, req->cryptlen - authsize,
				   out_options);
}

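/*
 * The job descriptor built here is the per-packet half of the scheme
 * sketched at the top of this file: a header pointing at the session's
 * shared descriptor, a SEQ IN pointer covering assoc | iv | payload and
 * a SEQ OUT pointer covering the payload (plus ICV on encrypt), each
 * either a flat buffer or a sec4 link table (LDST_SGF) when scattered.
 */
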
/*
 * Fill in aead givencrypt job descriptor
 */
static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
			      struct aead_edesc *edesc,
			      struct aead_request *req,
			      int contig)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ivsize = crypto_aead_ivsize(aead);
	int authsize = ctx->authsize;
	u32 *desc = edesc->hw_desc;
	u32 out_options = 0, in_options;
	dma_addr_t dst_dma, src_dma;
	int len, sec4_sg_index = 0;

#ifdef DEBUG
	debug("assoclen %d cryptlen %d authsize %d\n",
	      req->assoclen, req->cryptlen, authsize);
	print_hex_dump(KERN_ERR, "assoc  @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
		       req->assoclen, 1);
	print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1);
	print_hex_dump(KERN_ERR, "src    @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->src_nents > 1 ? 100 : req->cryptlen, 1);
	print_hex_dump(KERN_ERR, "shrdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
		       desc_bytes(sh_desc), 1);
#endif

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (contig & GIV_SRC_CONTIG) {
		src_dma = sg_dma_address(req->assoc);
		in_options = 0;
	} else {
		src_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->assoc_nents + 1 + edesc->src_nents;
		in_options = LDST_SGF;
	}
	append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
			  req->cryptlen - authsize, in_options);

	if (contig & GIV_DST_CONTIG) {
		dst_dma = edesc->iv_dma;
	} else {
		if (likely(req->src == req->dst)) {
			dst_dma = src_dma + sizeof(struct sec4_sg_entry) *
				  edesc->assoc_nents;
			out_options = LDST_SGF;
		} else {
			dst_dma = edesc->sec4_sg_dma +
				  sec4_sg_index *
				  sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	}

	append_seq_out_ptr(desc, dst_dma, ivsize + req->cryptlen, out_options);
}

/*
 * Fill in ablkcipher job descriptor
 */
static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
				struct ablkcipher_edesc *edesc,
				struct ablkcipher_request *req,
				bool iv_contig)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	u32 *desc = edesc->hw_desc;
	u32 out_options = 0, in_options;
	dma_addr_t dst_dma, src_dma;
	int len, sec4_sg_index = 0;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       ivsize, 1);
	print_hex_dump(KERN_ERR, "src    @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->src_nents ? 100 : req->nbytes, 1);
#endif

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (iv_contig) {
		src_dma = edesc->iv_dma;
		in_options = 0;
	} else {
		src_dma = edesc->sec4_sg_dma;
		sec4_sg_index += (iv_contig ? 0 : 1) + edesc->src_nents;
		in_options = LDST_SGF;
	}
	append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);

	if (likely(req->src == req->dst)) {
		if (!edesc->src_nents && iv_contig) {
			dst_dma = sg_dma_address(req->src);
		} else {
			dst_dma = edesc->sec4_sg_dma +
				sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	} else {
		if (!edesc->dst_nents) {
			dst_dma = sg_dma_address(req->dst);
		} else {
			dst_dma = edesc->sec4_sg_dma +
				sec4_sg_index * sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	}
	append_seq_out_ptr(desc, dst_dma, req->nbytes, out_options);
}

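/*
 * Example of the indexing above for a scattered src == dst request,
 * judging from how the table is filled: entry 0 holds the IV and entries
 * 1..src_nents the payload, so the output side reuses the same table
 * starting one entry in (dst_dma = sec4_sg_dma + sizeof(entry)).
 */
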
/*
 * allocate and map the aead extended descriptor
 */
static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
					   int desc_bytes, bool *all_contig_ptr)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	int assoc_nents, src_nents, dst_nents = 0;
	struct aead_edesc *edesc;
	dma_addr_t iv_dma = 0;
	int sgc;
	bool all_contig = true;
	bool assoc_chained = false, src_chained = false, dst_chained = false;
	int ivsize = crypto_aead_ivsize(aead);
	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;

	assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
	src_nents = sg_count(req->src, req->cryptlen, &src_chained);

	if (unlikely(req->dst != req->src))
		dst_nents = sg_count(req->dst, req->cryptlen, &dst_chained);

	sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
				 DMA_TO_DEVICE, assoc_chained);
	if (likely(req->src == req->dst)) {
		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
					 DMA_BIDIRECTIONAL, src_chained);
	} else {
		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
					 DMA_TO_DEVICE, src_chained);
		sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
					 DMA_FROM_DEVICE, dst_chained);
	}

	/* Check if data are contiguous */
	iv_dma = dma_map_single(jrdev, req->iv, ivsize, DMA_TO_DEVICE);
	if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen !=
	    iv_dma || src_nents || iv_dma + ivsize !=
	    sg_dma_address(req->src)) {
		all_contig = false;
		assoc_nents = assoc_nents ? : 1;
		src_nents = src_nents ? : 1;
		sec4_sg_len = assoc_nents + 1 + src_nents;
	}
	sec4_sg_len += dst_nents;

	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
			sec4_sg_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	edesc->assoc_nents = assoc_nents;
	edesc->assoc_chained = assoc_chained;
	edesc->src_nents = src_nents;
	edesc->src_chained = src_chained;
	edesc->dst_nents = dst_nents;
	edesc->dst_chained = dst_chained;
	edesc->iv_dma = iv_dma;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
			 desc_bytes;
	*all_contig_ptr = all_contig;

	sec4_sg_index = 0;
	if (!all_contig) {
		sg_to_sec4_sg(req->assoc,
			      (assoc_nents ? : 1),
			      edesc->sec4_sg +
			      sec4_sg_index, 0);
		sec4_sg_index += assoc_nents ? : 1;
		dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
				   iv_dma, ivsize, 0);
		sec4_sg_index += 1;
		sg_to_sec4_sg_last(req->src,
				   (src_nents ? : 1),
				   edesc->sec4_sg +
				   sec4_sg_index, 0);
		sec4_sg_index += src_nents ? : 1;
	}
	if (dst_nents) {
		sg_to_sec4_sg_last(req->dst, dst_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);
	}

	/* map the link table only after its entries have been written */
	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		kfree(edesc);
		return ERR_PTR(-ENOMEM);
	}

	return edesc;
}

static int aead_encrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

	req->cryptlen += ctx->authsize;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
				 CAAM_CMD_SZ, &all_contig);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_aead_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req,
		      all_contig, true);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

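/*
 * A sketch of how a caller drives this AEAD path through the (old)
 * aead_request API of this era; keying and asynchronous completion
 * handling are omitted for brevity.
 */
#if 0	/* illustrative only */
static int example_aead_encrypt(struct scatterlist *assoc,
				unsigned int assoclen,
				struct scatterlist *src,
				struct scatterlist *dst,
				unsigned int cryptlen, u8 *iv)
{
	struct crypto_aead *tfm;
	struct aead_request *req;
	int ret;

	tfm = crypto_alloc_aead("authenc(hmac(sha256),cbc(aes))", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* key blob packed as in the aead_setkey() sketch further up */

	req = aead_request_alloc(tfm, GFP_KERNEL);
	aead_request_set_assoc(req, assoc, assoclen);
	aead_request_set_crypt(req, src, dst, cryptlen, iv);
	ret = crypto_aead_encrypt(req);	/* -EINPROGRESS when queued */

	aead_request_free(req);
	crypto_free_aead(tfm);
	return ret;
}
#endif
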
static int aead_decrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
				 CAAM_CMD_SZ, &all_contig);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dec src@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       req->cryptlen, 1);
#endif

	/* Create and submit job descriptor */
	init_aead_job(ctx->sh_desc_dec,
		      ctx->sh_desc_dec_dma, edesc, req, all_contig, false);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

/*
 * allocate and map the aead extended descriptor for aead givencrypt
 */
static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
					       *greq, int desc_bytes,
					       u32 *contig_ptr)
{
	struct aead_request *req = &greq->areq;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	int assoc_nents, src_nents, dst_nents = 0;
	struct aead_edesc *edesc;
	dma_addr_t iv_dma = 0;
	int sgc;
	u32 contig = GIV_SRC_CONTIG | GIV_DST_CONTIG;
	int ivsize = crypto_aead_ivsize(aead);
	bool assoc_chained = false, src_chained = false, dst_chained = false;
	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;

	assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
	src_nents = sg_count(req->src, req->cryptlen, &src_chained);

	if (unlikely(req->dst != req->src))
		dst_nents = sg_count(req->dst, req->cryptlen, &dst_chained);

	sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
				 DMA_TO_DEVICE, assoc_chained);
	if (likely(req->src == req->dst)) {
		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
					 DMA_BIDIRECTIONAL, src_chained);
	} else {
		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
					 DMA_TO_DEVICE, src_chained);
		sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
					 DMA_FROM_DEVICE, dst_chained);
	}

	/* Check if data are contiguous */
	iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
	if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen !=
	    iv_dma || src_nents || iv_dma + ivsize != sg_dma_address(req->src))
		contig &= ~GIV_SRC_CONTIG;
	if (dst_nents || iv_dma + ivsize != sg_dma_address(req->dst))
		contig &= ~GIV_DST_CONTIG;
	if (unlikely(req->src != req->dst)) {
		dst_nents = dst_nents ? : 1;
		sec4_sg_len += 1;
	}
	if (!(contig & GIV_SRC_CONTIG)) {
		assoc_nents = assoc_nents ? : 1;
		src_nents = src_nents ? : 1;
		sec4_sg_len += assoc_nents + 1 + src_nents;
		if (likely(req->src == req->dst))
			contig &= ~GIV_DST_CONTIG;
	}
	sec4_sg_len += dst_nents;

	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
			sec4_sg_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	edesc->assoc_nents = assoc_nents;
	edesc->assoc_chained = assoc_chained;
	edesc->src_nents = src_nents;
	edesc->src_chained = src_chained;
	edesc->dst_nents = dst_nents;
	edesc->dst_chained = dst_chained;
	edesc->iv_dma = iv_dma;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
			 desc_bytes;
1388         edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1389                                             sec4_sg_bytes, DMA_TO_DEVICE);
1390         *contig_ptr = contig;
1391
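        /*
         * S/G table layout: [assoc entries][IV][src entries] when the
         * source side is not contiguous, then [IV][dst entries] when a
         * distinct destination is not contiguous either.
         */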
1392         sec4_sg_index = 0;
1393         if (!(contig & GIV_SRC_CONTIG)) {
1394                 sg_to_sec4_sg(req->assoc, assoc_nents,
1395                               edesc->sec4_sg +
1396                               sec4_sg_index, 0);
1397                 sec4_sg_index += assoc_nents;
1398                 dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
1399                                    iv_dma, ivsize, 0);
1400                 sec4_sg_index += 1;
1401                 sg_to_sec4_sg_last(req->src, src_nents,
1402                                    edesc->sec4_sg +
1403                                    sec4_sg_index, 0);
1404                 sec4_sg_index += src_nents;
1405         }
1406         if (unlikely(req->src != req->dst && !(contig & GIV_DST_CONTIG))) {
1407                 dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
1408                                    iv_dma, ivsize, 0);
1409                 sec4_sg_index += 1;
1410                 sg_to_sec4_sg_last(req->dst, dst_nents,
1411                                    edesc->sec4_sg + sec4_sg_index, 0);
1412         }
1413
1414         return edesc;
1415 }
1416
1417 static int aead_givencrypt(struct aead_givcrypt_request *areq)
1418 {
1419         struct aead_request *req = &areq->areq;
1420         struct aead_edesc *edesc;
1421         struct crypto_aead *aead = crypto_aead_reqtfm(req);
1422         struct caam_ctx *ctx = crypto_aead_ctx(aead);
1423         struct device *jrdev = ctx->jrdev;
1424         u32 contig;
1425         u32 *desc;
1426         int ret = 0;
1427
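        /* output must also cover the appended ICV, so grow cryptlen */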
1428         req->cryptlen += ctx->authsize;
1429
1430         /* allocate extended descriptor */
1431         edesc = aead_giv_edesc_alloc(areq, DESC_JOB_IO_LEN *
1432                                      CAAM_CMD_SZ, &contig);
1434         if (IS_ERR(edesc))
1435                 return PTR_ERR(edesc);
1436
1437 #ifdef DEBUG
1438         print_hex_dump(KERN_ERR, "giv src@"__stringify(__LINE__)": ",
1439                        DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
1440                        req->cryptlen, 1);
1441 #endif
1442
1443         /* Create and submit job descriptor */
1444         init_aead_giv_job(ctx->sh_desc_givenc,
1445                           ctx->sh_desc_givenc_dma, edesc, req, contig);
1446 #ifdef DEBUG
1447         print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
1448                        DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1449                        desc_bytes(edesc->hw_desc), 1);
1450 #endif
1451
1452         desc = edesc->hw_desc;
1453         ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
1454         if (!ret) {
1455                 ret = -EINPROGRESS;
1456         } else {
1457                 aead_unmap(jrdev, edesc, req);
1458                 kfree(edesc);
1459         }
1460
1461         return ret;
1462 }
1463
1464 /*
1465  * allocate and map the extended descriptor for ablkcipher encrypt/decrypt
1466  */
1467 static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
1468                                                        *req, int desc_bytes,
1469                                                        bool *iv_contig_out)
1470 {
1471         struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1472         struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
1473         struct device *jrdev = ctx->jrdev;
1474         gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1475                                           CRYPTO_TFM_REQ_MAY_SLEEP)) ?
1476                        GFP_KERNEL : GFP_ATOMIC;
1477         int src_nents, dst_nents = 0, sec4_sg_bytes;
1478         struct ablkcipher_edesc *edesc;
1479         dma_addr_t iv_dma = 0;
1480         bool iv_contig = false;
1481         int sgc;
1482         int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
1483         bool src_chained = false, dst_chained = false;
1484         int sec4_sg_index;
1485
1486         src_nents = sg_count(req->src, req->nbytes, &src_chained);
1487
1488         if (req->dst != req->src)
1489                 dst_nents = sg_count(req->dst, req->nbytes, &dst_chained);
1490
1491         if (likely(req->src == req->dst)) {
1492                 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
1493                                          DMA_BIDIRECTIONAL, src_chained);
1494         } else {
1495                 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
1496                                          DMA_TO_DEVICE, src_chained);
1497                 sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
1498                                          DMA_FROM_DEVICE, dst_chained);
1499         }
1500
1501         /*
1502          * Check if the IV can be contiguous with source and destination.
1503          * If so, include it; if not, build a sec4 scatter/gather table.
1504          */
1505         iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
1506         if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src))
1507                 iv_contig = true;
1508         else
1509                 src_nents = src_nents ? : 1;
1510         sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
1511                         sizeof(struct sec4_sg_entry);
1512
1513         /* allocate space for base edesc and hw desc commands, link tables */
1514         edesc = kmalloc(sizeof(struct ablkcipher_edesc) + desc_bytes +
1515                         sec4_sg_bytes, GFP_DMA | flags);
1516         if (!edesc) {
1517                 dev_err(jrdev, "could not allocate extended descriptor\n");
1518                 return ERR_PTR(-ENOMEM);
1519         }
1520
1521         edesc->src_nents = src_nents;
1522         edesc->src_chained = src_chained;
1523         edesc->dst_nents = dst_nents;
1524         edesc->dst_chained = dst_chained;
1525         edesc->sec4_sg_bytes = sec4_sg_bytes;
1526         edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
1527                          desc_bytes;
1528
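        /*
         * S/G table layout: [IV][src entries] when the IV is not
         * contiguous with the source, then [dst entries] when the
         * destination is distinct.
         */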
1529         sec4_sg_index = 0;
1530         if (!iv_contig) {
1531                 dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
1532                 sg_to_sec4_sg_last(req->src, src_nents,
1533                                    edesc->sec4_sg + 1, 0);
1534                 sec4_sg_index += 1 + src_nents;
1535         }
1536
1537         if (dst_nents) {
1538                 sg_to_sec4_sg_last(req->dst, dst_nents,
1539                                    edesc->sec4_sg + sec4_sg_index, 0);
1540         }
1541
1542         edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1543                                             sec4_sg_bytes, DMA_TO_DEVICE);
1544         edesc->iv_dma = iv_dma;
1545
1546 #ifdef DEBUG
1547         print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"__stringify(__LINE__)": ",
1548                        DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
1549                        sec4_sg_bytes, 1);
1550 #endif
1551
1552         *iv_contig_out = iv_contig;
1553         return edesc;
1554 }
1555
1556 static int ablkcipher_encrypt(struct ablkcipher_request *req)
1557 {
1558         struct ablkcipher_edesc *edesc;
1559         struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1560         struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
1561         struct device *jrdev = ctx->jrdev;
1562         bool iv_contig;
1563         u32 *desc;
1564         int ret = 0;
1565
1566         /* allocate extended descriptor */
1567         edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
1568                                        CAAM_CMD_SZ, &iv_contig);
1569         if (IS_ERR(edesc))
1570                 return PTR_ERR(edesc);
1571
1572         /* Create and submit job descriptor */
1573         init_ablkcipher_job(ctx->sh_desc_enc,
1574                             ctx->sh_desc_enc_dma, edesc, req, iv_contig);
1575 #ifdef DEBUG
1576         print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
1577                        DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1578                        desc_bytes(edesc->hw_desc), 1);
1579 #endif
1580         desc = edesc->hw_desc;
1581         ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);
1582
1583         if (!ret) {
1584                 ret = -EINPROGRESS;
1585         } else {
1586                 ablkcipher_unmap(jrdev, edesc, req);
1587                 kfree(edesc);
1588         }
1589
1590         return ret;
1591 }
1592
1593 static int ablkcipher_decrypt(struct ablkcipher_request *req)
1594 {
1595         struct ablkcipher_edesc *edesc;
1596         struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1597         struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
1598         struct device *jrdev = ctx->jrdev;
1599         bool iv_contig;
1600         u32 *desc;
1601         int ret = 0;
1602
1603         /* allocate extended descriptor */
1604         edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
1605                                        CAAM_CMD_SZ, &iv_contig);
1606         if (IS_ERR(edesc))
1607                 return PTR_ERR(edesc);
1608
1609         /* Create and submit job descriptor */
1610         init_ablkcipher_job(ctx->sh_desc_dec,
1611                             ctx->sh_desc_dec_dma, edesc, req, iv_contig);
1612         desc = edesc->hw_desc;
1613 #ifdef DEBUG
1614         print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
1615                        DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1616                        desc_bytes(edesc->hw_desc), 1);
1617 #endif
1618
1619         ret = caam_jr_enqueue(jrdev, desc, ablkcipher_decrypt_done, req);
1620         if (!ret) {
1621                 ret = -EINPROGRESS;
1622         } else {
1623                 ablkcipher_unmap(jrdev, edesc, req);
1624                 kfree(edesc);
1625         }
1626
1627         return ret;
1628 }
1629
1630 #define template_aead           template_u.aead
1631 #define template_ablkcipher     template_u.ablkcipher
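
/*
 * caam_alg_template - template from which a crypto_alg is populated at
 * registration time; class1/class2_alg_type and alg_op seed the OPERATION
 * command words of the shared descriptors built for the algorithm.
 */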
1632 struct caam_alg_template {
1633         char name[CRYPTO_MAX_ALG_NAME];
1634         char driver_name[CRYPTO_MAX_ALG_NAME];
1635         unsigned int blocksize;
1636         u32 type;
1637         union {
1638                 struct ablkcipher_alg ablkcipher;
1639                 struct aead_alg aead;
1640                 struct blkcipher_alg blkcipher;
1641                 struct cipher_alg cipher;
1642                 struct compress_alg compress;
1643                 struct rng_alg rng;
1644         } template_u;
1645         u32 class1_alg_type;
1646         u32 class2_alg_type;
1647         u32 alg_op;
1648 };
1649
1650 static struct caam_alg_template driver_algs[] = {
1651         /* single-pass ipsec_esp descriptor */
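        /*
         * All AEAD entries below share the same template ops and differ
         * only in cipher (AES/3DES/DES), HMAC digest and IV/digest sizes.
         */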
1652         {
1653                 .name = "authenc(hmac(md5),cbc(aes))",
1654                 .driver_name = "authenc-hmac-md5-cbc-aes-caam",
1655                 .blocksize = AES_BLOCK_SIZE,
1656                 .type = CRYPTO_ALG_TYPE_AEAD,
1657                 .template_aead = {
1658                         .setkey = aead_setkey,
1659                         .setauthsize = aead_setauthsize,
1660                         .encrypt = aead_encrypt,
1661                         .decrypt = aead_decrypt,
1662                         .givencrypt = aead_givencrypt,
1663                         .geniv = "<built-in>",
1664                         .ivsize = AES_BLOCK_SIZE,
1665                         .maxauthsize = MD5_DIGEST_SIZE,
1666                         },
1667                 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1668                 .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
1669                 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
1670         },
1671         {
1672                 .name = "authenc(hmac(sha1),cbc(aes))",
1673                 .driver_name = "authenc-hmac-sha1-cbc-aes-caam",
1674                 .blocksize = AES_BLOCK_SIZE,
1675                 .type = CRYPTO_ALG_TYPE_AEAD,
1676                 .template_aead = {
1677                         .setkey = aead_setkey,
1678                         .setauthsize = aead_setauthsize,
1679                         .encrypt = aead_encrypt,
1680                         .decrypt = aead_decrypt,
1681                         .givencrypt = aead_givencrypt,
1682                         .geniv = "<built-in>",
1683                         .ivsize = AES_BLOCK_SIZE,
1684                         .maxauthsize = SHA1_DIGEST_SIZE,
1685                         },
1686                 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1687                 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
1688                 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
1689         },
1690         {
1691                 .name = "authenc(hmac(sha224),cbc(aes))",
1692                 .driver_name = "authenc-hmac-sha224-cbc-aes-caam",
1693                 .blocksize = AES_BLOCK_SIZE,
1694                 .type = CRYPTO_ALG_TYPE_AEAD,
1695                 .template_aead = {
1696                         .setkey = aead_setkey,
1697                         .setauthsize = aead_setauthsize,
1698                         .encrypt = aead_encrypt,
1699                         .decrypt = aead_decrypt,
1700                         .givencrypt = aead_givencrypt,
1701                         .geniv = "<built-in>",
1702                         .ivsize = AES_BLOCK_SIZE,
1703                         .maxauthsize = SHA224_DIGEST_SIZE,
1704                         },
1705                 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1706                 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1707                                    OP_ALG_AAI_HMAC_PRECOMP,
1708                 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
1709         },
1710         {
1711                 .name = "authenc(hmac(sha256),cbc(aes))",
1712                 .driver_name = "authenc-hmac-sha256-cbc-aes-caam",
1713                 .blocksize = AES_BLOCK_SIZE,
1714                 .type = CRYPTO_ALG_TYPE_AEAD,
1715                 .template_aead = {
1716                         .setkey = aead_setkey,
1717                         .setauthsize = aead_setauthsize,
1718                         .encrypt = aead_encrypt,
1719                         .decrypt = aead_decrypt,
1720                         .givencrypt = aead_givencrypt,
1721                         .geniv = "<built-in>",
1722                         .ivsize = AES_BLOCK_SIZE,
1723                         .maxauthsize = SHA256_DIGEST_SIZE,
1724                         },
1725                 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1726                 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
1727                                    OP_ALG_AAI_HMAC_PRECOMP,
1728                 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
1729         },
1730         {
1731                 .name = "authenc(hmac(sha384),cbc(aes))",
1732                 .driver_name = "authenc-hmac-sha384-cbc-aes-caam",
1733                 .blocksize = AES_BLOCK_SIZE,
1734                 .type = CRYPTO_ALG_TYPE_AEAD,
1735                 .template_aead = {
1736                         .setkey = aead_setkey,
1737                         .setauthsize = aead_setauthsize,
1738                         .encrypt = aead_encrypt,
1739                         .decrypt = aead_decrypt,
1740                         .givencrypt = aead_givencrypt,
1741                         .geniv = "<built-in>",
1742                         .ivsize = AES_BLOCK_SIZE,
1743                         .maxauthsize = SHA384_DIGEST_SIZE,
1744                         },
1745                 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1746                 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
1747                                    OP_ALG_AAI_HMAC_PRECOMP,
1748                 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
1749         },
1750
1751         {
1752                 .name = "authenc(hmac(sha512),cbc(aes))",
1753                 .driver_name = "authenc-hmac-sha512-cbc-aes-caam",
1754                 .blocksize = AES_BLOCK_SIZE,
1755                 .type = CRYPTO_ALG_TYPE_AEAD,
1756                 .template_aead = {
1757                         .setkey = aead_setkey,
1758                         .setauthsize = aead_setauthsize,
1759                         .encrypt = aead_encrypt,
1760                         .decrypt = aead_decrypt,
1761                         .givencrypt = aead_givencrypt,
1762                         .geniv = "<built-in>",
1763                         .ivsize = AES_BLOCK_SIZE,
1764                         .maxauthsize = SHA512_DIGEST_SIZE,
1765                         },
1766                 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1767                 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
1768                                    OP_ALG_AAI_HMAC_PRECOMP,
1769                 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
1770         },
1771         {
1772                 .name = "authenc(hmac(md5),cbc(des3_ede))",
1773                 .driver_name = "authenc-hmac-md5-cbc-des3_ede-caam",
1774                 .blocksize = DES3_EDE_BLOCK_SIZE,
1775                 .type = CRYPTO_ALG_TYPE_AEAD,
1776                 .template_aead = {
1777                         .setkey = aead_setkey,
1778                         .setauthsize = aead_setauthsize,
1779                         .encrypt = aead_encrypt,
1780                         .decrypt = aead_decrypt,
1781                         .givencrypt = aead_givencrypt,
1782                         .geniv = "<built-in>",
1783                         .ivsize = DES3_EDE_BLOCK_SIZE,
1784                         .maxauthsize = MD5_DIGEST_SIZE,
1785                         },
1786                 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1787                 .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
1788                 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
1789         },
1790         {
1791                 .name = "authenc(hmac(sha1),cbc(des3_ede))",
1792                 .driver_name = "authenc-hmac-sha1-cbc-des3_ede-caam",
1793                 .blocksize = DES3_EDE_BLOCK_SIZE,
1794                 .type = CRYPTO_ALG_TYPE_AEAD,
1795                 .template_aead = {
1796                         .setkey = aead_setkey,
1797                         .setauthsize = aead_setauthsize,
1798                         .encrypt = aead_encrypt,
1799                         .decrypt = aead_decrypt,
1800                         .givencrypt = aead_givencrypt,
1801                         .geniv = "<built-in>",
1802                         .ivsize = DES3_EDE_BLOCK_SIZE,
1803                         .maxauthsize = SHA1_DIGEST_SIZE,
1804                         },
1805                 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1806                 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
1807                 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
1808         },
1809         {
1810                 .name = "authenc(hmac(sha224),cbc(des3_ede))",
1811                 .driver_name = "authenc-hmac-sha224-cbc-des3_ede-caam",
1812                 .blocksize = DES3_EDE_BLOCK_SIZE,
1813                 .type = CRYPTO_ALG_TYPE_AEAD,
1814                 .template_aead = {
1815                         .setkey = aead_setkey,
1816                         .setauthsize = aead_setauthsize,
1817                         .encrypt = aead_encrypt,
1818                         .decrypt = aead_decrypt,
1819                         .givencrypt = aead_givencrypt,
1820                         .geniv = "<built-in>",
1821                         .ivsize = DES3_EDE_BLOCK_SIZE,
1822                         .maxauthsize = SHA224_DIGEST_SIZE,
1823                         },
1824                 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1825                 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1826                                    OP_ALG_AAI_HMAC_PRECOMP,
1827                 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
1828         },
1829         {
1830                 .name = "authenc(hmac(sha256),cbc(des3_ede))",
1831                 .driver_name = "authenc-hmac-sha256-cbc-des3_ede-caam",
1832                 .blocksize = DES3_EDE_BLOCK_SIZE,
1833                 .type = CRYPTO_ALG_TYPE_AEAD,
1834                 .template_aead = {
1835                         .setkey = aead_setkey,
1836                         .setauthsize = aead_setauthsize,
1837                         .encrypt = aead_encrypt,
1838                         .decrypt = aead_decrypt,
1839                         .givencrypt = aead_givencrypt,
1840                         .geniv = "<built-in>",
1841                         .ivsize = DES3_EDE_BLOCK_SIZE,
1842                         .maxauthsize = SHA256_DIGEST_SIZE,
1843                         },
1844                 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1845                 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
1846                                    OP_ALG_AAI_HMAC_PRECOMP,
1847                 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
1848         },
1849         {
1850                 .name = "authenc(hmac(sha384),cbc(des3_ede))",
1851                 .driver_name = "authenc-hmac-sha384-cbc-des3_ede-caam",
1852                 .blocksize = DES3_EDE_BLOCK_SIZE,
1853                 .type = CRYPTO_ALG_TYPE_AEAD,
1854                 .template_aead = {
1855                         .setkey = aead_setkey,
1856                         .setauthsize = aead_setauthsize,
1857                         .encrypt = aead_encrypt,
1858                         .decrypt = aead_decrypt,
1859                         .givencrypt = aead_givencrypt,
1860                         .geniv = "<built-in>",
1861                         .ivsize = DES3_EDE_BLOCK_SIZE,
1862                         .maxauthsize = SHA384_DIGEST_SIZE,
1863                         },
1864                 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1865                 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
1866                                    OP_ALG_AAI_HMAC_PRECOMP,
1867                 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
1868         },
1869         {
1870                 .name = "authenc(hmac(sha512),cbc(des3_ede))",
1871                 .driver_name = "authenc-hmac-sha512-cbc-des3_ede-caam",
1872                 .blocksize = DES3_EDE_BLOCK_SIZE,
1873                 .type = CRYPTO_ALG_TYPE_AEAD,
1874                 .template_aead = {
1875                         .setkey = aead_setkey,
1876                         .setauthsize = aead_setauthsize,
1877                         .encrypt = aead_encrypt,
1878                         .decrypt = aead_decrypt,
1879                         .givencrypt = aead_givencrypt,
1880                         .geniv = "<built-in>",
1881                         .ivsize = DES3_EDE_BLOCK_SIZE,
1882                         .maxauthsize = SHA512_DIGEST_SIZE,
1883                         },
1884                 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1885                 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
1886                                    OP_ALG_AAI_HMAC_PRECOMP,
1887                 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
1888         },
1889         {
1890                 .name = "authenc(hmac(md5),cbc(des))",
1891                 .driver_name = "authenc-hmac-md5-cbc-des-caam",
1892                 .blocksize = DES_BLOCK_SIZE,
1893                 .type = CRYPTO_ALG_TYPE_AEAD,
1894                 .template_aead = {
1895                         .setkey = aead_setkey,
1896                         .setauthsize = aead_setauthsize,
1897                         .encrypt = aead_encrypt,
1898                         .decrypt = aead_decrypt,
1899                         .givencrypt = aead_givencrypt,
1900                         .geniv = "<built-in>",
1901                         .ivsize = DES_BLOCK_SIZE,
1902                         .maxauthsize = MD5_DIGEST_SIZE,
1903                         },
1904                 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1905                 .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
1906                 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
1907         },
1908         {
1909                 .name = "authenc(hmac(sha1),cbc(des))",
1910                 .driver_name = "authenc-hmac-sha1-cbc-des-caam",
1911                 .blocksize = DES_BLOCK_SIZE,
1912                 .type = CRYPTO_ALG_TYPE_AEAD,
1913                 .template_aead = {
1914                         .setkey = aead_setkey,
1915                         .setauthsize = aead_setauthsize,
1916                         .encrypt = aead_encrypt,
1917                         .decrypt = aead_decrypt,
1918                         .givencrypt = aead_givencrypt,
1919                         .geniv = "<built-in>",
1920                         .ivsize = DES_BLOCK_SIZE,
1921                         .maxauthsize = SHA1_DIGEST_SIZE,
1922                         },
1923                 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1924                 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
1925                 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
1926         },
1927         {
1928                 .name = "authenc(hmac(sha224),cbc(des))",
1929                 .driver_name = "authenc-hmac-sha224-cbc-des-caam",
1930                 .blocksize = DES_BLOCK_SIZE,
1931                 .type = CRYPTO_ALG_TYPE_AEAD,
1932                 .template_aead = {
1933                         .setkey = aead_setkey,
1934                         .setauthsize = aead_setauthsize,
1935                         .encrypt = aead_encrypt,
1936                         .decrypt = aead_decrypt,
1937                         .givencrypt = aead_givencrypt,
1938                         .geniv = "<built-in>",
1939                         .ivsize = DES_BLOCK_SIZE,
1940                         .maxauthsize = SHA224_DIGEST_SIZE,
1941                         },
1942                 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1943                 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1944                                    OP_ALG_AAI_HMAC_PRECOMP,
1945                 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
1946         },
1947         {
1948                 .name = "authenc(hmac(sha256),cbc(des))",
1949                 .driver_name = "authenc-hmac-sha256-cbc-des-caam",
1950                 .blocksize = DES_BLOCK_SIZE,
1951                 .type = CRYPTO_ALG_TYPE_AEAD,
1952                 .template_aead = {
1953                         .setkey = aead_setkey,
1954                         .setauthsize = aead_setauthsize,
1955                         .encrypt = aead_encrypt,
1956                         .decrypt = aead_decrypt,
1957                         .givencrypt = aead_givencrypt,
1958                         .geniv = "<built-in>",
1959                         .ivsize = DES_BLOCK_SIZE,
1960                         .maxauthsize = SHA256_DIGEST_SIZE,
1961                         },
1962                 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1963                 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
1964                                    OP_ALG_AAI_HMAC_PRECOMP,
1965                 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
1966         },
1967         {
1968                 .name = "authenc(hmac(sha384),cbc(des))",
1969                 .driver_name = "authenc-hmac-sha384-cbc-des-caam",
1970                 .blocksize = DES_BLOCK_SIZE,
1971                 .type = CRYPTO_ALG_TYPE_AEAD,
1972                 .template_aead = {
1973                         .setkey = aead_setkey,
1974                         .setauthsize = aead_setauthsize,
1975                         .encrypt = aead_encrypt,
1976                         .decrypt = aead_decrypt,
1977                         .givencrypt = aead_givencrypt,
1978                         .geniv = "<built-in>",
1979                         .ivsize = DES_BLOCK_SIZE,
1980                         .maxauthsize = SHA384_DIGEST_SIZE,
1981                         },
1982                 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1983                 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
1984                                    OP_ALG_AAI_HMAC_PRECOMP,
1985                 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
1986         },
1987         {
1988                 .name = "authenc(hmac(sha512),cbc(des))",
1989                 .driver_name = "authenc-hmac-sha512-cbc-des-caam",
1990                 .blocksize = DES_BLOCK_SIZE,
1991                 .type = CRYPTO_ALG_TYPE_AEAD,
1992                 .template_aead = {
1993                         .setkey = aead_setkey,
1994                         .setauthsize = aead_setauthsize,
1995                         .encrypt = aead_encrypt,
1996                         .decrypt = aead_decrypt,
1997                         .givencrypt = aead_givencrypt,
1998                         .geniv = "<built-in>",
1999                         .ivsize = DES_BLOCK_SIZE,
2000                         .maxauthsize = SHA512_DIGEST_SIZE,
2001                         },
2002                 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2003                 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2004                                    OP_ALG_AAI_HMAC_PRECOMP,
2005                 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
2006         },
2007         /* ablkcipher descriptor */
2008         {
2009                 .name = "cbc(aes)",
2010                 .driver_name = "cbc-aes-caam",
2011                 .blocksize = AES_BLOCK_SIZE,
2012                 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2013                 .template_ablkcipher = {
2014                         .setkey = ablkcipher_setkey,
2015                         .encrypt = ablkcipher_encrypt,
2016                         .decrypt = ablkcipher_decrypt,
2017                         .geniv = "eseqiv",
2018                         .min_keysize = AES_MIN_KEY_SIZE,
2019                         .max_keysize = AES_MAX_KEY_SIZE,
2020                         .ivsize = AES_BLOCK_SIZE,
2021                         },
2022                 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2023         },
2024         {
2025                 .name = "cbc(des3_ede)",
2026                 .driver_name = "cbc-3des-caam",
2027                 .blocksize = DES3_EDE_BLOCK_SIZE,
2028                 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2029                 .template_ablkcipher = {
2030                         .setkey = ablkcipher_setkey,
2031                         .encrypt = ablkcipher_encrypt,
2032                         .decrypt = ablkcipher_decrypt,
2033                         .geniv = "eseqiv",
2034                         .min_keysize = DES3_EDE_KEY_SIZE,
2035                         .max_keysize = DES3_EDE_KEY_SIZE,
2036                         .ivsize = DES3_EDE_BLOCK_SIZE,
2037                         },
2038                 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2039         },
2040         {
2041                 .name = "cbc(des)",
2042                 .driver_name = "cbc-des-caam",
2043                 .blocksize = DES_BLOCK_SIZE,
2044                 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2045                 .template_ablkcipher = {
2046                         .setkey = ablkcipher_setkey,
2047                         .encrypt = ablkcipher_encrypt,
2048                         .decrypt = ablkcipher_decrypt,
2049                         .geniv = "eseqiv",
2050                         .min_keysize = DES_KEY_SIZE,
2051                         .max_keysize = DES_KEY_SIZE,
2052                         .ivsize = DES_BLOCK_SIZE,
2053                         },
2054                 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2055         }
2056 };
2057
2058 struct caam_crypto_alg {
2059         struct list_head entry;
2060         struct device *ctrldev;
2061         int class1_alg_type;
2062         int class2_alg_type;
2063         int alg_op;
2064         struct crypto_alg crypto_alg;
2065 };
2066
2067 static int caam_cra_init(struct crypto_tfm *tfm)
2068 {
2069         struct crypto_alg *alg = tfm->__crt_alg;
2070         struct caam_crypto_alg *caam_alg =
2071                  container_of(alg, struct caam_crypto_alg, crypto_alg);
2072         struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
2073         struct caam_drv_private *priv = dev_get_drvdata(caam_alg->ctrldev);
2074         int tgt_jr = atomic_inc_return(&priv->tfm_count);
2075
2076         /*
2077          * distribute tfms across job rings to ensure in-order
2078          * crypto request processing per tfm
2079          */
2080         ctx->jrdev = priv->jrdev[(tgt_jr / 2) % priv->total_jobrs];
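        /*
         * tgt_jr / 2 keeps pairs of consecutive tfms on the same ring,
         * e.g. with two rings: jrdev[0], jrdev[0], jrdev[1], jrdev[1], ...
         */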
2081
2082         /* copy descriptor header template value */
2083         ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam_alg->class1_alg_type;
2084         ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam_alg->class2_alg_type;
2085         ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_alg->alg_op;
2086
2087         return 0;
2088 }
2089
2090 static void caam_cra_exit(struct crypto_tfm *tfm)
2091 {
2092         struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
2093
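        /* unmap any shared descriptors that were mapped for this context */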
2094         if (ctx->sh_desc_enc_dma &&
2095             !dma_mapping_error(ctx->jrdev, ctx->sh_desc_enc_dma))
2096                 dma_unmap_single(ctx->jrdev, ctx->sh_desc_enc_dma,
2097                                  desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
2098         if (ctx->sh_desc_dec_dma &&
2099             !dma_mapping_error(ctx->jrdev, ctx->sh_desc_dec_dma))
2100                 dma_unmap_single(ctx->jrdev, ctx->sh_desc_dec_dma,
2101                                  desc_bytes(ctx->sh_desc_dec), DMA_TO_DEVICE);
2102         if (ctx->sh_desc_givenc_dma &&
2103             !dma_mapping_error(ctx->jrdev, ctx->sh_desc_givenc_dma))
2104                 dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma,
2105                                  desc_bytes(ctx->sh_desc_givenc),
2106                                  DMA_TO_DEVICE);
2107 }
2108
2109 static void __exit caam_algapi_exit(void)
2110 {
2112         struct device_node *dev_node;
2113         struct platform_device *pdev;
2114         struct device *ctrldev;
2115         struct caam_drv_private *priv;
2116         struct caam_crypto_alg *t_alg, *n;
2117
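        /* look up the SEC4 node, falling back to the "fsl,sec4.0" binding */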
2118         dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
2119         if (!dev_node) {
2120                 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
2121                 if (!dev_node)
2122                         return;
2123         }
2124
2125         pdev = of_find_device_by_node(dev_node);
2126         if (!pdev)
2127                 return;
2128
2129         ctrldev = &pdev->dev;
2130         of_node_put(dev_node);
2131         priv = dev_get_drvdata(ctrldev);
2132
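        /* nothing to do if caam_algapi_init() never populated the list */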
2133         if (!priv->alg_list.next)
2134                 return;
2135
2136         list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
2137                 crypto_unregister_alg(&t_alg->crypto_alg);
2138                 list_del(&t_alg->entry);
2139                 kfree(t_alg);
2140         }
2141 }
2142
2143 static struct caam_crypto_alg *caam_alg_alloc(struct device *ctrldev,
2144                                               struct caam_alg_template
2145                                               *template)
2146 {
2147         struct caam_crypto_alg *t_alg;
2148         struct crypto_alg *alg;
2149
2150         t_alg = kzalloc(sizeof(struct caam_crypto_alg), GFP_KERNEL);
2151         if (!t_alg) {
2152                 dev_err(ctrldev, "failed to allocate t_alg\n");
2153                 return ERR_PTR(-ENOMEM);
2154         }
2155
2156         alg = &t_alg->crypto_alg;
2157
2158         snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
2159         snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
2160                  template->driver_name);
2161         alg->cra_module = THIS_MODULE;
2162         alg->cra_init = caam_cra_init;
2163         alg->cra_exit = caam_cra_exit;
2164         alg->cra_priority = CAAM_CRA_PRIORITY;
2165         alg->cra_blocksize = template->blocksize;
2166         alg->cra_alignmask = 0;
2167         alg->cra_ctxsize = sizeof(struct caam_ctx);
2168         alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
2169                          template->type;
2170         switch (template->type) {
2171         case CRYPTO_ALG_TYPE_ABLKCIPHER:
2172                 alg->cra_type = &crypto_ablkcipher_type;
2173                 alg->cra_ablkcipher = template->template_ablkcipher;
2174                 break;
2175         case CRYPTO_ALG_TYPE_AEAD:
2176                 alg->cra_type = &crypto_aead_type;
2177                 alg->cra_aead = template->template_aead;
2178                 break;
2179         }
2180
2181         t_alg->class1_alg_type = template->class1_alg_type;
2182         t_alg->class2_alg_type = template->class2_alg_type;
2183         t_alg->alg_op = template->alg_op;
2184         t_alg->ctrldev = ctrldev;
2185
2186         return t_alg;
2187 }
2188
2189 static int __init caam_algapi_init(void)
2190 {
2191         struct device_node *dev_node;
2192         struct platform_device *pdev;
2193         struct device *ctrldev;
2194         struct caam_drv_private *priv;
2195         int i = 0, err = 0;
2196
2197         dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
2198         if (!dev_node) {
2199                 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
2200                 if (!dev_node)
2201                         return -ENODEV;
2202         }
2203
2204         pdev = of_find_device_by_node(dev_node);
2205         if (!pdev)
2206                 return -ENODEV;
2207
2208         ctrldev = &pdev->dev;
2209         priv = dev_get_drvdata(ctrldev);
2210         of_node_put(dev_node);
2211
2212         INIT_LIST_HEAD(&priv->alg_list);
2213
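        /* start at -1 so the first caam_cra_init() computes tgt_jr == 0 */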
2214         atomic_set(&priv->tfm_count, -1);
2215
2216         /* register crypto algorithms the device supports */
2217         for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
2218                 /* TODO: check if h/w supports alg */
2219                 struct caam_crypto_alg *t_alg;
2220
2221                 t_alg = caam_alg_alloc(ctrldev, &driver_algs[i]);
2222                 if (IS_ERR(t_alg)) {
2223                         err = PTR_ERR(t_alg);
2224                         dev_warn(ctrldev, "%s alg allocation failed\n",
2225                                  driver_algs[i].driver_name);
2226                         continue;
2227                 }
2228
2229                 err = crypto_register_alg(&t_alg->crypto_alg);
2230                 if (err) {
2231                         dev_warn(ctrldev, "%s alg registration failed\n",
2232                                  t_alg->crypto_alg.cra_driver_name);
2233                         kfree(t_alg);
2234                 } else
2235                         list_add_tail(&t_alg->entry, &priv->alg_list);
2236         }
2237         /* dev_node was put above, so don't dereference it here */
2238         if (!list_empty(&priv->alg_list))
2239                 dev_info(ctrldev, "algorithms registered in /proc/crypto\n");
2240
2241         return err;
2242 }
2243
2244 module_init(caam_algapi_init);
2245 module_exit(caam_algapi_exit);
2246
2247 MODULE_LICENSE("GPL");
2248 MODULE_DESCRIPTION("FSL CAAM support for crypto API");
2249 MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");