1 /*
2  * caam - Freescale FSL CAAM support for ahash functions of crypto API
3  *
4  * Copyright 2011 Freescale Semiconductor, Inc.
5  *
6  * Based on caamalg.c crypto API driver.
7  *
8  * relationship of digest job descriptor or first job descriptor after init to
9  * shared descriptors:
10  *
11  * ---------------                     ---------------
12  * | JobDesc #1  |-------------------->|  ShareDesc  |
13  * | *(packet 1) |                     |  (hashKey)  |
14  * ---------------                     | (operation) |
15  *                                     ---------------
16  *
17  * relationship of subsequent job descriptors to shared descriptors:
18  *
19  * ---------------                     ---------------
20  * | JobDesc #2  |-------------------->|  ShareDesc  |
21  * | *(packet 2) |      |------------->|  (hashKey)  |
22  * ---------------      |    |-------->| (operation) |
23  *       .              |    |         | (load ctx2) |
24  *       .              |    |         ---------------
25  * ---------------      |    |
26  * | JobDesc #3  |------|    |
27  * | *(packet 3) |           |
28  * ---------------           |
29  *       .                   |
30  *       .                   |
31  * ---------------           |
32  * | JobDesc #4  |------------
33  * | *(packet 4) |
34  * ---------------
35  *
36  * The SharedDesc never changes for a connection unless rekeyed, but
37  * each packet will likely be in a different place. So all we need
38  * to know to process the packet is where the input is, where the
39  * output goes, and what context we want to process with. Context is
40  * in the SharedDesc, packet references in the JobDesc.
41  *
42  * So, a job desc looks like:
43  *
44  * ---------------------
45  * | Header            |
46  * | ShareDesc Pointer |
47  * | SEQ_OUT_PTR       |
48  * | (output buffer)   |
49  * | (output length)   |
50  * | SEQ_IN_PTR        |
51  * | (input buffer)    |
52  * | (input length)    |
53  * ---------------------
54  */
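/*
 * Illustrative sketch only (not a descriptor built verbatim anywhere below):
 * the job descriptor layout above maps onto the desc_constr.h helpers this
 * driver uses, assuming desc, ptr, sh_len, dst_dma, src_dma and the lengths
 * have already been set up:
 *
 *	init_job_desc_shared(desc, ptr, sh_len,
 *			     HDR_SHARE_DEFER | HDR_REVERSE); <- Header + ShareDesc pointer
 *	append_seq_out_ptr(desc, dst_dma, digestsize, 0);    <- SEQ_OUT_PTR, output buffer/length
 *	append_seq_in_ptr(desc, src_dma, nbytes, 0);         <- SEQ_IN_PTR, input buffer/length
 */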
55
56 #include "compat.h"
57
58 #include "regs.h"
59 #include "intern.h"
60 #include "desc_constr.h"
61 #include "jr.h"
62 #include "error.h"
63 #include "sg_sw_sec4.h"
64 #include "key_gen.h"
65
66 #define CAAM_CRA_PRIORITY               3000
67
68 /* max hash key is max split key size */
69 #define CAAM_MAX_HASH_KEY_SIZE          (SHA512_DIGEST_SIZE * 2)
70
71 #define CAAM_MAX_HASH_BLOCK_SIZE        SHA512_BLOCK_SIZE
72 #define CAAM_MAX_HASH_DIGEST_SIZE       SHA512_DIGEST_SIZE
73
74 /* length of descriptors text */
75 #define DESC_AHASH_BASE                 (4 * CAAM_CMD_SZ)
76 #define DESC_AHASH_UPDATE_LEN           (6 * CAAM_CMD_SZ)
77 #define DESC_AHASH_UPDATE_FIRST_LEN     (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
78 #define DESC_AHASH_FINAL_LEN            (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
79 #define DESC_AHASH_FINUP_LEN            (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
80 #define DESC_AHASH_DIGEST_LEN           (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
81
82 #define DESC_HASH_MAX_USED_BYTES        (DESC_AHASH_FINAL_LEN + \
83                                          CAAM_MAX_HASH_KEY_SIZE)
84 #define DESC_HASH_MAX_USED_LEN          (DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)
85
86 /* caam context sizes for hashes: running digest + 8 */
87 #define HASH_MSG_LEN                    8
88 #define MAX_CTX_LEN                     (HASH_MSG_LEN + SHA512_DIGEST_SIZE)
89
90 #ifdef DEBUG
91 /* for print_hex_dumps with line references */
92 #define debug(format, arg...) printk(format, arg)
93 #else
94 #define debug(format, arg...)
95 #endif
96
97
98 static struct list_head hash_list;
99
100 /* ahash per-session context */
101 struct caam_hash_ctx {
102         struct device *jrdev;
103         u32 sh_desc_update[DESC_HASH_MAX_USED_LEN];
104         u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN];
105         u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN];
106         u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN];
107         u32 sh_desc_finup[DESC_HASH_MAX_USED_LEN];
108         dma_addr_t sh_desc_update_dma;
109         dma_addr_t sh_desc_update_first_dma;
110         dma_addr_t sh_desc_fin_dma;
111         dma_addr_t sh_desc_digest_dma;
112         dma_addr_t sh_desc_finup_dma;
113         u32 alg_type;
114         u32 alg_op;
115         u8 key[CAAM_MAX_HASH_KEY_SIZE];
116         dma_addr_t key_dma;
117         int ctx_len;
118         unsigned int split_key_len;
119         unsigned int split_key_pad_len;
120 };
121
122 /* ahash state */
123 struct caam_hash_state {
124         dma_addr_t buf_dma;
125         dma_addr_t ctx_dma;
126         u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
127         int buflen_0;
128         u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
129         int buflen_1;
130         u8 caam_ctx[MAX_CTX_LEN];
131         int (*update)(struct ahash_request *req);
132         int (*final)(struct ahash_request *req);
133         int (*finup)(struct ahash_request *req);
134         int current_buf;
135 };
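/*
 * Note on buf_0/buf_1/current_buf: the update paths below double-buffer any
 * partial block, hashing out of one buffer while the unhashed tail of the
 * current request is copied into the other, then toggle current_buf. A
 * minimal sketch of the selection logic they all share:
 *
 *	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
 *	u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
 *	...
 *	state->current_buf = !state->current_buf;	(after filling next_buf)
 */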
136
137 /* Common job descriptor seq in/out ptr routines */
138
139 /* Map state->caam_ctx, and append seq_out_ptr command that points to it */
140 static inline void map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
141                                        struct caam_hash_state *state,
142                                        int ctx_len)
143 {
144         state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
145                                         ctx_len, DMA_FROM_DEVICE);
146         append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);
147 }
148
149 /* Map req->result, and append seq_out_ptr command that points to it */
150 static inline dma_addr_t map_seq_out_ptr_result(u32 *desc, struct device *jrdev,
151                                                 u8 *result, int digestsize)
152 {
153         dma_addr_t dst_dma;
154
155         dst_dma = dma_map_single(jrdev, result, digestsize, DMA_FROM_DEVICE);
156         append_seq_out_ptr(desc, dst_dma, digestsize, 0);
157
158         return dst_dma;
159 }
160
161 /* Map current buffer in state and put it in link table */
162 static inline dma_addr_t buf_map_to_sec4_sg(struct device *jrdev,
163                                             struct sec4_sg_entry *sec4_sg,
164                                             u8 *buf, int buflen)
165 {
166         dma_addr_t buf_dma;
167
168         buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
169         dma_to_sec4_sg_one(sec4_sg, buf_dma, buflen, 0);
170
171         return buf_dma;
172 }
173
174 /* Map req->src and put it in link table */
175 static inline void src_map_to_sec4_sg(struct device *jrdev,
176                                       struct scatterlist *src, int src_nents,
177                                       struct sec4_sg_entry *sec4_sg,
178                                       bool chained)
179 {
180         dma_map_sg_chained(jrdev, src, src_nents, DMA_TO_DEVICE, chained);
181         sg_to_sec4_sg_last(src, src_nents, sec4_sg, 0);
182 }
183
184 /*
185  * Only put the buffer in the link table if it contains data; either way,
186  * a previously mapped buffer must be unmapped first.
187  */
188 static inline dma_addr_t
189 try_buf_map_to_sec4_sg(struct device *jrdev, struct sec4_sg_entry *sec4_sg,
190                        u8 *buf, dma_addr_t buf_dma, int buflen,
191                        int last_buflen)
192 {
193         if (buf_dma && !dma_mapping_error(jrdev, buf_dma))
194                 dma_unmap_single(jrdev, buf_dma, last_buflen, DMA_TO_DEVICE);
195         if (buflen)
196                 buf_dma = buf_map_to_sec4_sg(jrdev, sec4_sg, buf, buflen);
197         else
198                 buf_dma = 0;
199
200         return buf_dma;
201 }
202
203 /* Map state->caam_ctx, and add it to link table */
204 static inline void ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,
205                                       struct caam_hash_state *state,
206                                       int ctx_len,
207                                       struct sec4_sg_entry *sec4_sg,
208                                       u32 flag)
209 {
210         state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
211         dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);
212 }
213
214 /* Common shared descriptor commands */
215 static inline void append_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
216 {
217         append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
218                           ctx->split_key_len, CLASS_2 |
219                           KEY_DEST_MDHA_SPLIT | KEY_ENC);
220 }
221
222 /* Append key if it has been set */
223 static inline void init_sh_desc_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
224 {
225         u32 *key_jump_cmd;
226
227         init_sh_desc(desc, HDR_SHARE_SERIAL);
228
229         if (ctx->split_key_len) {
230                 /* Skip if already shared */
231                 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
232                                            JUMP_COND_SHRD);
233
234                 append_key_ahash(desc, ctx);
235
236                 set_jump_tgt_here(desc, key_jump_cmd);
237         }
238
239         /* Propagate errors from shared to job descriptor */
240         append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
241 }
242
243 /*
244  * For ahash, read data from seqin (following state->caam_ctx) and write
245  * the resulting class 2 context to seqout, which may be state->caam_ctx
246  * or req->result
247  */
248 static inline void ahash_append_load_str(u32 *desc, int digestsize)
249 {
250         /* Calculate remaining bytes to read */
251         append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
252
253         /* Read remaining bytes */
254         append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
255                              FIFOLD_TYPE_MSG | KEY_VLF);
256
257         /* Store class2 context bytes */
258         append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
259                          LDST_SRCDST_BYTE_CONTEXT);
260 }
261
262 /*
263  * For ahash update, final and finup, import context, read and write to seqout
264  */
265 static inline void ahash_ctx_data_to_out(u32 *desc, u32 op, u32 state,
266                                          int digestsize,
267                                          struct caam_hash_ctx *ctx)
268 {
269         init_sh_desc_key_ahash(desc, ctx);
270
271         /* Import context from software */
272         append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
273                    LDST_CLASS_2_CCB | ctx->ctx_len);
274
275         /* Class 2 operation */
276         append_operation(desc, op | state | OP_ALG_ENCRYPT);
277
278         /*
279          * Load from buf and/or src and write to req->result or state->context
280          */
281         ahash_append_load_str(desc, digestsize);
282 }
283
284 /* For ahash firsts and digest, read and write to seqout */
285 static inline void ahash_data_to_out(u32 *desc, u32 op, u32 state,
286                                      int digestsize, struct caam_hash_ctx *ctx)
287 {
288         init_sh_desc_key_ahash(desc, ctx);
289
290         /* Class 2 operation */
291         append_operation(desc, op | state | OP_ALG_ENCRYPT);
292
293         /*
294          * Load from buf and/or src and write to req->result or state->context
295          */
296         ahash_append_load_str(desc, digestsize);
297 }
298
299 static int ahash_set_sh_desc(struct crypto_ahash *ahash)
300 {
301         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
302         int digestsize = crypto_ahash_digestsize(ahash);
303         struct device *jrdev = ctx->jrdev;
304         u32 have_key = 0;
305         u32 *desc;
306
307         if (ctx->split_key_len)
308                 have_key = OP_ALG_AAI_HMAC_PRECOMP;
309
310         /* ahash_update shared descriptor */
311         desc = ctx->sh_desc_update;
312
313         init_sh_desc(desc, HDR_SHARE_SERIAL);
314
315         /* Import context from software */
316         append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
317                    LDST_CLASS_2_CCB | ctx->ctx_len);
318
319         /* Class 2 operation */
320         append_operation(desc, ctx->alg_type | OP_ALG_AS_UPDATE |
321                          OP_ALG_ENCRYPT);
322
323         /* Load data and write to result or context */
324         ahash_append_load_str(desc, ctx->ctx_len);
325
326         ctx->sh_desc_update_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
327                                                  DMA_TO_DEVICE);
328         if (dma_mapping_error(jrdev, ctx->sh_desc_update_dma)) {
329                 dev_err(jrdev, "unable to map shared descriptor\n");
330                 return -ENOMEM;
331         }
332 #ifdef DEBUG
333         print_hex_dump(KERN_ERR,
334                        "ahash update shdesc@"__stringify(__LINE__)": ",
335                        DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
336 #endif
337
338         /* ahash_update_first shared descriptor */
339         desc = ctx->sh_desc_update_first;
340
341         ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INIT,
342                           ctx->ctx_len, ctx);
343
344         ctx->sh_desc_update_first_dma = dma_map_single(jrdev, desc,
345                                                        desc_bytes(desc),
346                                                        DMA_TO_DEVICE);
347         if (dma_mapping_error(jrdev, ctx->sh_desc_update_first_dma)) {
348                 dev_err(jrdev, "unable to map shared descriptor\n");
349                 return -ENOMEM;
350         }
351 #ifdef DEBUG
352         print_hex_dump(KERN_ERR,
353                        "ahash update first shdesc@"__stringify(__LINE__)": ",
354                        DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
355 #endif
356
357         /* ahash_final shared descriptor */
358         desc = ctx->sh_desc_fin;
359
360         ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
361                               OP_ALG_AS_FINALIZE, digestsize, ctx);
362
363         ctx->sh_desc_fin_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
364                                               DMA_TO_DEVICE);
365         if (dma_mapping_error(jrdev, ctx->sh_desc_fin_dma)) {
366                 dev_err(jrdev, "unable to map shared descriptor\n");
367                 return -ENOMEM;
368         }
369 #ifdef DEBUG
370         print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
371                        DUMP_PREFIX_ADDRESS, 16, 4, desc,
372                        desc_bytes(desc), 1);
373 #endif
374
375         /* ahash_finup shared descriptor */
376         desc = ctx->sh_desc_finup;
377
378         ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
379                               OP_ALG_AS_FINALIZE, digestsize, ctx);
380
381         ctx->sh_desc_finup_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
382                                                 DMA_TO_DEVICE);
383         if (dma_mapping_error(jrdev, ctx->sh_desc_finup_dma)) {
384                 dev_err(jrdev, "unable to map shared descriptor\n");
385                 return -ENOMEM;
386         }
387 #ifdef DEBUG
388         print_hex_dump(KERN_ERR, "ahash finup shdesc@"__stringify(__LINE__)": ",
389                        DUMP_PREFIX_ADDRESS, 16, 4, desc,
390                        desc_bytes(desc), 1);
391 #endif
392
393         /* ahash_digest shared descriptor */
394         desc = ctx->sh_desc_digest;
395
396         ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INITFINAL,
397                           digestsize, ctx);
398
399         ctx->sh_desc_digest_dma = dma_map_single(jrdev, desc,
400                                                  desc_bytes(desc),
401                                                  DMA_TO_DEVICE);
402         if (dma_mapping_error(jrdev, ctx->sh_desc_digest_dma)) {
403                 dev_err(jrdev, "unable to map shared descriptor\n");
404                 return -ENOMEM;
405         }
406 #ifdef DEBUG
407         print_hex_dump(KERN_ERR,
408                        "ahash digest shdesc@"__stringify(__LINE__)": ",
409                        DUMP_PREFIX_ADDRESS, 16, 4, desc,
410                        desc_bytes(desc), 1);
411 #endif
412
413         return 0;
414 }
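/*
 * Summary of the shared descriptors built above and the request paths below
 * that ride on them:
 *
 *	sh_desc_update		ahash_update_ctx()		(AS_UPDATE)
 *	sh_desc_update_first	ahash_update_no_ctx(),
 *				ahash_update_first()		(AS_INIT)
 *	sh_desc_fin		ahash_final_ctx()		(AS_FINALIZE)
 *	sh_desc_finup		ahash_finup_ctx()		(AS_FINALIZE)
 *	sh_desc_digest		ahash_digest(), ahash_final_no_ctx(),
 *				ahash_finup_no_ctx()		(AS_INITFINAL)
 */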
415
416 static int gen_split_hash_key(struct caam_hash_ctx *ctx, const u8 *key_in,
417                               u32 keylen)
418 {
419         return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
420                                ctx->split_key_pad_len, key_in, keylen,
421                                ctx->alg_op);
422 }
423
424 /* Digest the key down to digestsize if it is too large */
425 static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
426                            u32 *keylen, u8 *key_out, u32 digestsize)
427 {
428         struct device *jrdev = ctx->jrdev;
429         u32 *desc;
430         struct split_key_result result;
431         dma_addr_t src_dma, dst_dma;
432         int ret = 0;
433
434         desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
435         if (!desc) {
436                 dev_err(jrdev, "unable to allocate key input memory\n");
437                 return -ENOMEM;
438         }
439
440         init_job_desc(desc, 0);
441
442         src_dma = dma_map_single(jrdev, (void *)key_in, *keylen,
443                                  DMA_TO_DEVICE);
444         if (dma_mapping_error(jrdev, src_dma)) {
445                 dev_err(jrdev, "unable to map key input memory\n");
446                 kfree(desc);
447                 return -ENOMEM;
448         }
449         dst_dma = dma_map_single(jrdev, (void *)key_out, digestsize,
450                                  DMA_FROM_DEVICE);
451         if (dma_mapping_error(jrdev, dst_dma)) {
452                 dev_err(jrdev, "unable to map key output memory\n");
453                 dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
454                 kfree(desc);
455                 return -ENOMEM;
456         }
457
458         /* Job descriptor to perform unkeyed hash on key_in */
459         append_operation(desc, ctx->alg_type | OP_ALG_ENCRYPT |
460                          OP_ALG_AS_INITFINAL);
461         append_seq_in_ptr(desc, src_dma, *keylen, 0);
462         append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
463                              FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
464         append_seq_out_ptr(desc, dst_dma, digestsize, 0);
465         append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
466                          LDST_SRCDST_BYTE_CONTEXT);
467
468 #ifdef DEBUG
469         print_hex_dump(KERN_ERR, "key_in@"__stringify(__LINE__)": ",
470                        DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
471         print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
472                        DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
473 #endif
474
475         result.err = 0;
476         init_completion(&result.completion);
477
478         ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
479         if (!ret) {
480                 /* in progress */
481                 wait_for_completion_interruptible(&result.completion);
482                 ret = result.err;
483 #ifdef DEBUG
484                 print_hex_dump(KERN_ERR,
485                                "digested key@"__stringify(__LINE__)": ",
486                                DUMP_PREFIX_ADDRESS, 16, 4, key_in,
487                                digestsize, 1);
488 #endif
489         }
490
491         dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
492         dma_unmap_single(jrdev, dst_dma, digestsize, DMA_FROM_DEVICE);
493         *keylen = digestsize;
494
495         kfree(desc);
496
497         return ret;
498 }
499
500 static int ahash_setkey(struct crypto_ahash *ahash,
501                         const u8 *key, unsigned int keylen)
502 {
503         /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
504         static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
505         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
506         struct device *jrdev = ctx->jrdev;
507         int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
508         int digestsize = crypto_ahash_digestsize(ahash);
509         int ret = 0;
510         u8 *hashed_key = NULL;
511
512 #ifdef DEBUG
513         printk(KERN_ERR "keylen %d\n", keylen);
514 #endif
515
516         if (keylen > blocksize) {
517                 hashed_key = kmalloc(sizeof(u8) * digestsize, GFP_KERNEL |
518                                      GFP_DMA);
519                 if (!hashed_key)
520                         return -ENOMEM;
521                 ret = hash_digest_key(ctx, key, &keylen, hashed_key,
522                                       digestsize);
523                 if (ret)
524                         goto badkey;
525                 key = hashed_key;
526         }
527
528         /* Pick class 2 key length from algorithm submask */
529         ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
530                                       OP_ALG_ALGSEL_SHIFT] * 2;
531         ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
532
533 #ifdef DEBUG
534         printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
535                ctx->split_key_len, ctx->split_key_pad_len);
536         print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
537                        DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
538 #endif
539
540         ret = gen_split_hash_key(ctx, key, keylen);
541         if (ret)
542                 goto badkey;
543
544         ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len,
545                                       DMA_TO_DEVICE);
546         if (dma_mapping_error(jrdev, ctx->key_dma)) {
547                 dev_err(jrdev, "unable to map key i/o memory\n");
548                 kfree(hashed_key);
                    return -ENOMEM;
549         }
550 #ifdef DEBUG
551         print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
552                        DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
553                        ctx->split_key_pad_len, 1);
554 #endif
555
556         ret = ahash_set_sh_desc(ahash);
557         if (ret) {
558                 dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len,
559                                  DMA_TO_DEVICE);
560         }
561
562         kfree(hashed_key);
563         return ret;
564 badkey:
565         kfree(hashed_key);
566         crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
567         return -EINVAL;
568 }
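/*
 * Worked example of the key handling above, assuming hmac(sha256): block size
 * is 64 bytes and digest size is 32 bytes, so a 100-byte key is first reduced
 * by hash_digest_key() to a 32-byte key; gen_split_hash_key() then produces
 * an MDHA split key of split_key_len = 32 * 2 = 64 bytes, padded to
 * split_key_pad_len = ALIGN(64, 16) = 64 bytes before ctx->key is DMA mapped.
 */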
569
570 /*
571  * ahash_edesc - s/w-extended ahash descriptor
572  * @dst_dma: physical mapped address of req->result
573  * @sec4_sg_dma: physical mapped address of h/w link table
574  * @chained: if source is chained
575  * @src_nents: number of segments in input scatterlist
576  * @sec4_sg_bytes: length of dma mapped sec4_sg space
577  * @sec4_sg: pointer to h/w link table
578  * @hw_desc: the h/w job descriptor followed by any referenced link tables
579  */
580 struct ahash_edesc {
581         dma_addr_t dst_dma;
582         dma_addr_t sec4_sg_dma;
583         bool chained;
584         int src_nents;
585         int sec4_sg_bytes;
586         struct sec4_sg_entry *sec4_sg;
587         u32 hw_desc[0];
588 };
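/*
 * Memory layout of an ahash_edesc allocation as used by the request paths
 * below (a sketch, not a separate structure):
 *
 *	[ struct ahash_edesc | hw_desc (DESC_JOB_IO_LEN bytes) | sec4_sg table ]
 *
 * which is why they allocate sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
 * sec4_sg_bytes and then set
 *	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
 *			 DESC_JOB_IO_LEN;
 */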
589
590 static inline void ahash_unmap(struct device *dev,
591                         struct ahash_edesc *edesc,
592                         struct ahash_request *req, int dst_len)
593 {
594         if (edesc->src_nents)
595                 dma_unmap_sg_chained(dev, req->src, edesc->src_nents,
596                                      DMA_TO_DEVICE, edesc->chained);
597         if (edesc->dst_dma)
598                 dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);
599
600         if (edesc->sec4_sg_bytes)
601                 dma_unmap_single(dev, edesc->sec4_sg_dma,
602                                  edesc->sec4_sg_bytes, DMA_TO_DEVICE);
603 }
604
605 static inline void ahash_unmap_ctx(struct device *dev,
606                         struct ahash_edesc *edesc,
607                         struct ahash_request *req, int dst_len, u32 flag)
608 {
609         struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
610         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
611         struct caam_hash_state *state = ahash_request_ctx(req);
612
613         if (state->ctx_dma)
614                 dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
615         ahash_unmap(dev, edesc, req, dst_len);
616 }
617
618 static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
619                        void *context)
620 {
621         struct ahash_request *req = context;
622         struct ahash_edesc *edesc;
623         struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
624         int digestsize = crypto_ahash_digestsize(ahash);
625 #ifdef DEBUG
626         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
627         struct caam_hash_state *state = ahash_request_ctx(req);
628
629         dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
630 #endif
631
632         edesc = (struct ahash_edesc *)((char *)desc -
633                  offsetof(struct ahash_edesc, hw_desc));
634         if (err) {
635                 char tmp[CAAM_ERROR_STR_MAX];
636
637                 dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
638         }
639
640         ahash_unmap(jrdev, edesc, req, digestsize);
641         kfree(edesc);
642
643 #ifdef DEBUG
644         print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
645                        DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
646                        ctx->ctx_len, 1);
647         if (req->result)
648                 print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
649                                DUMP_PREFIX_ADDRESS, 16, 4, req->result,
650                                digestsize, 1);
651 #endif
652
653         req->base.complete(&req->base, err);
654 }
655
656 static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
657                             void *context)
658 {
659         struct ahash_request *req = context;
660         struct ahash_edesc *edesc;
661         struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
662         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
663 #ifdef DEBUG
664         struct caam_hash_state *state = ahash_request_ctx(req);
665         int digestsize = crypto_ahash_digestsize(ahash);
666
667         dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
668 #endif
669
670         edesc = (struct ahash_edesc *)((char *)desc -
671                  offsetof(struct ahash_edesc, hw_desc));
672         if (err) {
673                 char tmp[CAAM_ERROR_STR_MAX];
674
675                 dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
676         }
677
678         ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
679         kfree(edesc);
680
681 #ifdef DEBUG
682         print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
683                        DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
684                        ctx->ctx_len, 1);
685         if (req->result)
686                 print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
687                                DUMP_PREFIX_ADDRESS, 16, 4, req->result,
688                                digestsize, 1);
689 #endif
690
691         req->base.complete(&req->base, err);
692 }
693
694 static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
695                                void *context)
696 {
697         struct ahash_request *req = context;
698         struct ahash_edesc *edesc;
699         struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
700         int digestsize = crypto_ahash_digestsize(ahash);
701 #ifdef DEBUG
702         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
703         struct caam_hash_state *state = ahash_request_ctx(req);
704
705         dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
706 #endif
707
708         edesc = (struct ahash_edesc *)((char *)desc -
709                  offsetof(struct ahash_edesc, hw_desc));
710         if (err) {
711                 char tmp[CAAM_ERROR_STR_MAX];
712
713                 dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
714         }
715
716         ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
717         kfree(edesc);
718
719 #ifdef DEBUG
720         print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
721                        DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
722                        ctx->ctx_len, 1);
723         if (req->result)
724                 print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
725                                DUMP_PREFIX_ADDRESS, 16, 4, req->result,
726                                digestsize, 1);
727 #endif
728
729         req->base.complete(&req->base, err);
730 }
731
732 static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
733                                void *context)
734 {
735         struct ahash_request *req = context;
736         struct ahash_edesc *edesc;
737         struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
738         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
739 #ifdef DEBUG
740         struct caam_hash_state *state = ahash_request_ctx(req);
741         int digestsize = crypto_ahash_digestsize(ahash);
742
743         dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
744 #endif
745
746         edesc = (struct ahash_edesc *)((char *)desc -
747                  offsetof(struct ahash_edesc, hw_desc));
748         if (err) {
749                 char tmp[CAAM_ERROR_STR_MAX];
750
751                 dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
752         }
753
754         ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
755         kfree(edesc);
756
757 #ifdef DEBUG
758         print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
759                        DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
760                        ctx->ctx_len, 1);
761         if (req->result)
762                 print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
763                                DUMP_PREFIX_ADDRESS, 16, 4, req->result,
764                                digestsize, 1);
765 #endif
766
767         req->base.complete(&req->base, err);
768 }
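/*
 * The four completion callbacks above differ only in what they unmap:
 * ahash_done() unmaps the result (digestsize), ahash_done_bi() a
 * DMA_BIDIRECTIONAL context, ahash_done_ctx_src() a DMA_FROM_DEVICE context
 * plus the result, and ahash_done_ctx_dst() a DMA_TO_DEVICE context. Each
 * recovers the extended descriptor from the job descriptor address the same
 * way:
 *
 *	edesc = (struct ahash_edesc *)((char *)desc -
 *		 offsetof(struct ahash_edesc, hw_desc));
 */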
769
770 /* submit update job descriptor */
771 static int ahash_update_ctx(struct ahash_request *req)
772 {
773         struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
774         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
775         struct caam_hash_state *state = ahash_request_ctx(req);
776         struct device *jrdev = ctx->jrdev;
777         gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
778                        CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
779         u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
780         int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
781         u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
782         int *next_buflen = state->current_buf ? &state->buflen_0 :
783                            &state->buflen_1, last_buflen;
784         int in_len = *buflen + req->nbytes, to_hash;
785         u32 *sh_desc = ctx->sh_desc_update, *desc;
786         dma_addr_t ptr = ctx->sh_desc_update_dma;
787         int src_nents, sec4_sg_bytes, sec4_sg_src_index;
788         struct ahash_edesc *edesc;
789         bool chained = false;
790         int ret = 0;
791         int sh_len;
792
793         last_buflen = *next_buflen;
794         *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
795         to_hash = in_len - *next_buflen;
796
797         if (to_hash) {
798                 src_nents = __sg_count(req->src, req->nbytes - (*next_buflen),
799                                        &chained);
800                 sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
801                 sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
802                                  sizeof(struct sec4_sg_entry);
803
804                 /*
805                  * allocate space for base edesc and hw desc commands,
806                  * link tables
807                  */
808                 edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
809                                 sec4_sg_bytes, GFP_DMA | flags);
810                 if (!edesc) {
811                         dev_err(jrdev,
812                                 "could not allocate extended descriptor\n");
813                         return -ENOMEM;
814                 }
815
816                 edesc->src_nents = src_nents;
817                 edesc->chained = chained;
818                 edesc->sec4_sg_bytes = sec4_sg_bytes;
819                 edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
820                                  DESC_JOB_IO_LEN;
821                 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
822                                                      sec4_sg_bytes,
823                                                      DMA_TO_DEVICE);
824
825                 ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
826                                    edesc->sec4_sg, DMA_BIDIRECTIONAL);
827
828                 state->buf_dma = try_buf_map_to_sec4_sg(jrdev,
829                                                         edesc->sec4_sg + 1,
830                                                         buf, state->buf_dma,
831                                                         *buflen, last_buflen);
832
833                 if (src_nents) {
834                         src_map_to_sec4_sg(jrdev, req->src, src_nents,
835                                            edesc->sec4_sg + sec4_sg_src_index,
836                                            chained);
837                         if (*next_buflen) {
838                                 sg_copy_part(next_buf, req->src, to_hash -
839                                              *buflen, req->nbytes);
840                                 state->current_buf = !state->current_buf;
841                         }
842                 } else {
843                         (edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
844                                                         SEC4_SG_LEN_FIN;
845                 }
846
847                 sh_len = desc_len(sh_desc);
848                 desc = edesc->hw_desc;
849                 init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
850                                      HDR_REVERSE);
851
852                 append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
853                                        to_hash, LDST_SGF);
854
855                 append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);
856
857 #ifdef DEBUG
858                 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
859                                DUMP_PREFIX_ADDRESS, 16, 4, desc,
860                                desc_bytes(desc), 1);
861 #endif
862
863                 ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req);
864                 if (!ret) {
865                         ret = -EINPROGRESS;
866                 } else {
867                         ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
868                                            DMA_BIDIRECTIONAL);
869                         kfree(edesc);
870                 }
871         } else if (*next_buflen) {
872                 sg_copy(buf + *buflen, req->src, req->nbytes);
873                 *buflen = *next_buflen;
874                 *next_buflen = last_buflen;
875         }
876 #ifdef DEBUG
877         print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
878                        DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
879         print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
880                        DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
881                        *next_buflen, 1);
882 #endif
883
884         return ret;
885 }
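/*
 * Worked example of the buffering arithmetic in ahash_update_ctx() above,
 * assuming a 64-byte block size: with 10 bytes already buffered and a
 * 100-byte request, in_len = 110, *next_buflen = 110 & 63 = 46 and
 * to_hash = 64, so one full block (10 buffered + 54 new bytes) is sent to the
 * CAAM and the trailing 46 bytes are copied into the other buffer for the
 * next update/final/finup.
 */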
886
887 static int ahash_final_ctx(struct ahash_request *req)
888 {
889         struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
890         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
891         struct caam_hash_state *state = ahash_request_ctx(req);
892         struct device *jrdev = ctx->jrdev;
893         gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
894                        CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
895         u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
896         int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
897         int last_buflen = state->current_buf ? state->buflen_0 :
898                           state->buflen_1;
899         u32 *sh_desc = ctx->sh_desc_fin, *desc;
900         dma_addr_t ptr = ctx->sh_desc_fin_dma;
901         int sec4_sg_bytes;
902         int digestsize = crypto_ahash_digestsize(ahash);
903         struct ahash_edesc *edesc;
904         int ret = 0;
905         int sh_len;
906
907         sec4_sg_bytes = (1 + (buflen ? 1 : 0)) * sizeof(struct sec4_sg_entry);
908
909         /* allocate space for base edesc and hw desc commands, link tables */
910         edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
911                         sec4_sg_bytes, GFP_DMA | flags);
912         if (!edesc) {
913                 dev_err(jrdev, "could not allocate extended descriptor\n");
914                 return -ENOMEM;
915         }
916
917         sh_len = desc_len(sh_desc);
918         desc = edesc->hw_desc;
919         init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
920
921         edesc->sec4_sg_bytes = sec4_sg_bytes;
922         edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
923                          DESC_JOB_IO_LEN;
924         edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
925                                             sec4_sg_bytes, DMA_TO_DEVICE);
926         edesc->src_nents = 0;
927
928         ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, edesc->sec4_sg,
929                            DMA_TO_DEVICE);
930
931         state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
932                                                 buf, state->buf_dma, buflen,
933                                                 last_buflen);
934         (edesc->sec4_sg + sec4_sg_bytes / sizeof(struct sec4_sg_entry) -
                1)->len |= SEC4_SG_LEN_FIN;
935
936         append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
937                           LDST_SGF);
938
939         edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
940                                                 digestsize);
941
942 #ifdef DEBUG
943         print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
944                        DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
945 #endif
946
947         ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
948         if (!ret) {
949                 ret = -EINPROGRESS;
950         } else {
951                 ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
952                 kfree(edesc);
953         }
954
955         return ret;
956 }
957
958 static int ahash_finup_ctx(struct ahash_request *req)
959 {
960         struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
961         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
962         struct caam_hash_state *state = ahash_request_ctx(req);
963         struct device *jrdev = ctx->jrdev;
964         gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
965                        CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
966         u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
967         int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
968         int last_buflen = state->current_buf ? state->buflen_0 :
969                           state->buflen_1;
970         u32 *sh_desc = ctx->sh_desc_finup, *desc;
971         dma_addr_t ptr = ctx->sh_desc_finup_dma;
972         int sec4_sg_bytes, sec4_sg_src_index;
973         int src_nents;
974         int digestsize = crypto_ahash_digestsize(ahash);
975         struct ahash_edesc *edesc;
976         bool chained = false;
977         int ret = 0;
978         int sh_len;
979
980         src_nents = __sg_count(req->src, req->nbytes, &chained);
981         sec4_sg_src_index = 1 + (buflen ? 1 : 0);
982         sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
983                          sizeof(struct sec4_sg_entry);
984
985         /* allocate space for base edesc and hw desc commands, link tables */
986         edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
987                         sec4_sg_bytes, GFP_DMA | flags);
988         if (!edesc) {
989                 dev_err(jrdev, "could not allocate extended descriptor\n");
990                 return -ENOMEM;
991         }
992
993         sh_len = desc_len(sh_desc);
994         desc = edesc->hw_desc;
995         init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
996
997         edesc->src_nents = src_nents;
998         edesc->chained = chained;
999         edesc->sec4_sg_bytes = sec4_sg_bytes;
1000         edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
1001                          DESC_JOB_IO_LEN;
1002         edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1003                                             sec4_sg_bytes, DMA_TO_DEVICE);
1004
1005         ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, edesc->sec4_sg,
1006                            DMA_TO_DEVICE);
1007
1008         state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
1009                                                 buf, state->buf_dma, buflen,
1010                                                 last_buflen);
1011
1012         src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg +
1013                            sec4_sg_src_index, chained);
1014
1015         append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
1016                                buflen + req->nbytes, LDST_SGF);
1017
1018         edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
1019                                                 digestsize);
1020
1021 #ifdef DEBUG
1022         print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
1023                        DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
1024 #endif
1025
1026         ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
1027         if (!ret) {
1028                 ret = -EINPROGRESS;
1029         } else {
1030                 ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
1031                 kfree(edesc);
1032         }
1033
1034         return ret;
1035 }
1036
1037 static int ahash_digest(struct ahash_request *req)
1038 {
1039         struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1040         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1041         struct device *jrdev = ctx->jrdev;
1042         gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1043                        CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
1044         u32 *sh_desc = ctx->sh_desc_digest, *desc;
1045         dma_addr_t ptr = ctx->sh_desc_digest_dma;
1046         int digestsize = crypto_ahash_digestsize(ahash);
1047         int src_nents, sec4_sg_bytes;
1048         dma_addr_t src_dma;
1049         struct ahash_edesc *edesc;
1050         bool chained = false;
1051         int ret = 0;
1052         u32 options;
1053         int sh_len;
1054
1055         src_nents = sg_count(req->src, req->nbytes, &chained);
1056         dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, DMA_TO_DEVICE,
1057                            chained);
1058         sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);
1059
1060         /* allocate space for base edesc and hw desc commands, link tables */
1061         edesc = kmalloc(sizeof(struct ahash_edesc) + sec4_sg_bytes +
1062                         DESC_JOB_IO_LEN, GFP_DMA | flags);
1063         if (!edesc) {
1064                 dev_err(jrdev, "could not allocate extended descriptor\n");
1065                 return -ENOMEM;
1066         }
1067         edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
1068                           DESC_JOB_IO_LEN;
1069         edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1070                                             sec4_sg_bytes, DMA_TO_DEVICE);
1071         edesc->src_nents = src_nents;
1072         edesc->chained = chained;
             edesc->sec4_sg_bytes = sec4_sg_bytes;
1073
1074         sh_len = desc_len(sh_desc);
1075         desc = edesc->hw_desc;
1076         init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
1077
1078         if (src_nents) {
1079                 sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
1080                 src_dma = edesc->sec4_sg_dma;
1081                 options = LDST_SGF;
1082         } else {
1083                 src_dma = sg_dma_address(req->src);
1084                 options = 0;
1085         }
1086         append_seq_in_ptr(desc, src_dma, req->nbytes, options);
1087
1088         edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
1089                                                 digestsize);
1090
1091 #ifdef DEBUG
1092         print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
1093                        DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
1094 #endif
1095
1096         ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
1097         if (!ret) {
1098                 ret = -EINPROGRESS;
1099         } else {
1100                 ahash_unmap(jrdev, edesc, req, digestsize);
1101                 kfree(edesc);
1102         }
1103
1104         return ret;
1105 }
1106
1107 /* submit ahash final if it is the first job descriptor */
1108 static int ahash_final_no_ctx(struct ahash_request *req)
1109 {
1110         struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1111         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1112         struct caam_hash_state *state = ahash_request_ctx(req);
1113         struct device *jrdev = ctx->jrdev;
1114         gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1115                        CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
1116         u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
1117         int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
1118         u32 *sh_desc = ctx->sh_desc_digest, *desc;
1119         dma_addr_t ptr = ctx->sh_desc_digest_dma;
1120         int digestsize = crypto_ahash_digestsize(ahash);
1121         struct ahash_edesc *edesc;
1122         int ret = 0;
1123         int sh_len;
1124
1125         /* allocate space for base edesc and hw desc commands, link tables */
1126         edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN,
1127                         GFP_DMA | flags);
1128         if (!edesc) {
1129                 dev_err(jrdev, "could not allocate extended descriptor\n");
1130                 return -ENOMEM;
1131         }
1132
1133         sh_len = desc_len(sh_desc);
1134         desc = edesc->hw_desc;
1135         init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
1136
1137         state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
1138
1139         append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
1140
1141         edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
1142                                                 digestsize);
1143         edesc->src_nents = 0;
             edesc->sec4_sg_bytes = 0;
1144
1145 #ifdef DEBUG
1146         print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
1147                        DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
1148 #endif
1149
1150         ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
1151         if (!ret) {
1152                 ret = -EINPROGRESS;
1153         } else {
1154                 ahash_unmap(jrdev, edesc, req, digestsize);
1155                 kfree(edesc);
1156         }
1157
1158         return ret;
1159 }
1160
1161 /* submit ahash update if it is the first job descriptor after update */
1162 static int ahash_update_no_ctx(struct ahash_request *req)
1163 {
1164         struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1165         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1166         struct caam_hash_state *state = ahash_request_ctx(req);
1167         struct device *jrdev = ctx->jrdev;
1168         gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1169                        CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
1170         u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
1171         int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
1172         u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
1173         int *next_buflen = state->current_buf ? &state->buflen_0 :
1174                            &state->buflen_1;
1175         int in_len = *buflen + req->nbytes, to_hash;
1176         int sec4_sg_bytes, src_nents;
1177         struct ahash_edesc *edesc;
1178         u32 *desc, *sh_desc = ctx->sh_desc_update_first;
1179         dma_addr_t ptr = ctx->sh_desc_update_first_dma;
1180         bool chained = false;
1181         int ret = 0;
1182         int sh_len;
1183
1184         *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
1185         to_hash = in_len - *next_buflen;
1186
1187         if (to_hash) {
1188                 src_nents = __sg_count(req->src, req->nbytes - (*next_buflen),
1189                                        &chained);
1190                 sec4_sg_bytes = (1 + src_nents) *
1191                                 sizeof(struct sec4_sg_entry);
1192
1193                 /*
1194                  * allocate space for base edesc and hw desc commands,
1195                  * link tables
1196                  */
1197                 edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
1198                                 sec4_sg_bytes, GFP_DMA | flags);
1199                 if (!edesc) {
1200                         dev_err(jrdev,
1201                                 "could not allocate extended descriptor\n");
1202                         return -ENOMEM;
1203                 }
1204
1205                 edesc->src_nents = src_nents;
1206                 edesc->chained = chained;
1207                 edesc->sec4_sg_bytes = sec4_sg_bytes;
1208                 edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
1209                                  DESC_JOB_IO_LEN;
1210                 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1211                                                     sec4_sg_bytes,
1212                                                     DMA_TO_DEVICE);
1213
1214                 state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg,
1215                                                     buf, *buflen);
1216                 src_map_to_sec4_sg(jrdev, req->src, src_nents,
1217                                    edesc->sec4_sg + 1, chained);
1218                 if (*next_buflen) {
1219                         sg_copy_part(next_buf, req->src, to_hash - *buflen,
1220                                     req->nbytes);
1221                         state->current_buf = !state->current_buf;
1222                 }
1223
1224                 sh_len = desc_len(sh_desc);
1225                 desc = edesc->hw_desc;
1226                 init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
1227                                      HDR_REVERSE);
1228
1229                 append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);
1230
1231                 map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
1232
1233 #ifdef DEBUG
1234                 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
1235                                DUMP_PREFIX_ADDRESS, 16, 4, desc,
1236                                desc_bytes(desc), 1);
1237 #endif
1238
1239                 ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
1240                 if (!ret) {
1241                         ret = -EINPROGRESS;
1242                         state->update = ahash_update_ctx;
1243                         state->finup = ahash_finup_ctx;
1244                         state->final = ahash_final_ctx;
1245                 } else {
1246                         ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
1247                                         DMA_TO_DEVICE);
1248                         kfree(edesc);
1249                 }
1250         } else if (*next_buflen) {
1251                 sg_copy(buf + *buflen, req->src, req->nbytes);
1252                 *buflen = *next_buflen;
1253                 *next_buflen = 0;
1254         }
1255 #ifdef DEBUG
1256         print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
1257                        DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
1258         print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
1259                        DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
1260                        *next_buflen, 1);
1261 #endif
1262
1263         return ret;
1264 }
1265
1266 /* submit ahash finup if it is the first job descriptor after update */
1267 static int ahash_finup_no_ctx(struct ahash_request *req)
1268 {
1269         struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1270         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1271         struct caam_hash_state *state = ahash_request_ctx(req);
1272         struct device *jrdev = ctx->jrdev;
1273         gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1274                        CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
1275         u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
1276         int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
1277         int last_buflen = state->current_buf ? state->buflen_0 :
1278                           state->buflen_1;
1279         u32 *sh_desc = ctx->sh_desc_digest, *desc;
1280         dma_addr_t ptr = ctx->sh_desc_digest_dma;
1281         int sec4_sg_bytes, sec4_sg_src_index, src_nents;
1282         int digestsize = crypto_ahash_digestsize(ahash);
1283         struct ahash_edesc *edesc;
1284         bool chained = false;
1285         int sh_len;
1286         int ret = 0;
1287
1288         src_nents = __sg_count(req->src, req->nbytes, &chained);
1289         sec4_sg_src_index = 2;
1290         sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
1291                          sizeof(struct sec4_sg_entry);
1292
1293         /* allocate space for base edesc and hw desc commands, link tables */
1294         edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
1295                         sec4_sg_bytes, GFP_DMA | flags);
1296         if (!edesc) {
1297                 dev_err(jrdev, "could not allocate extended descriptor\n");
1298                 return -ENOMEM;
1299         }
1300
1301         sh_len = desc_len(sh_desc);
1302         desc = edesc->hw_desc;
1303         init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
1304
1305         edesc->src_nents = src_nents;
1306         edesc->chained = chained;
1307         edesc->sec4_sg_bytes = sec4_sg_bytes;
1308         edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
1309                          DESC_JOB_IO_LEN;
1310         edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1311                                             sec4_sg_bytes, DMA_TO_DEVICE);
1312
1313         state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, buf,
1314                                                 state->buf_dma, buflen,
1315                                                 last_buflen);
1316
1317         src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg + 1,
1318                            chained);
1319
1320         append_seq_in_ptr(desc, edesc->sec4_sg_dma, buflen +
1321                                req->nbytes, LDST_SGF);
1322
1323         edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
1324                                                 digestsize);
1325
1326 #ifdef DEBUG
1327         print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
1328                        DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
1329 #endif
1330
1331         ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
1332         if (!ret) {
1333                 ret = -EINPROGRESS;
1334         } else {
1335                 ahash_unmap(jrdev, edesc, req, digestsize);
1336                 kfree(edesc);
1337         }
1338
1339         return ret;
1340 }
1341
1342 /* submit first update job descriptor after init */
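/*
 * First data after init: whole blocks are pushed through the update_first
 * shared descriptor and the running digest is written back to state->ctx;
 * any sub-block tail is parked in next_buf for a later call. On successful
 * submission the state handlers switch to the *_ctx variants; if no full
 * block was available yet they switch to the *_no_ctx variants instead.
 */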
1343 static int ahash_update_first(struct ahash_request *req)
1344 {
1345         struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1346         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1347         struct caam_hash_state *state = ahash_request_ctx(req);
1348         struct device *jrdev = ctx->jrdev;
1349         gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1350                        CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
1351         u8 *next_buf = state->buf_0 + state->current_buf *
1352                        CAAM_MAX_HASH_BLOCK_SIZE;
1353         int *next_buflen = &state->buflen_0 + state->current_buf;
1354         int to_hash;
1355         u32 *sh_desc = ctx->sh_desc_update_first, *desc;
1356         dma_addr_t ptr = ctx->sh_desc_update_first_dma;
1357         int sec4_sg_bytes, src_nents;
1358         dma_addr_t src_dma;
1359         u32 options;
1360         struct ahash_edesc *edesc;
1361         bool chained = false;
1362         int ret = 0;
1363         int sh_len;
1364
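        /*
         * Only whole blocks are hashed in this pass; blocksize is a power of
         * two, so masking with (blocksize - 1) isolates the sub-block tail,
         * which is copied into next_buf further down.
         */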
1365         *next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
1366                                       1);
1367         to_hash = req->nbytes - *next_buflen;
1368
1369         if (to_hash) {
1370                 src_nents = sg_count(req->src, req->nbytes - (*next_buflen),
1371                                      &chained);
1372                 dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
1373                                    DMA_TO_DEVICE, chained);
1374                 sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);
1375
1376                 /*
1377                  * allocate space for base edesc and hw desc commands,
1378                  * link tables
1379                  */
1380                 edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
1381                                 sec4_sg_bytes, GFP_DMA | flags);
1382                 if (!edesc) {
1383                         dev_err(jrdev,
1384                                 "could not allocate extended descriptor\n");
1385                         return -ENOMEM;
1386                 }
1387
1388                 edesc->src_nents = src_nents;
1389                 edesc->chained = chained;
1390                 edesc->sec4_sg_bytes = sec4_sg_bytes;
1391                 edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
1392                                  DESC_JOB_IO_LEN;
1393                 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1394                                                     sec4_sg_bytes,
1395                                                     DMA_TO_DEVICE);
1396
1397                 if (src_nents) {
1398                         sg_to_sec4_sg_last(req->src, src_nents,
1399                                            edesc->sec4_sg, 0);
1400                         src_dma = edesc->sec4_sg_dma;
1401                         options = LDST_SGF;
1402                 } else {
1403                         src_dma = sg_dma_address(req->src);
1404                         options = 0;
1405                 }
1406
1407                 if (*next_buflen)
1408                         sg_copy_part(next_buf, req->src, to_hash, req->nbytes);
1409
1410                 sh_len = desc_len(sh_desc);
1411                 desc = edesc->hw_desc;
1412                 init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
1413                                      HDR_REVERSE);
1414
1415                 append_seq_in_ptr(desc, src_dma, to_hash, options);
1416
1417                 map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
1418
1419 #ifdef DEBUG
1420                 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
1421                                DUMP_PREFIX_ADDRESS, 16, 4, desc,
1422                                desc_bytes(desc), 1);
1423 #endif
1424
1425                 ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst,
1426                                       req);
1427                 if (!ret) {
1428                         ret = -EINPROGRESS;
1429                         state->update = ahash_update_ctx;
1430                         state->finup = ahash_finup_ctx;
1431                         state->final = ahash_final_ctx;
1432                 } else {
1433                         ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
1434                                         DMA_TO_DEVICE);
1435                         kfree(edesc);
1436                 }
1437         } else if (*next_buflen) {
1438                 state->update = ahash_update_no_ctx;
1439                 state->finup = ahash_finup_no_ctx;
1440                 state->final = ahash_final_no_ctx;
1441                 sg_copy(next_buf, req->src, req->nbytes);
1442         }
1443 #ifdef DEBUG
1444         print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
1445                        DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
1446                        *next_buflen, 1);
1447 #endif
1448
1449         return ret;
1450 }
1451
1452 static int ahash_finup_first(struct ahash_request *req)
1453 {
1454         return ahash_digest(req);
1455 }
1456
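/*
 * Request-level state machine: ahash_init() points update/finup/final at the
 * "first"/no-context handlers; once a job has established a running context
 * in hardware they are redirected to the *_ctx variants, and while all data
 * still fits in the software buffer they fall back to the *_no_ctx variants.
 */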
1457 static int ahash_init(struct ahash_request *req)
1458 {
1459         struct caam_hash_state *state = ahash_request_ctx(req);
1460
1461         state->update = ahash_update_first;
1462         state->finup = ahash_finup_first;
1463         state->final = ahash_final_no_ctx;
1464
1465         state->current_buf = 0;
1466
1467         return 0;
1468 }
1469
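/* Generic entry points: dispatch to whichever handler the state selected */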
1470 static int ahash_update(struct ahash_request *req)
1471 {
1472         struct caam_hash_state *state = ahash_request_ctx(req);
1473
1474         return state->update(req);
1475 }
1476
1477 static int ahash_finup(struct ahash_request *req)
1478 {
1479         struct caam_hash_state *state = ahash_request_ctx(req);
1480
1481         return state->finup(req);
1482 }
1483
1484 static int ahash_final(struct ahash_request *req)
1485 {
1486         struct caam_hash_state *state = ahash_request_ctx(req);
1487
1488         return state->final(req);
1489 }
1490
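/*
 * export/import serialize the transform context followed immediately by the
 * request state, so the blob must hold sizeof(struct caam_hash_ctx) +
 * sizeof(struct caam_hash_state) bytes. The context carries the job-ring
 * pointer and shared-descriptor DMA handles, so the blob is only meaningful
 * when re-imported on the same device.
 */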
1491 static int ahash_export(struct ahash_request *req, void *out)
1492 {
1493         struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1494         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1495         struct caam_hash_state *state = ahash_request_ctx(req);
1496
1497         memcpy(out, ctx, sizeof(struct caam_hash_ctx));
1498         memcpy(out + sizeof(struct caam_hash_ctx), state,
1499                sizeof(struct caam_hash_state));
1500         return 0;
1501 }
1502
1503 static int ahash_import(struct ahash_request *req, const void *in)
1504 {
1505         struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1506         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1507         struct caam_hash_state *state = ahash_request_ctx(req);
1508
1509         memcpy(ctx, in, sizeof(struct caam_hash_ctx));
1510         memcpy(state, in + sizeof(struct caam_hash_ctx),
1511                sizeof(struct caam_hash_state));
1512         return 0;
1513 }
1514
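/*
 * One entry per supported hash: name/driver_name register the unkeyed ahash,
 * hmac_name/hmac_driver_name register the keyed (HMAC) variant, and
 * alg_type/alg_op are the CAAM OPERATION algorithm selectors copied into the
 * per-tfm context when the shared descriptors are built.
 */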
1515 struct caam_hash_template {
1516         char name[CRYPTO_MAX_ALG_NAME];
1517         char driver_name[CRYPTO_MAX_ALG_NAME];
1518         char hmac_name[CRYPTO_MAX_ALG_NAME];
1519         char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
1520         unsigned int blocksize;
1521         struct ahash_alg template_ahash;
1522         u32 alg_type;
1523         u32 alg_op;
1524 };
1525
1526 /* ahash algorithm templates */
1527 static struct caam_hash_template driver_hash[] = {
1528         {
1529                 .name = "sha1",
1530                 .driver_name = "sha1-caam",
1531                 .hmac_name = "hmac(sha1)",
1532                 .hmac_driver_name = "hmac-sha1-caam",
1533                 .blocksize = SHA1_BLOCK_SIZE,
1534                 .template_ahash = {
1535                         .init = ahash_init,
1536                         .update = ahash_update,
1537                         .final = ahash_final,
1538                         .finup = ahash_finup,
1539                         .digest = ahash_digest,
1540                         .export = ahash_export,
1541                         .import = ahash_import,
1542                         .setkey = ahash_setkey,
1543                         .halg = {
1544                                 .digestsize = SHA1_DIGEST_SIZE,
1545                                 },
1546                         },
1547                 .alg_type = OP_ALG_ALGSEL_SHA1,
1548                 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
1549         }, {
1550                 .name = "sha224",
1551                 .driver_name = "sha224-caam",
1552                 .hmac_name = "hmac(sha224)",
1553                 .hmac_driver_name = "hmac-sha224-caam",
1554                 .blocksize = SHA224_BLOCK_SIZE,
1555                 .template_ahash = {
1556                         .init = ahash_init,
1557                         .update = ahash_update,
1558                         .final = ahash_final,
1559                         .finup = ahash_finup,
1560                         .digest = ahash_digest,
1561                         .export = ahash_export,
1562                         .import = ahash_import,
1563                         .setkey = ahash_setkey,
1564                         .halg = {
1565                                 .digestsize = SHA224_DIGEST_SIZE,
1566                                 },
1567                         },
1568                 .alg_type = OP_ALG_ALGSEL_SHA224,
1569                 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
1570         }, {
1571                 .name = "sha256",
1572                 .driver_name = "sha256-caam",
1573                 .hmac_name = "hmac(sha256)",
1574                 .hmac_driver_name = "hmac-sha256-caam",
1575                 .blocksize = SHA256_BLOCK_SIZE,
1576                 .template_ahash = {
1577                         .init = ahash_init,
1578                         .update = ahash_update,
1579                         .final = ahash_final,
1580                         .finup = ahash_finup,
1581                         .digest = ahash_digest,
1582                         .export = ahash_export,
1583                         .import = ahash_import,
1584                         .setkey = ahash_setkey,
1585                         .halg = {
1586                                 .digestsize = SHA256_DIGEST_SIZE,
1587                                 },
1588                         },
1589                 .alg_type = OP_ALG_ALGSEL_SHA256,
1590                 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
1591         }, {
1592                 .name = "sha384",
1593                 .driver_name = "sha384-caam",
1594                 .hmac_name = "hmac(sha384)",
1595                 .hmac_driver_name = "hmac-sha384-caam",
1596                 .blocksize = SHA384_BLOCK_SIZE,
1597                 .template_ahash = {
1598                         .init = ahash_init,
1599                         .update = ahash_update,
1600                         .final = ahash_final,
1601                         .finup = ahash_finup,
1602                         .digest = ahash_digest,
1603                         .export = ahash_export,
1604                         .import = ahash_import,
1605                         .setkey = ahash_setkey,
1606                         .halg = {
1607                                 .digestsize = SHA384_DIGEST_SIZE,
1608                                 },
1609                         },
1610                 .alg_type = OP_ALG_ALGSEL_SHA384,
1611                 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
1612         }, {
1613                 .name = "sha512",
1614                 .driver_name = "sha512-caam",
1615                 .hmac_name = "hmac(sha512)",
1616                 .hmac_driver_name = "hmac-sha512-caam",
1617                 .blocksize = SHA512_BLOCK_SIZE,
1618                 .template_ahash = {
1619                         .init = ahash_init,
1620                         .update = ahash_update,
1621                         .final = ahash_final,
1622                         .finup = ahash_finup,
1623                         .digest = ahash_digest,
1624                         .export = ahash_export,
1625                         .import = ahash_import,
1626                         .setkey = ahash_setkey,
1627                         .halg = {
1628                                 .digestsize = SHA512_DIGEST_SIZE,
1629                                 },
1630                         },
1631                 .alg_type = OP_ALG_ALGSEL_SHA512,
1632                 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
1633         }, {
1634                 .name = "md5",
1635                 .driver_name = "md5-caam",
1636                 .hmac_name = "hmac(md5)",
1637                 .hmac_driver_name = "hmac-md5-caam",
1638                 .blocksize = MD5_BLOCK_WORDS * 4,
1639                 .template_ahash = {
1640                         .init = ahash_init,
1641                         .update = ahash_update,
1642                         .final = ahash_final,
1643                         .finup = ahash_finup,
1644                         .digest = ahash_digest,
1645                         .export = ahash_export,
1646                         .import = ahash_import,
1647                         .setkey = ahash_setkey,
1648                         .halg = {
1649                                 .digestsize = MD5_DIGEST_SIZE,
1650                                 },
1651                         },
1652                 .alg_type = OP_ALG_ALGSEL_MD5,
1653                 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
1654         },
1655 };
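
/*
 * Illustrative only, not part of the driver: callers reach these
 * implementations through the generic ahash API; an allocation such as
 * "sha256" or "hmac(sha256)" resolves to the CAAM version whenever its
 * cra_priority (3000) wins the selection. A minimal one-shot digest sketch,
 * assuming the caller supplies data/len and handles async completion:
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	struct scatterlist sg;
 *	u8 out[SHA256_DIGEST_SIZE];
 *
 *	sg_init_one(&sg, data, len);
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   my_done_cb, my_done_ctx);
 *	ahash_request_set_crypt(req, &sg, out, len);
 *	err = crypto_ahash_digest(req);
 *	(wait for completion if err == -EINPROGRESS, then:)
 *	ahash_request_free(req);
 *	crypto_free_ahash(tfm);
 *
 * IS_ERR()/NULL checks are omitted; my_done_cb and my_done_ctx are
 * placeholders for the caller's completion handling.
 */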
1656
1657 struct caam_hash_alg {
1658         struct list_head entry;
1659         int alg_type;
1660         int alg_op;
1661         struct ahash_alg ahash_alg;
1662 };
1663
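/*
 * Per-tfm setup: acquire a job ring so requests on this transform complete
 * in order, record the class-2 algorithm selectors from the matching
 * template, size the running context from the MDHA digest width, and build
 * the shared descriptors used by every subsequent job.
 */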
1664 static int caam_hash_cra_init(struct crypto_tfm *tfm)
1665 {
1666         struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
1667         struct crypto_alg *base = tfm->__crt_alg;
1668         struct hash_alg_common *halg =
1669                  container_of(base, struct hash_alg_common, base);
1670         struct ahash_alg *alg =
1671                  container_of(halg, struct ahash_alg, halg);
1672         struct caam_hash_alg *caam_hash =
1673                  container_of(alg, struct caam_hash_alg, ahash_alg);
1674         struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1675         /* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
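        /*
         * SHA-224 and SHA-384 keep the full SHA-256/SHA-512 internal state,
         * hence the bare 32 and 64 below.
         */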
1676         static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
1677                                          HASH_MSG_LEN + SHA1_DIGEST_SIZE,
1678                                          HASH_MSG_LEN + 32,
1679                                          HASH_MSG_LEN + SHA256_DIGEST_SIZE,
1680                                          HASH_MSG_LEN + 64,
1681                                          HASH_MSG_LEN + SHA512_DIGEST_SIZE };
1682         int ret = 0;
1683
1684         /*
1685          * Get a Job ring from the Job Ring driver to ensure in-order
1686          * crypto request processing per tfm
1687          */
1688         ctx->jrdev = caam_jr_alloc();
1689         if (IS_ERR(ctx->jrdev)) {
1690                 pr_err("Job Ring Device allocation for transform failed\n");
1691                 return PTR_ERR(ctx->jrdev);
1692         }
1693         /* copy descriptor header template value */
1694         ctx->alg_type = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
1695         ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_hash->alg_op;
1696
1697         ctx->ctx_len = runninglen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
1698                                   OP_ALG_ALGSEL_SHIFT];
1699
1700         crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1701                                  sizeof(struct caam_hash_state));
1702
1703         ret = ahash_set_sh_desc(ahash);
1704
1705         return ret;
1706 }
1707
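/*
 * Undo caam_hash_cra_init(): unmap whichever shared descriptors were mapped
 * and hand the job ring back to the Job Ring driver.
 */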
1708 static void caam_hash_cra_exit(struct crypto_tfm *tfm)
1709 {
1710         struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1711
1712         if (ctx->sh_desc_update_dma &&
1713             !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_dma))
1714                 dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_dma,
1715                                  desc_bytes(ctx->sh_desc_update),
1716                                  DMA_TO_DEVICE);
1717         if (ctx->sh_desc_update_first_dma &&
1718             !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_first_dma))
1719                 dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_first_dma,
1720                                  desc_bytes(ctx->sh_desc_update_first),
1721                                  DMA_TO_DEVICE);
1722         if (ctx->sh_desc_fin_dma &&
1723             !dma_mapping_error(ctx->jrdev, ctx->sh_desc_fin_dma))
1724                 dma_unmap_single(ctx->jrdev, ctx->sh_desc_fin_dma,
1725                                  desc_bytes(ctx->sh_desc_fin), DMA_TO_DEVICE);
1726         if (ctx->sh_desc_digest_dma &&
1727             !dma_mapping_error(ctx->jrdev, ctx->sh_desc_digest_dma))
1728                 dma_unmap_single(ctx->jrdev, ctx->sh_desc_digest_dma,
1729                                  desc_bytes(ctx->sh_desc_digest),
1730                                  DMA_TO_DEVICE);
1731         if (ctx->sh_desc_finup_dma &&
1732             !dma_mapping_error(ctx->jrdev, ctx->sh_desc_finup_dma))
1733                 dma_unmap_single(ctx->jrdev, ctx->sh_desc_finup_dma,
1734                                  desc_bytes(ctx->sh_desc_finup), DMA_TO_DEVICE);
1735
1736         caam_jr_free(ctx->jrdev);
1737 }
1738
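/*
 * Module exit: unregister and free everything on hash_list; the
 * !hash_list.next check skips teardown if the list was never initialized.
 */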
1739 static void __exit caam_algapi_hash_exit(void)
1740 {
1741         struct caam_hash_alg *t_alg, *n;
1742
1743         if (!hash_list.next)
1744                 return;
1745
1746         list_for_each_entry_safe(t_alg, n, &hash_list, entry) {
1747                 crypto_unregister_ahash(&t_alg->ahash_alg);
1748                 list_del(&t_alg->entry);
1749                 kfree(t_alg);
1750         }
1751 }
1752
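/*
 * Build a registrable ahash_alg from one driver_hash[] template; "keyed"
 * selects the hmac(...) names, otherwise the plain hash names are used.
 */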
1753 static struct caam_hash_alg *
1754 caam_hash_alloc(struct caam_hash_template *template,
1755                 bool keyed)
1756 {
1757         struct caam_hash_alg *t_alg;
1758         struct ahash_alg *halg;
1759         struct crypto_alg *alg;
1760
1761         t_alg = kzalloc(sizeof(struct caam_hash_alg), GFP_KERNEL);
1762         if (!t_alg) {
1763                 pr_err("failed to allocate t_alg\n");
1764                 return ERR_PTR(-ENOMEM);
1765         }
1766
1767         t_alg->ahash_alg = template->template_ahash;
1768         halg = &t_alg->ahash_alg;
1769         alg = &halg->halg.base;
1770
1771         if (keyed) {
1772                 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
1773                          template->hmac_name);
1774                 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
1775                          template->hmac_driver_name);
1776         } else {
1777                 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
1778                          template->name);
1779                 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
1780                          template->driver_name);
1781         }
1782         alg->cra_module = THIS_MODULE;
1783         alg->cra_init = caam_hash_cra_init;
1784         alg->cra_exit = caam_hash_cra_exit;
1785         alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
1786         alg->cra_priority = CAAM_CRA_PRIORITY;
1787         alg->cra_blocksize = template->blocksize;
1788         alg->cra_alignmask = 0;
1789         alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH;
1790         alg->cra_type = &crypto_ahash_type;
1791
1792         t_alg->alg_type = template->alg_type;
1793         t_alg->alg_op = template->alg_op;
1794
1795         return t_alg;
1796 }
1797
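/*
 * Module init: register both the keyed (HMAC) and unkeyed ahash for every
 * template. Failures are logged and that variant is skipped; algorithms
 * already registered stay registered.
 */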
1798 static int __init caam_algapi_hash_init(void)
1799 {
1800         int i = 0, err = 0;
1801
1802         INIT_LIST_HEAD(&hash_list);
1803
1804         /* register crypto algorithms the device supports */
1805         for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
1806                 /* TODO: check if h/w supports alg */
1807                 struct caam_hash_alg *t_alg;
1808
1809                 /* register hmac version */
1810                 t_alg = caam_hash_alloc(&driver_hash[i], true);
1811                 if (IS_ERR(t_alg)) {
1812                         err = PTR_ERR(t_alg);
1813                         pr_warn("%s alg allocation failed\n",
1814                                 driver_hash[i].driver_name);
1815                         continue;
1816                 }
1817
1818                 err = crypto_register_ahash(&t_alg->ahash_alg);
1819                 if (err) {
1820                         pr_warn("%s alg registration failed\n",
1821                                 t_alg->ahash_alg.halg.base.cra_driver_name);
1822                         kfree(t_alg);
1823                 } else
1824                         list_add_tail(&t_alg->entry, &hash_list);
1825
1826                 /* register unkeyed version */
1827                 t_alg = caam_hash_alloc(&driver_hash[i], false);
1828                 if (IS_ERR(t_alg)) {
1829                         err = PTR_ERR(t_alg);
1830                         pr_warn("%s alg allocation failed\n",
1831                                 driver_hash[i].driver_name);
1832                         continue;
1833                 }
1834
1835                 err = crypto_register_ahash(&t_alg->ahash_alg);
1836                 if (err) {
1837                         pr_warn("%s alg registration failed\n",
1838                                 t_alg->ahash_alg.halg.base.cra_driver_name);
1839                         kfree(t_alg);
1840                 } else
1841                         list_add_tail(&t_alg->entry, &hash_list);
1842         }
1843
1844         return err;
1845 }
1846
1847 module_init(caam_algapi_hash_init);
1848 module_exit(caam_algapi_hash_exit);
1849
1850 MODULE_LICENSE("GPL");
1851 MODULE_DESCRIPTION("FSL CAAM support for ahash functions of crypto API");
1852 MODULE_AUTHOR("Freescale Semiconductor - NMG");