drivers/crypto/nx/nx-aes-gcm.c
/**
 * AES GCM routines supporting the Power 7+ Nest Accelerators driver
 *
 * Copyright (C) 2012 International Business Machines Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 only.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Author: Kent Yoder <yoder1@us.ibm.com>
 */

#include <crypto/internal/aead.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/scatterwalk.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <asm/vio.h>

#include "nx_csbcpb.h"
#include "nx.h"

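/*
 * Program the AES key into both coprocessor blocks: the GCM CPB used
 * for the bulk cipher and the GCA CPB used to hash the associated data.
 */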
static int gcm_aes_nx_set_key(struct crypto_aead *tfm,
                              const u8           *in_key,
                              unsigned int        key_len)
{
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base);
        struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
        struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;

        nx_ctx_init(nx_ctx, HCOP_FC_AES);

        switch (key_len) {
        case AES_KEYSIZE_128:
                NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128);
                NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_128);
                nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128];
                break;
        case AES_KEYSIZE_192:
                NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_192);
                NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_192);
                nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_192];
                break;
        case AES_KEYSIZE_256:
                NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_256);
                NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_256);
                nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_256];
                break;
        default:
                return -EINVAL;
        }

        csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM;
        memcpy(csbcpb->cpb.aes_gcm.key, in_key, key_len);

        csbcpb_aead->cpb.hdr.mode = NX_MODE_AES_GCA;
        memcpy(csbcpb_aead->cpb.aes_gca.key, in_key, key_len);

        return 0;
}

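/*
 * RFC 4106 keys carry a 4-byte nonce appended to the AES key; split it
 * off here and save it for IV construction at request time.
 */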
static int gcm4106_aes_nx_set_key(struct crypto_aead *tfm,
                                  const u8           *in_key,
                                  unsigned int        key_len)
{
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base);
        char *nonce = nx_ctx->priv.gcm.nonce;
        int rc;

        if (key_len < 4)
                return -EINVAL;

        key_len -= 4;

        rc = gcm_aes_nx_set_key(tfm, in_key, key_len);
        if (rc)
                goto out;

        memcpy(nonce, in_key + key_len, 4);
out:
        return rc;
}

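/* RFC 4106 permits only 8, 12 or 16 byte authentication tags. */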
static int gcm4106_aes_nx_setauthsize(struct crypto_aead *tfm,
                                      unsigned int authsize)
{
        switch (authsize) {
        case 8:
        case 12:
        case 16:
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

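/*
 * Hash the associated data with the GCA coprocessor function, walking
 * req->src in chunks bounded by the sg list and data length limits.
 * AAD of one block or less is copied straight out for the GCM operation
 * to consume inline. Note that op_aead.inlen comes out negative below;
 * the PFO hcall interface treats a negative length as describing a
 * scatter/gather list rather than direct data (see vio_h_cop_sync()).
 */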
static int nx_gca(struct nx_crypto_ctx  *nx_ctx,
                  struct aead_request   *req,
                  u8                    *out)
{
        int rc;
        struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;
        struct scatter_walk walk;
        struct nx_sg *nx_sg = nx_ctx->in_sg;
        unsigned int nbytes = req->assoclen;
        unsigned int processed = 0, to_process;
        unsigned int max_sg_len;

        if (nbytes <= AES_BLOCK_SIZE) {
                scatterwalk_start(&walk, req->src);
                scatterwalk_copychunks(out, &walk, nbytes, SCATTERWALK_FROM_SG);
                scatterwalk_done(&walk, SCATTERWALK_FROM_SG, 0);
                return 0;
        }

        NX_CPB_FDM(csbcpb_aead) &= ~NX_FDM_CONTINUATION;

        /* page_limit: number of sg entries that fit on one page */
        max_sg_len = min_t(u64, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
                           nx_ctx->ap->sglen);
        max_sg_len = min_t(u64, max_sg_len,
                           nx_ctx->ap->databytelen/NX_PAGE_SIZE);

        do {
                /*
                 * to_process: the data chunk to process in this update.
                 * This value is bound by sg list limits.
                 */
                to_process = min_t(u64, nbytes - processed,
                                   nx_ctx->ap->databytelen);
                to_process = min_t(u64, to_process,
                                   NX_PAGE_SIZE * (max_sg_len - 1));

                nx_sg = nx_walk_and_build(nx_ctx->in_sg, max_sg_len,
                                          req->src, processed, &to_process);

                if ((to_process + processed) < nbytes)
                        NX_CPB_FDM(csbcpb_aead) |= NX_FDM_INTERMEDIATE;
                else
                        NX_CPB_FDM(csbcpb_aead) &= ~NX_FDM_INTERMEDIATE;

                nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_sg)
                                        * sizeof(struct nx_sg);

                rc = nx_hcall_sync(nx_ctx, &nx_ctx->op_aead,
                                req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
                if (rc)
                        return rc;

                memcpy(csbcpb_aead->cpb.aes_gca.in_pat,
                                csbcpb_aead->cpb.aes_gca.out_pat,
                                AES_BLOCK_SIZE);
                NX_CPB_FDM(csbcpb_aead) |= NX_FDM_CONTINUATION;

                atomic_inc(&(nx_ctx->stats->aes_ops));
                atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes));

                processed += to_process;
        } while (processed < nbytes);

        memcpy(out, csbcpb_aead->cpb.aes_gca.out_pat, AES_BLOCK_SIZE);

        return rc;
}

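/*
 * Authentication-only path for requests that have AAD but no payload:
 * run the hardware in GMAC mode over the associated data, chunk by
 * chunk, then restore GCM mode for the caller.
 */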
static int gmac(struct aead_request *req, struct blkcipher_desc *desc)
{
        int rc;
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
        struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
        struct nx_sg *nx_sg;
        unsigned int nbytes = req->assoclen;
        unsigned int processed = 0, to_process;
        unsigned int max_sg_len;

        /* Set GMAC mode */
        csbcpb->cpb.hdr.mode = NX_MODE_AES_GMAC;

        NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;

        /* page_limit: number of sg entries that fit on one page */
        max_sg_len = min_t(u64, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
                           nx_ctx->ap->sglen);
        max_sg_len = min_t(u64, max_sg_len,
                           nx_ctx->ap->databytelen/NX_PAGE_SIZE);

        /* Copy IV */
        memcpy(csbcpb->cpb.aes_gcm.iv_or_cnt, desc->info, AES_BLOCK_SIZE);

        do {
                /*
                 * to_process: the data chunk to process in this update.
                 * This value is bound by sg list limits.
                 */
                to_process = min_t(u64, nbytes - processed,
                                   nx_ctx->ap->databytelen);
                to_process = min_t(u64, to_process,
                                   NX_PAGE_SIZE * (max_sg_len - 1));

                nx_sg = nx_walk_and_build(nx_ctx->in_sg, max_sg_len,
                                          req->src, processed, &to_process);

                if ((to_process + processed) < nbytes)
                        NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
                else
                        NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

                nx_ctx->op.inlen = (nx_ctx->in_sg - nx_sg)
                                        * sizeof(struct nx_sg);

                csbcpb->cpb.aes_gcm.bit_length_data = 0;
                csbcpb->cpb.aes_gcm.bit_length_aad = 8 * nbytes;

                rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
                                req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
                if (rc)
                        goto out;

                memcpy(csbcpb->cpb.aes_gcm.in_pat_or_aad,
                        csbcpb->cpb.aes_gcm.out_pat_or_mac, AES_BLOCK_SIZE);
                memcpy(csbcpb->cpb.aes_gcm.in_s0,
                        csbcpb->cpb.aes_gcm.out_s0, AES_BLOCK_SIZE);

                NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

                atomic_inc(&(nx_ctx->stats->aes_ops));
                atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes));

                processed += to_process;
        } while (processed < nbytes);

out:
        /* Restore GCM mode */
        csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM;
        return rc;
}

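/*
 * Zero-length payload with no AAD: per nx_wb 4.8.1.3 the GCM tag
 * reduces to the encryption of the initial counter block, so a single
 * AES ECB operation on the IV/counter produces the MAC.
 */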
static int gcm_empty(struct aead_request *req, struct blkcipher_desc *desc,
                     int enc)
{
        int rc;
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
        struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
        char out[AES_BLOCK_SIZE];
        struct nx_sg *in_sg, *out_sg;
        int len;

        /* For scenarios where the input message is zero length, AES CTR mode
         * may be used. Set the source data to be a single block (16B) of all
         * zeros, and set the input IV value to be the same as the GMAC IV
         * value. - nx_wb 4.8.1.3 */

        /* Change to ECB mode */
        csbcpb->cpb.hdr.mode = NX_MODE_AES_ECB;
        memcpy(csbcpb->cpb.aes_ecb.key, csbcpb->cpb.aes_gcm.key,
                        sizeof(csbcpb->cpb.aes_ecb.key));
        if (enc)
                NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
        else
                NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;

        len = AES_BLOCK_SIZE;

        /* Encrypt the counter/IV */
        in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) desc->info,
                                 &len, nx_ctx->ap->sglen);

        if (len != AES_BLOCK_SIZE)
                return -EINVAL;

        len = sizeof(out);
        out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *) out, &len,
                                  nx_ctx->ap->sglen);

        if (len != sizeof(out))
                return -EINVAL;

        nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
        nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);

        rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
                           desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
        if (rc)
                goto out;
        atomic_inc(&(nx_ctx->stats->aes_ops));

        /* Copy out the auth tag */
        memcpy(csbcpb->cpb.aes_gcm.out_pat_or_mac, out,
                        crypto_aead_authsize(crypto_aead_reqtfm(req)));
out:
        /* Restore GCM mode */
        csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM;

        /*
         * The ECB key uses the same CPB region as the GCM AAD and
         * counter, so it's safe to just fill it with zeroes.
         */
        memset(csbcpb->cpb.aes_ecb.key, 0, sizeof(csbcpb->cpb.aes_ecb.key));

        return rc;
}

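/*
 * Main GCM routine: set up the initial counter, hash any associated
 * data, process the payload in hardware-sized chunks, then write out
 * the auth tag (encrypt) or compare it against the received one
 * (decrypt). Everything runs under nx_ctx->lock, which serializes use
 * of the per-tfm CPB state.
 */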
static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
{
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
        struct nx_gcm_rctx *rctx = aead_request_ctx(req);
        struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
        struct blkcipher_desc desc;
        unsigned int nbytes = req->cryptlen;
        unsigned int processed = 0, to_process;
        unsigned long irq_flags;
        int rc = -EINVAL;

        spin_lock_irqsave(&nx_ctx->lock, irq_flags);

        desc.info = rctx->iv;
        /* initialize the counter */
        *(u32 *)(desc.info + NX_GCM_CTR_OFFSET) = 1;

        if (nbytes == 0) {
                if (req->assoclen == 0)
                        rc = gcm_empty(req, &desc, enc);
                else
                        rc = gmac(req, &desc);
                if (rc)
                        goto out;
                else
                        goto mac;
        }

        /* Process associated data */
        csbcpb->cpb.aes_gcm.bit_length_aad = req->assoclen * 8;
        if (req->assoclen) {
                rc = nx_gca(nx_ctx, req, csbcpb->cpb.aes_gcm.in_pat_or_aad);
                if (rc)
                        goto out;
        }

        /* Set flags for encryption */
        NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;
        if (enc) {
                NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
        } else {
                NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
                nbytes -= crypto_aead_authsize(crypto_aead_reqtfm(req));
        }

        do {
                to_process = nbytes - processed;

                csbcpb->cpb.aes_gcm.bit_length_data = nbytes * 8;
                desc.tfm = (struct crypto_blkcipher *) req->base.tfm;
                rc = nx_build_sg_lists(nx_ctx, &desc, req->dst,
                                       req->src, &to_process,
                                       processed + req->assoclen,
                                       csbcpb->cpb.aes_gcm.iv_or_cnt);

                if (rc)
                        goto out;

                if ((to_process + processed) < nbytes)
                        NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
                else
                        NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

                rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
                                   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
                if (rc)
                        goto out;

                memcpy(desc.info, csbcpb->cpb.aes_gcm.out_cnt, AES_BLOCK_SIZE);
                memcpy(csbcpb->cpb.aes_gcm.in_pat_or_aad,
                        csbcpb->cpb.aes_gcm.out_pat_or_mac, AES_BLOCK_SIZE);
                memcpy(csbcpb->cpb.aes_gcm.in_s0,
                        csbcpb->cpb.aes_gcm.out_s0, AES_BLOCK_SIZE);

                NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

                atomic_inc(&(nx_ctx->stats->aes_ops));
                atomic64_add(csbcpb->csb.processed_byte_count,
                             &(nx_ctx->stats->aes_bytes));

                processed += to_process;
        } while (processed < nbytes);

mac:
        if (enc) {
                /* copy out the auth tag */
                scatterwalk_map_and_copy(
                        csbcpb->cpb.aes_gcm.out_pat_or_mac,
                        req->dst, req->assoclen + nbytes,
                        crypto_aead_authsize(crypto_aead_reqtfm(req)),
                        SCATTERWALK_TO_SG);
        } else {
                u8 *itag = nx_ctx->priv.gcm.iauth_tag;
                u8 *otag = csbcpb->cpb.aes_gcm.out_pat_or_mac;

                scatterwalk_map_and_copy(
                        itag, req->src, req->assoclen + nbytes,
                        crypto_aead_authsize(crypto_aead_reqtfm(req)),
                        SCATTERWALK_FROM_SG);
                rc = memcmp(itag, otag,
                            crypto_aead_authsize(crypto_aead_reqtfm(req))) ?
                     -EBADMSG : 0;
        }
out:
        spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
        return rc;
}

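/* gcm(aes) requests supply the full 12-byte IV themselves. */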
static int gcm_aes_nx_encrypt(struct aead_request *req)
{
        struct nx_gcm_rctx *rctx = aead_request_ctx(req);
        char *iv = rctx->iv;

        memcpy(iv, req->iv, 12);

        return gcm_aes_nx_crypt(req, 1);
}

static int gcm_aes_nx_decrypt(struct aead_request *req)
{
        struct nx_gcm_rctx *rctx = aead_request_ctx(req);
        char *iv = rctx->iv;

        memcpy(iv, req->iv, 12);

        return gcm_aes_nx_crypt(req, 0);
}

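/*
 * rfc4106 builds the 12-byte GCM IV from the 4-byte nonce saved at
 * setkey time followed by the 8-byte per-request IV.
 */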
static int gcm4106_aes_nx_encrypt(struct aead_request *req)
{
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
        struct nx_gcm_rctx *rctx = aead_request_ctx(req);
        char *iv = rctx->iv;
        char *nonce = nx_ctx->priv.gcm.nonce;

        memcpy(iv, nonce, NX_GCM4106_NONCE_LEN);
        memcpy(iv + NX_GCM4106_NONCE_LEN, req->iv, 8);

        return gcm_aes_nx_crypt(req, 1);
}

static int gcm4106_aes_nx_decrypt(struct aead_request *req)
{
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
        struct nx_gcm_rctx *rctx = aead_request_ctx(req);
        char *iv = rctx->iv;
        char *nonce = nx_ctx->priv.gcm.nonce;

        memcpy(iv, nonce, NX_GCM4106_NONCE_LEN);
        memcpy(iv + NX_GCM4106_NONCE_LEN, req->iv, 8);

        return gcm_aes_nx_crypt(req, 0);
}

/* tell the block cipher walk routines that this is a stream cipher by
 * setting cra_blocksize to 1. Even using blkcipher_walk_virt_block
 * during encrypt/decrypt doesn't solve this problem, because it calls
 * blkcipher_walk_done under the covers, which doesn't use walk->blocksize,
 * but instead uses this tfm->blocksize. */
struct aead_alg nx_gcm_aes_alg = {
        .base = {
                .cra_name        = "gcm(aes)",
                .cra_driver_name = "gcm-aes-nx",
                .cra_priority    = 300,
                .cra_blocksize   = 1,
                .cra_ctxsize     = sizeof(struct nx_crypto_ctx),
                .cra_module      = THIS_MODULE,
        },
        .init        = nx_crypto_ctx_aes_gcm_init,
        .exit        = nx_crypto_ctx_aead_exit,
        .ivsize      = 12,
        .maxauthsize = AES_BLOCK_SIZE,
        .setkey      = gcm_aes_nx_set_key,
        .encrypt     = gcm_aes_nx_encrypt,
        .decrypt     = gcm_aes_nx_decrypt,
};

struct aead_alg nx_gcm4106_aes_alg = {
        .base = {
                .cra_name        = "rfc4106(gcm(aes))",
                .cra_driver_name = "rfc4106-gcm-aes-nx",
                .cra_priority    = 300,
                .cra_blocksize   = 1,
                .cra_ctxsize     = sizeof(struct nx_crypto_ctx),
                .cra_module      = THIS_MODULE,
        },
        .init        = nx_crypto_ctx_aes_gcm_init,
        .exit        = nx_crypto_ctx_aead_exit,
        .ivsize      = 8,
        .maxauthsize = AES_BLOCK_SIZE,
        .setkey      = gcm4106_aes_nx_set_key,
        .setauthsize = gcm4106_aes_nx_setauthsize,
        .encrypt     = gcm4106_aes_nx_encrypt,
        .decrypt     = gcm4106_aes_nx_decrypt,
};