net/xfrm/xfrm_algo.c
/*
 * xfrm algorithm interface
 *
 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pfkeyv2.h>
#include <linux/crypto.h>
#include <net/xfrm.h>
#if defined(CONFIG_INET_AH) || defined(CONFIG_INET_AH_MODULE) || defined(CONFIG_INET6_AH) || defined(CONFIG_INET6_AH_MODULE)
#include <net/ah.h>
#endif
#if defined(CONFIG_INET_ESP) || defined(CONFIG_INET_ESP_MODULE) || defined(CONFIG_INET6_ESP) || defined(CONFIG_INET6_ESP_MODULE)
#include <net/esp.h>
#endif
#include <asm/scatterlist.h>

/*
 * Algorithms supported by IPsec.  These entries contain properties which
 * are used in key negotiation and xfrm processing, and are used to verify
 * that instantiated crypto transforms have correct parameters for IPsec
 * purposes.
 */
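/*
 * Note on the auth entries below: icv_fullbits is the full digest size of
 * the underlying hash, while icv_truncbits is the truncated ICV length
 * actually carried on the wire (e.g. HMAC-SHA1 produces a 160-bit digest
 * that is truncated to 96 bits per RFC 2404).
 */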
static struct xfrm_algo_desc aalg_list[] = {
{
        .name = "hmac(digest_null)",
        .compat = "digest_null",

        .uinfo = {
                .auth = {
                        .icv_truncbits = 0,
                        .icv_fullbits = 0,
                }
        },

        .desc = {
                .sadb_alg_id = SADB_X_AALG_NULL,
                .sadb_alg_ivlen = 0,
                .sadb_alg_minbits = 0,
                .sadb_alg_maxbits = 0
        }
},
{
        .name = "hmac(md5)",
        .compat = "md5",

        .uinfo = {
                .auth = {
                        .icv_truncbits = 96,
                        .icv_fullbits = 128,
                }
        },

        .desc = {
                .sadb_alg_id = SADB_AALG_MD5HMAC,
                .sadb_alg_ivlen = 0,
                .sadb_alg_minbits = 128,
                .sadb_alg_maxbits = 128
        }
},
{
        .name = "hmac(sha1)",
        .compat = "sha1",

        .uinfo = {
                .auth = {
                        .icv_truncbits = 96,
                        .icv_fullbits = 160,
                }
        },

        .desc = {
                .sadb_alg_id = SADB_AALG_SHA1HMAC,
                .sadb_alg_ivlen = 0,
                .sadb_alg_minbits = 160,
                .sadb_alg_maxbits = 160
        }
},
{
        .name = "hmac(sha256)",
        .compat = "sha256",

        .uinfo = {
                .auth = {
                        .icv_truncbits = 96,
                        .icv_fullbits = 256,
                }
        },

        .desc = {
                .sadb_alg_id = SADB_X_AALG_SHA2_256HMAC,
                .sadb_alg_ivlen = 0,
                .sadb_alg_minbits = 256,
                .sadb_alg_maxbits = 256
        }
},
{
        .name = "hmac(ripemd160)",
        .compat = "ripemd160",

        .uinfo = {
                .auth = {
                        .icv_truncbits = 96,
                        .icv_fullbits = 160,
                }
        },

        .desc = {
                .sadb_alg_id = SADB_X_AALG_RIPEMD160HMAC,
                .sadb_alg_ivlen = 0,
                .sadb_alg_minbits = 160,
                .sadb_alg_maxbits = 160
        }
},
{
        .name = "xcbc(aes)",

        .uinfo = {
                .auth = {
                        .icv_truncbits = 96,
                        .icv_fullbits = 128,
                }
        },

        .desc = {
                .sadb_alg_id = SADB_X_AALG_AES_XCBC_MAC,
                .sadb_alg_ivlen = 0,
                .sadb_alg_minbits = 128,
                .sadb_alg_maxbits = 128
        }
},
};

static struct xfrm_algo_desc ealg_list[] = {
{
        .name = "ecb(cipher_null)",
        .compat = "cipher_null",

        .uinfo = {
                .encr = {
                        .blockbits = 8,
                        .defkeybits = 0,
                }
        },

        .desc = {
                .sadb_alg_id = SADB_EALG_NULL,
                .sadb_alg_ivlen = 0,
                .sadb_alg_minbits = 0,
                .sadb_alg_maxbits = 0
        }
},
{
        .name = "cbc(des)",
        .compat = "des",

        .uinfo = {
                .encr = {
                        .blockbits = 64,
                        .defkeybits = 64,
                }
        },

        .desc = {
                .sadb_alg_id = SADB_EALG_DESCBC,
                .sadb_alg_ivlen = 8,
                .sadb_alg_minbits = 64,
                .sadb_alg_maxbits = 64
        }
},
{
        .name = "cbc(des3_ede)",
        .compat = "des3_ede",

        .uinfo = {
                .encr = {
                        .blockbits = 64,
                        .defkeybits = 192,
                }
        },

        .desc = {
                .sadb_alg_id = SADB_EALG_3DESCBC,
                .sadb_alg_ivlen = 8,
                .sadb_alg_minbits = 192,
                .sadb_alg_maxbits = 192
        }
},
{
        .name = "cbc(cast128)",
        .compat = "cast128",

        .uinfo = {
                .encr = {
                        .blockbits = 64,
                        .defkeybits = 128,
                }
        },

        .desc = {
                .sadb_alg_id = SADB_X_EALG_CASTCBC,
                .sadb_alg_ivlen = 8,
                .sadb_alg_minbits = 40,
                .sadb_alg_maxbits = 128
        }
},
{
        .name = "cbc(blowfish)",
        .compat = "blowfish",

        .uinfo = {
                .encr = {
                        .blockbits = 64,
                        .defkeybits = 128,
                }
        },

        .desc = {
                .sadb_alg_id = SADB_X_EALG_BLOWFISHCBC,
                .sadb_alg_ivlen = 8,
                .sadb_alg_minbits = 40,
                .sadb_alg_maxbits = 448
        }
},
{
        .name = "cbc(aes)",
        .compat = "aes",

        .uinfo = {
                .encr = {
                        .blockbits = 128,
                        .defkeybits = 128,
                }
        },

        .desc = {
                .sadb_alg_id = SADB_X_EALG_AESCBC,
                .sadb_alg_ivlen = 8,
                .sadb_alg_minbits = 128,
                .sadb_alg_maxbits = 256
        }
},
{
        .name = "cbc(serpent)",
        .compat = "serpent",

        .uinfo = {
                .encr = {
                        .blockbits = 128,
                        .defkeybits = 128,
                }
        },

        .desc = {
                .sadb_alg_id = SADB_X_EALG_SERPENTCBC,
                .sadb_alg_ivlen = 8,
                .sadb_alg_minbits = 128,
                .sadb_alg_maxbits = 256,
        }
},
{
        .name = "cbc(camellia)",

        .uinfo = {
                .encr = {
                        .blockbits = 128,
                        .defkeybits = 128,
                }
        },

        .desc = {
                .sadb_alg_id = SADB_X_EALG_CAMELLIACBC,
                .sadb_alg_ivlen = 8,
                .sadb_alg_minbits = 128,
                .sadb_alg_maxbits = 256
        }
},
{
        .name = "cbc(twofish)",
        .compat = "twofish",

        .uinfo = {
                .encr = {
                        .blockbits = 128,
                        .defkeybits = 128,
                }
        },

        .desc = {
                .sadb_alg_id = SADB_X_EALG_TWOFISHCBC,
                .sadb_alg_ivlen = 8,
                .sadb_alg_minbits = 128,
                .sadb_alg_maxbits = 256
        }
},
};

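/*
 * Compression algorithms for IPComp.  The threshold is used by the IPComp
 * code as a minimum packet size, in bytes, below which compression is not
 * attempted (very small packets rarely compress well enough to be worth it).
 */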
static struct xfrm_algo_desc calg_list[] = {
{
        .name = "deflate",
        .uinfo = {
                .comp = {
                        .threshold = 90,
                }
        },
        .desc = { .sadb_alg_id = SADB_X_CALG_DEFLATE }
},
{
        .name = "lzs",
        .uinfo = {
                .comp = {
                        .threshold = 90,
                }
        },
        .desc = { .sadb_alg_id = SADB_X_CALG_LZS }
},
{
        .name = "lzjh",
        .uinfo = {
                .comp = {
                        .threshold = 50,
                }
        },
        .desc = { .sadb_alg_id = SADB_X_CALG_LZJH }
},
};

static inline int aalg_entries(void)
{
        return ARRAY_SIZE(aalg_list);
}

static inline int ealg_entries(void)
{
        return ARRAY_SIZE(ealg_list);
}

static inline int calg_entries(void)
{
        return ARRAY_SIZE(calg_list);
}

/* Todo: generic iterators */
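/*
 * The *_get_byid() helpers look an algorithm up by its PF_KEY (SADB)
 * algorithm identifier.  They only return entries whose "available" flag
 * has already been set, e.g. by xfrm_probe_algs().
 */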
struct xfrm_algo_desc *xfrm_aalg_get_byid(int alg_id)
{
        int i;

        for (i = 0; i < aalg_entries(); i++) {
                if (aalg_list[i].desc.sadb_alg_id == alg_id) {
                        if (aalg_list[i].available)
                                return &aalg_list[i];
                        else
                                break;
                }
        }
        return NULL;
}
EXPORT_SYMBOL_GPL(xfrm_aalg_get_byid);

struct xfrm_algo_desc *xfrm_ealg_get_byid(int alg_id)
{
        int i;

        for (i = 0; i < ealg_entries(); i++) {
                if (ealg_list[i].desc.sadb_alg_id == alg_id) {
                        if (ealg_list[i].available)
                                return &ealg_list[i];
                        else
                                break;
                }
        }
        return NULL;
}
EXPORT_SYMBOL_GPL(xfrm_ealg_get_byid);

struct xfrm_algo_desc *xfrm_calg_get_byid(int alg_id)
{
        int i;

        for (i = 0; i < calg_entries(); i++) {
                if (calg_list[i].desc.sadb_alg_id == alg_id) {
                        if (calg_list[i].available)
                                return &calg_list[i];
                        else
                                break;
                }
        }
        return NULL;
}
EXPORT_SYMBOL_GPL(xfrm_calg_get_byid);

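/*
 * Look an algorithm up by its crypto layer name (or legacy "compat" name).
 * When @probe is set and the entry has not been marked available yet,
 * crypto_has_alg() is consulted; this may cause the matching crypto module
 * to be loaded, and a successful probe is cached in the entry's
 * "available" flag.
 */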
static struct xfrm_algo_desc *xfrm_get_byname(struct xfrm_algo_desc *list,
                                              int entries, u32 type, u32 mask,
                                              char *name, int probe)
{
        int i, status;

        if (!name)
                return NULL;

        for (i = 0; i < entries; i++) {
                if (strcmp(name, list[i].name) &&
                    (!list[i].compat || strcmp(name, list[i].compat)))
                        continue;

                if (list[i].available)
                        return &list[i];

                if (!probe)
                        break;

                status = crypto_has_alg(list[i].name, type,
                                        mask | CRYPTO_ALG_ASYNC);
                if (!status)
                        break;

                list[i].available = status;
                return &list[i];
        }
        return NULL;
}

struct xfrm_algo_desc *xfrm_aalg_get_byname(char *name, int probe)
{
        return xfrm_get_byname(aalg_list, aalg_entries(),
                               CRYPTO_ALG_TYPE_HASH, CRYPTO_ALG_TYPE_HASH_MASK,
                               name, probe);
}
EXPORT_SYMBOL_GPL(xfrm_aalg_get_byname);

struct xfrm_algo_desc *xfrm_ealg_get_byname(char *name, int probe)
{
        return xfrm_get_byname(ealg_list, ealg_entries(),
                               CRYPTO_ALG_TYPE_BLKCIPHER, CRYPTO_ALG_TYPE_MASK,
                               name, probe);
}
EXPORT_SYMBOL_GPL(xfrm_ealg_get_byname);

struct xfrm_algo_desc *xfrm_calg_get_byname(char *name, int probe)
{
        return xfrm_get_byname(calg_list, calg_entries(),
                               CRYPTO_ALG_TYPE_COMPRESS, CRYPTO_ALG_TYPE_MASK,
                               name, probe);
}
EXPORT_SYMBOL_GPL(xfrm_calg_get_byname);

struct xfrm_algo_desc *xfrm_aalg_get_byidx(unsigned int idx)
{
        if (idx >= aalg_entries())
                return NULL;

        return &aalg_list[idx];
}
EXPORT_SYMBOL_GPL(xfrm_aalg_get_byidx);

struct xfrm_algo_desc *xfrm_ealg_get_byidx(unsigned int idx)
{
        if (idx >= ealg_entries())
                return NULL;

        return &ealg_list[idx];
}
EXPORT_SYMBOL_GPL(xfrm_ealg_get_byidx);

/*
 * Probe for the availability of crypto algorithms, and set the available
 * flag for any algorithms found on the system.  This is typically called by
 * pfkey during userspace SA add, update or register.
 */
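/*
 * Probing can sleep (the crypto layer may load modules while looking an
 * algorithm up), hence the BUG_ON(in_softirq()) below: callers must be in
 * process context.
 */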
void xfrm_probe_algs(void)
{
#ifdef CONFIG_CRYPTO
        int i, status;

        BUG_ON(in_softirq());

        for (i = 0; i < aalg_entries(); i++) {
                status = crypto_has_hash(aalg_list[i].name, 0,
                                         CRYPTO_ALG_ASYNC);
                if (aalg_list[i].available != status)
                        aalg_list[i].available = status;
        }

        for (i = 0; i < ealg_entries(); i++) {
                status = crypto_has_blkcipher(ealg_list[i].name, 0,
                                              CRYPTO_ALG_ASYNC);
                if (ealg_list[i].available != status)
                        ealg_list[i].available = status;
        }

        for (i = 0; i < calg_entries(); i++) {
                status = crypto_has_comp(calg_list[i].name, 0,
                                         CRYPTO_ALG_ASYNC);
                if (calg_list[i].available != status)
                        calg_list[i].available = status;
        }
#endif
}
EXPORT_SYMBOL_GPL(xfrm_probe_algs);

int xfrm_count_auth_supported(void)
{
        int i, n;

        for (i = 0, n = 0; i < aalg_entries(); i++)
                if (aalg_list[i].available)
                        n++;
        return n;
}
EXPORT_SYMBOL_GPL(xfrm_count_auth_supported);

int xfrm_count_enc_supported(void)
{
        int i, n;

        for (i = 0, n = 0; i < ealg_entries(); i++)
                if (ealg_list[i].available)
                        n++;
        return n;
}
EXPORT_SYMBOL_GPL(xfrm_count_enc_supported);

/* Move to common area: it is shared with AH. */

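/*
 * Walk the skb and feed every byte in [offset, offset + len) to icv_update
 * (typically a crypto hash update callback), covering the linear header,
 * the paged fragments and any frag_list skbs without linearizing the
 * packet.  Returns 0 on success or the error returned by icv_update.
 */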
int skb_icv_walk(const struct sk_buff *skb, struct hash_desc *desc,
                 int offset, int len, icv_update_fn_t icv_update)
{
        int start = skb_headlen(skb);
        int i, copy = start - offset;
        int err;
        struct scatterlist sg;

        /* Checksum header. */
        if (copy > 0) {
                if (copy > len)
                        copy = len;

                sg.page = virt_to_page(skb->data + offset);
                sg.offset = (unsigned long)(skb->data + offset) % PAGE_SIZE;
                sg.length = copy;

                err = icv_update(desc, &sg, copy);
                if (unlikely(err))
                        return err;

                if ((len -= copy) == 0)
                        return 0;
                offset += copy;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                int end;

                BUG_TRAP(start <= offset + len);

                end = start + skb_shinfo(skb)->frags[i].size;
                if ((copy = end - offset) > 0) {
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                        if (copy > len)
                                copy = len;

                        sg.page = frag->page;
                        sg.offset = frag->page_offset + offset-start;
                        sg.length = copy;

                        err = icv_update(desc, &sg, copy);
                        if (unlikely(err))
                                return err;

                        if (!(len -= copy))
                                return 0;
                        offset += copy;
                }
                start = end;
        }

        if (skb_shinfo(skb)->frag_list) {
                struct sk_buff *list = skb_shinfo(skb)->frag_list;

                for (; list; list = list->next) {
                        int end;

                        BUG_TRAP(start <= offset + len);

                        end = start + list->len;
                        if ((copy = end - offset) > 0) {
                                if (copy > len)
                                        copy = len;
                                err = skb_icv_walk(list, desc, offset-start,
                                                   copy, icv_update);
                                if (unlikely(err))
                                        return err;
                                if ((len -= copy) == 0)
                                        return 0;
                                offset += copy;
                        }
                        start = end;
                }
        }
        BUG_ON(len);
        return 0;
}
EXPORT_SYMBOL_GPL(skb_icv_walk);

#if defined(CONFIG_INET_ESP) || defined(CONFIG_INET_ESP_MODULE) || defined(CONFIG_INET6_ESP) || defined(CONFIG_INET6_ESP_MODULE)

/* Although this looks generic, it is not used anywhere else. */

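/*
 * Map the bytes [offset, offset + len) of the skb onto the scatterlist sg:
 * one entry for the linear area, one per paged fragment, plus a recursive
 * walk of the frag_list.  Returns the number of scatterlist entries filled
 * in; the caller must provide a large enough array.
 */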
int
skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
{
        int start = skb_headlen(skb);
        int i, copy = start - offset;
        int elt = 0;

        if (copy > 0) {
                if (copy > len)
                        copy = len;
                sg[elt].page = virt_to_page(skb->data + offset);
                sg[elt].offset = (unsigned long)(skb->data + offset) % PAGE_SIZE;
                sg[elt].length = copy;
                elt++;
                if ((len -= copy) == 0)
                        return elt;
                offset += copy;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                int end;

                BUG_TRAP(start <= offset + len);

                end = start + skb_shinfo(skb)->frags[i].size;
                if ((copy = end - offset) > 0) {
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                        if (copy > len)
                                copy = len;
                        sg[elt].page = frag->page;
                        sg[elt].offset = frag->page_offset+offset-start;
                        sg[elt].length = copy;
                        elt++;
                        if (!(len -= copy))
                                return elt;
                        offset += copy;
                }
                start = end;
        }

        if (skb_shinfo(skb)->frag_list) {
                struct sk_buff *list = skb_shinfo(skb)->frag_list;

                for (; list; list = list->next) {
                        int end;

                        BUG_TRAP(start <= offset + len);

                        end = start + list->len;
                        if ((copy = end - offset) > 0) {
                                if (copy > len)
                                        copy = len;
                                elt += skb_to_sgvec(list, sg+elt, offset - start, copy);
                                if ((len -= copy) == 0)
                                        return elt;
                                offset += copy;
                        }
                        start = end;
                }
        }
        BUG_ON(len);
        return elt;
}
EXPORT_SYMBOL_GPL(skb_to_sgvec);

/* Check that the skb data is writable. If it is not, copy the data to a
 * newly created private area. If "tailbits" is given, make sure that
 * tailbits bytes beyond the current end of the skb are writable.
 *
 * Returns the number of scatterlist elements to load for subsequent
 * transformations, and a pointer to the writable trailer skb.
 */

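/*
 * Typical ESP-style usage (a sketch only, error handling omitted): call
 * skb_cow_data() to make the payload writable and learn how many
 * scatterlist entries are needed, then skb_to_sgvec() to build the list:
 *
 *	struct sk_buff *trailer;
 *	int nfrags = skb_cow_data(skb, tailroom_needed, &trailer);
 *
 *	if (nfrags >= 0)
 *		skb_to_sgvec(skb, sg, offset, payload_len);
 *
 * where "tailroom_needed", "sg", "offset" and "payload_len" are supplied
 * by the caller.
 */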
int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
{
        int copyflag;
        int elt;
        struct sk_buff *skb1, **skb_p;

        /* If the skb is cloned or its head is paged, reallocate the
         * head, pulling out all the pages (pages are considered not writable
         * at the moment even if they are anonymous).
         */
        if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) &&
            __pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL)
                return -ENOMEM;

        /* Easy case. Most packets will go this way. */
        if (!skb_shinfo(skb)->frag_list) {
                /* A bit of trouble: not enough space for the trailer.
                 * This should not happen when the stack is tuned to generate
                 * good frames. On a miss we reallocate and reserve even more
                 * space; 128 bytes is fair. */

                if (skb_tailroom(skb) < tailbits &&
                    pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC))
                        return -ENOMEM;

                /* Voila! */
                *trailer = skb;
                return 1;
        }

        /* Hard case: walk the frag_list and copy any fragment that is not
         * writable. */

        elt = 1;
        skb_p = &skb_shinfo(skb)->frag_list;
        copyflag = 0;

        while ((skb1 = *skb_p) != NULL) {
                int ntail = 0;

                /* The fragment may have been partially pulled by someone;
                 * this can happen on input. Copy it and everything
                 * after it. */

                if (skb_shared(skb1))
                        copyflag = 1;

                /* If this skb is the last one, worry about the trailer. */

                if (skb1->next == NULL && tailbits) {
                        if (skb_shinfo(skb1)->nr_frags ||
                            skb_shinfo(skb1)->frag_list ||
                            skb_tailroom(skb1) < tailbits)
                                ntail = tailbits + 128;
                }

                if (copyflag ||
                    skb_cloned(skb1) ||
                    ntail ||
                    skb_shinfo(skb1)->nr_frags ||
                    skb_shinfo(skb1)->frag_list) {
                        struct sk_buff *skb2;

                        /* The expensive path: copy the fragment. */
                        if (ntail == 0)
                                skb2 = skb_copy(skb1, GFP_ATOMIC);
                        else
                                skb2 = skb_copy_expand(skb1,
                                                       skb_headroom(skb1),
                                                       ntail,
                                                       GFP_ATOMIC);
                        if (unlikely(skb2 == NULL))
                                return -ENOMEM;

                        if (skb1->sk)
                                skb_set_owner_w(skb2, skb1->sk);

                        /* Link the new skb in and drop the old one. */

                        skb2->next = skb1->next;
                        *skb_p = skb2;
                        kfree_skb(skb1);
                        skb1 = skb2;
                }
                elt++;
                *trailer = skb1;
                skb_p = &skb1->next;
        }

        return elt;
}
EXPORT_SYMBOL_GPL(skb_cow_data);

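/*
 * Append len bytes to the trailer skb returned by skb_cow_data(), keeping
 * the parent skb's length accounting consistent when the trailer is a
 * separate skb on the frag_list.
 */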
void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len)
{
        if (tail != skb) {
                skb->data_len += len;
                skb->len += len;
        }
        return skb_put(tail, len);
}
EXPORT_SYMBOL_GPL(pskb_put);
#endif