crypto - Update crypto for AES XTS and CTR (dragonfly.git: sys/opencrypto/cryptosoft.c)
1 /*-
2  * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
3  * Copyright (c) 2002-2006 Sam Leffler, Errno Consulting
4  *
5  * This code was written by Angelos D. Keromytis in Athens, Greece, in
6  * February 2000. Network Security Technologies Inc. (NSTI) kindly
7  * supported the development of this code.
8  *
9  * Copyright (c) 2000, 2001 Angelos D. Keromytis
10  *
11  * SMP modifications by Matthew Dillon for the DragonFlyBSD Project
12  *
13  * Permission to use, copy, and modify this software with or without fee
14  * is hereby granted, provided that this entire notice is included in
15  * all source code copies of any software which is or includes a copy or
16  * modification of this software.
17  *
18  * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
19  * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
20  * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
21  * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
22  * PURPOSE.
23  *
24  * $FreeBSD: src/sys/opencrypto/cryptosoft.c,v 1.23 2009/02/05 17:43:12 imp Exp $
25  * $OpenBSD: cryptosoft.c,v 1.35 2002/04/26 08:43:50 deraadt Exp $
26  */
27
28 #include <sys/param.h>
29 #include <sys/systm.h>
30 #include <sys/malloc.h>
31 #include <sys/mbuf.h>
32 #include <sys/module.h>
33 #include <sys/sysctl.h>
34 #include <sys/errno.h>
35 #include <sys/random.h>
36 #include <sys/kernel.h>
37 #include <sys/uio.h>
38 #include <sys/spinlock2.h>
39
40 #include <crypto/blowfish/blowfish.h>
41 #include <crypto/sha1.h>
42 #include <opencrypto/rmd160.h>
43 #include <opencrypto/cast.h>
44 #include <opencrypto/skipjack.h>
45 #include <sys/md5.h>
46
47 #include <opencrypto/cryptodev.h>
48 #include <opencrypto/cryptosoft.h>
49 #include <opencrypto/xform.h>
50
51 #include <sys/kobj.h>
52 #include <sys/bus.h>
53 #include "cryptodev_if.h"
54
55 static  int32_t swcr_id;
56 static  struct swcr_data **swcr_sessions = NULL;
57 static  u_int32_t swcr_sesnum;
58 static  u_int32_t swcr_minsesnum = 1;
59
60 static struct spinlock swcr_spin = SPINLOCK_INITIALIZER(swcr_spin);
61
62 u_int8_t hmac_ipad_buffer[HMAC_MAX_BLOCK_LEN];
63 u_int8_t hmac_opad_buffer[HMAC_MAX_BLOCK_LEN];
64
65 static  int swcr_encdec(struct cryptodesc *, struct swcr_data *, caddr_t, int);
66 static  int swcr_authcompute(struct cryptodesc *, struct swcr_data *, caddr_t, int);
67 static  int swcr_compdec(struct cryptodesc *, struct swcr_data *, caddr_t, int);
68 static  int swcr_freesession(device_t dev, u_int64_t tid);
69 static  int swcr_freesession_slot(struct swcr_data **swdp, u_int32_t sid);
70
71 /*
72  * Apply a symmetric encryption/decryption algorithm.
73  */
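/*
 * The request data may live in an mbuf chain (CRYPTO_F_IMBUF), a struct uio
 * (CRYPTO_F_IOV) or a plain contiguous buffer, and each case is walked block
 * by block below.  For the classic block ciphers this routine performs the
 * CBC chaining (XOR with the previous ciphertext block) itself; xforms that
 * supply a reinit hook, such as the AES XTS and CTR modes this change adds,
 * keep their tweak/counter state in the key schedule context, so their
 * encrypt/decrypt entry points are simply called once per block with no
 * chaining here.
 */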
74 static int
75 swcr_encdec(struct cryptodesc *crd, struct swcr_data *sw, caddr_t buf,
76     int flags)
77 {
78         unsigned char iv[EALG_MAX_BLOCK_LEN], blk[EALG_MAX_BLOCK_LEN], *idat;
79         unsigned char *ivp, piv[EALG_MAX_BLOCK_LEN];
80         u_int8_t *kschedule;
81         u_int8_t *okschedule;
82         struct enc_xform *exf;
83         int i, k, j, blks, ivlen;
84         int error;
85         int explicit_kschedule;
86
87         exf = sw->sw_exf;
88         blks = exf->blocksize;
89         ivlen = exf->ivsize;
90
91         /* Check for non-padded data */
92         if (crd->crd_len % blks)
93                 return EINVAL;
94
95         /* Initialize the IV */
96         if (crd->crd_flags & CRD_F_ENCRYPT) {
97                 /* IV explicitly provided ? */
98                 if (crd->crd_flags & CRD_F_IV_EXPLICIT)
99                         bcopy(crd->crd_iv, iv, ivlen);
100                 else
101                         karc4rand(iv, ivlen);
102
103                 /* Write the IV to the buffer unless it is already present */
104                 if (!(crd->crd_flags & CRD_F_IV_PRESENT))
105                         crypto_copyback(flags, buf, crd->crd_inject, ivlen, iv);
106
107         } else {        /* Decryption */
108                 /* IV explicitly provided ? */
109                 if (crd->crd_flags & CRD_F_IV_EXPLICIT)
110                         bcopy(crd->crd_iv, iv, ivlen);
111                 else {
112                         /* Get IV off buf */
113                         crypto_copydata(flags, buf, crd->crd_inject, ivlen, iv);
114                 }
115         }
116
117         ivp = iv;
118
119         /*
120          * xforms that provide a reinit method perform all IV
121          * handling themselves.
122          */
123         if (exf->reinit)
124                 exf->reinit(sw->sw_kschedule, iv);
125
126         /*
127          * The semantics are seriously broken because the session key
128          * storage was never designed for concurrent ops.
129          */
130         if (crd->crd_flags & CRD_F_KEY_EXPLICIT) {
131                 kschedule = NULL;
132                 explicit_kschedule = 1;
133                 error = exf->setkey(&kschedule,
134                                     crd->crd_key, crd->crd_klen / 8);
135                 if (error)
136                         goto done;
137         } else {
138                 spin_lock_wr(&swcr_spin);
139                 kschedule = sw->sw_kschedule;
140                 ++sw->sw_kschedule_refs;
141                 spin_unlock_wr(&swcr_spin);
142                 explicit_kschedule = 0;
143         }
144
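        /*
         * At this point "kschedule" is either the session's schedule, pinned
         * via sw_kschedule_refs so a concurrent re-key cannot free it under
         * us, or a private schedule built from the per-operation key; the
         * reference (or the private schedule) is disposed of at "done:".
         */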
145         if (flags & CRYPTO_F_IMBUF) {
146                 struct mbuf *m = (struct mbuf *) buf;
147
148                 /* Find beginning of data */
149                 m = m_getptr(m, crd->crd_skip, &k);
150                 if (m == NULL) {
151                         error = EINVAL;
152                         goto done;
153                 }
154
155                 i = crd->crd_len;
156
157                 while (i > 0) {
158                         /*
159                          * If there's insufficient data at the end of
160                          * an mbuf, we have to do some copying.
161                          */
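                        /*
                         * A block straddling an mbuf boundary is staged
                         * through blk[] with m_copydata()/m_copyback();
                         * blocks wholly contained in one mbuf are transformed
                         * in place via idat in the inner loop further down.
                         */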
162                         if (m->m_len < k + blks && m->m_len != k) {
163                                 m_copydata(m, k, blks, blk);
164
165                                 /* Actual encryption/decryption */
166                                 if (exf->reinit) {
167                                         if (crd->crd_flags & CRD_F_ENCRYPT) {
168                                                 exf->encrypt(kschedule,
169                                                     blk);
170                                         } else {
171                                                 exf->decrypt(kschedule,
172                                                     blk);
173                                         }
174                                 } else if (crd->crd_flags & CRD_F_ENCRYPT) {
175                                         /* XOR with previous block */
176                                         for (j = 0; j < blks; j++)
177                                                 blk[j] ^= ivp[j];
178
179                                         exf->encrypt(kschedule, blk);
180
181                                         /*
182                                          * Keep encrypted block for XOR'ing
183                                          * with next block
184                                          */
185                                         bcopy(blk, iv, blks);
186                                         ivp = iv;
187                                 } else {        /* decrypt */
188                                         /*      
189                                          * Keep encrypted block for XOR'ing
190                                          * with next block
191                                          */
192                                         if (ivp == iv)
193                                                 bcopy(blk, piv, blks);
194                                         else
195                                                 bcopy(blk, iv, blks);
196
197                                         exf->decrypt(kschedule, blk);
198
199                                         /* XOR with previous block */
200                                         for (j = 0; j < blks; j++)
201                                                 blk[j] ^= ivp[j];
202
203                                         if (ivp == iv)
204                                                 bcopy(piv, iv, blks);
205                                         else
206                                                 ivp = iv;
207                                 }
208
209                                 /* Copy back the processed block */
210                                 m_copyback(m, k, blks, blk);
211
212                                 /* Advance pointer */
213                                 m = m_getptr(m, k + blks, &k);
214                                 if (m == NULL) {
215                                         error = EINVAL;
216                                         goto done;
217                                 }
218
219                                 i -= blks;
220
221                                 /* Could be done... */
222                                 if (i == 0)
223                                         break;
224                         }
225
226                         /* Skip possibly empty mbufs */
227                         if (k == m->m_len) {
228                                 for (m = m->m_next; m && m->m_len == 0;
229                                     m = m->m_next)
230                                         ;
231                                 k = 0;
232                         }
233
234                         /* Sanity check */
235                         if (m == NULL) {
236                                 error = EINVAL;
237                                 goto done;
238                         }
239
240                         /*
241                          * Warning: idat may point past valid data here,
242                          * but it is only dereferenced in the while() loop
243                          * below, and only when enough data remains.
244                          */
245                         idat = mtod(m, unsigned char *) + k;
246
247                         while (m->m_len >= k + blks && i > 0) {
248                                 if (exf->reinit) {
249                                         if (crd->crd_flags & CRD_F_ENCRYPT) {
250                                                 exf->encrypt(kschedule,
251                                                     idat);
252                                         } else {
253                                                 exf->decrypt(kschedule,
254                                                     idat);
255                                         }
256                                 } else if (crd->crd_flags & CRD_F_ENCRYPT) {
257                                         /* XOR with previous block/IV */
258                                         for (j = 0; j < blks; j++)
259                                                 idat[j] ^= ivp[j];
260
261                                         exf->encrypt(kschedule, idat);
262                                         ivp = idat;
263                                 } else {        /* decrypt */
264                                         /*
265                                          * Keep encrypted block to be used
266                                          * in next block's processing.
267                                          */
268                                         if (ivp == iv)
269                                                 bcopy(idat, piv, blks);
270                                         else
271                                                 bcopy(idat, iv, blks);
272
273                                         exf->decrypt(kschedule, idat);
274
275                                         /* XOR with previous block/IV */
276                                         for (j = 0; j < blks; j++)
277                                                 idat[j] ^= ivp[j];
278
279                                         if (ivp == iv)
280                                                 bcopy(piv, iv, blks);
281                                         else
282                                                 ivp = iv;
283                                 }
284
285                                 idat += blks;
286                                 k += blks;
287                                 i -= blks;
288                         }
289                 }
290                 error = 0;      /* Done with mbuf encryption/decryption */
291         } else if (flags & CRYPTO_F_IOV) {
292                 struct uio *uio = (struct uio *) buf;
293                 struct iovec *iov;
294
295                 /* Find beginning of data */
296                 iov = cuio_getptr(uio, crd->crd_skip, &k);
297                 if (iov == NULL) {
298                         error = EINVAL;
299                         goto done;
300                 }
301
302                 i = crd->crd_len;
303
304                 while (i > 0) {
305                         /*
306                          * If there's insufficient data at the end of
307                          * an iovec, we have to do some copying.
308                          */
309                         if (iov->iov_len < k + blks && iov->iov_len != k) {
310                                 cuio_copydata(uio, k, blks, blk);
311
312                                 /* Actual encryption/decryption */
313                                 if (exf->reinit) {
314                                         if (crd->crd_flags & CRD_F_ENCRYPT) {
315                                                 exf->encrypt(kschedule,
316                                                     blk);
317                                         } else {
318                                                 exf->decrypt(kschedule,
319                                                     blk);
320                                         }
321                                 } else if (crd->crd_flags & CRD_F_ENCRYPT) {
322                                         /* XOR with previous block */
323                                         for (j = 0; j < blks; j++)
324                                                 blk[j] ^= ivp[j];
325
326                                         exf->encrypt(kschedule, blk);
327
328                                         /*
329                                          * Keep encrypted block for XOR'ing
330                                          * with next block
331                                          */
332                                         bcopy(blk, iv, blks);
333                                         ivp = iv;
334                                 } else {        /* decrypt */
335                                         /*      
336                                          * Keep encrypted block for XOR'ing
337                                          * with next block
338                                          */
339                                         if (ivp == iv)
340                                                 bcopy(blk, piv, blks);
341                                         else
342                                                 bcopy(blk, iv, blks);
343
344                                         exf->decrypt(kschedule, blk);
345
346                                         /* XOR with previous block */
347                                         for (j = 0; j < blks; j++)
348                                                 blk[j] ^= ivp[j];
349
350                                         if (ivp == iv)
351                                                 bcopy(piv, iv, blks);
352                                         else
353                                                 ivp = iv;
354                                 }
355
356                                 /* Copy back the processed block */
357                                 cuio_copyback(uio, k, blks, blk);
358
359                                 /* Advance pointer */
360                                 iov = cuio_getptr(uio, k + blks, &k);
361                                 if (iov == NULL) {
362                                         error = EINVAL;
363                                         goto done;
364                                 }
365
366                                 i -= blks;
367
368                                 /* Could be done... */
369                                 if (i == 0)
370                                         break;
371                         }
372
373                         /*
374                          * Warning: idat may point past valid data here,
375                          * but it is only dereferenced in the while() loop
376                          * below, and only when enough data remains.
377                          */
378                         idat = (char *)iov->iov_base + k;
379
380                         while (iov->iov_len >= k + blks && i > 0) {
381                                 if (exf->reinit) {
382                                         if (crd->crd_flags & CRD_F_ENCRYPT) {
383                                                 exf->encrypt(kschedule,
384                                                     idat);
385                                         } else {
386                                                 exf->decrypt(kschedule,
387                                                     idat);
388                                         }
389                                 } else if (crd->crd_flags & CRD_F_ENCRYPT) {
390                                         /* XOR with previous block/IV */
391                                         for (j = 0; j < blks; j++)
392                                                 idat[j] ^= ivp[j];
393
394                                         exf->encrypt(kschedule, idat);
395                                         ivp = idat;
396                                 } else {        /* decrypt */
397                                         /*
398                                          * Keep encrypted block to be used
399                                          * in next block's processing.
400                                          */
401                                         if (ivp == iv)
402                                                 bcopy(idat, piv, blks);
403                                         else
404                                                 bcopy(idat, iv, blks);
405
406                                         exf->decrypt(kschedule, idat);
407
408                                         /* XOR with previous block/IV */
409                                         for (j = 0; j < blks; j++)
410                                                 idat[j] ^= ivp[j];
411
412                                         if (ivp == iv)
413                                                 bcopy(piv, iv, blks);
414                                         else
415                                                 ivp = iv;
416                                 }
417
418                                 idat += blks;
419                                 k += blks;
420                                 i -= blks;
421                         }
422                         if (k == iov->iov_len) {
423                                 iov++;
424                                 k = 0;
425                         }
426                 }
427                 error = 0;      /* Done with iovec encryption/decryption */
428         } else {
429                 /*
430                  * contiguous buffer
431                  */
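                /*
                 * XXX Unlike the mbuf and uio paths above, this path never
                 * checks exf->reinit, so a reinit-style xform (AES XTS/CTR)
                 * handed a contiguous buffer would incorrectly have CBC
                 * chaining applied around it.
                 */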
432                 if (crd->crd_flags & CRD_F_ENCRYPT) {
433                         for (i = crd->crd_skip;
434                             i < crd->crd_skip + crd->crd_len; i += blks) {
435                                 /* XOR with the IV/previous block, as appropriate. */
436                                 if (i == crd->crd_skip)
437                                         for (k = 0; k < blks; k++)
438                                                 buf[i + k] ^= ivp[k];
439                                 else
440                                         for (k = 0; k < blks; k++)
441                                                 buf[i + k] ^= buf[i + k - blks];
442                                 exf->encrypt(kschedule, buf + i);
443                         }
444                 } else {                /* Decrypt */
445                         /*
446                          * Start at the end, so we don't need to keep the
447                          * encrypted block as the IV for the next block.
448                          */
449                         for (i = crd->crd_skip + crd->crd_len - blks;
450                             i >= crd->crd_skip; i -= blks) {
451                                 exf->decrypt(kschedule, buf + i);
452
453                                 /* XOR with the IV/previous block, as appropriate */
454                                 if (i == crd->crd_skip)
455                                         for (k = 0; k < blks; k++)
456                                                 buf[i + k] ^= ivp[k];
457                                 else
458                                         for (k = 0; k < blks; k++)
459                                                 buf[i + k] ^= buf[i + k - blks];
460                         }
461                 }
462                 error = 0; /* Done w/contiguous buffer encrypt/decrypt */
463         }
464 done:
465         /*
466          * Cleanup - explicitly replace the session key if requested
467          *           (horrible semantics for concurrent operation)
468          */
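        /*
         * XXX If the old schedule is still referenced (or the session never
         * had one), the schedule built above for this operation is dropped
         * here without being zeroed or freed.
         */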
469         if (explicit_kschedule) {
470                 spin_lock_wr(&swcr_spin);
471                 if (sw->sw_kschedule && sw->sw_kschedule_refs == 0) {
472                         okschedule = sw->sw_kschedule;
473                         sw->sw_kschedule = kschedule;
474                 } else {
475                         okschedule = NULL;
476                 }
477                 spin_unlock_wr(&swcr_spin);
478                 if (okschedule)
479                         exf->zerokey(&okschedule);
480         } else {
481                 spin_lock_wr(&swcr_spin);
482                 --sw->sw_kschedule_refs;
483                 spin_unlock_wr(&swcr_spin);
484         }
485         return error;
486 }
487
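/*
 * (Re)derive the authentication state for a session.  For the HMAC
 * algorithms this precomputes the inner and outer contexts from the key
 * (ipad/opad padded out to the hash block size); for the KPDK variants the
 * raw key is stashed in sw_octx for later appending.  Called at session
 * setup and again from swcr_authcompute() when CRD_F_KEY_EXPLICIT supplies a
 * per-operation key.  The key is assumed to be no longer than the hash
 * block size.
 */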
488 static void
489 swcr_authprepare(struct auth_hash *axf, struct swcr_data *sw, u_char *key,
490     int klen)
491 {
492         int k;
493
494         klen /= 8;
495
496         switch (axf->type) {
497         case CRYPTO_MD5_HMAC:
498         case CRYPTO_SHA1_HMAC:
499         case CRYPTO_SHA2_256_HMAC:
500         case CRYPTO_SHA2_384_HMAC:
501         case CRYPTO_SHA2_512_HMAC:
502         case CRYPTO_NULL_HMAC:
503         case CRYPTO_RIPEMD160_HMAC:
504                 for (k = 0; k < klen; k++)
505                         key[k] ^= HMAC_IPAD_VAL;
506
507                 axf->Init(sw->sw_ictx);
508                 axf->Update(sw->sw_ictx, key, klen);
509                 axf->Update(sw->sw_ictx, hmac_ipad_buffer, axf->blocksize - klen);
510
511                 for (k = 0; k < klen; k++)
512                         key[k] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL);
513
514                 axf->Init(sw->sw_octx);
515                 axf->Update(sw->sw_octx, key, klen);
516                 axf->Update(sw->sw_octx, hmac_opad_buffer, axf->blocksize - klen);
517
518                 for (k = 0; k < klen; k++)
519                         key[k] ^= HMAC_OPAD_VAL;
520                 break;
521         case CRYPTO_MD5_KPDK:
522         case CRYPTO_SHA1_KPDK:
523         {
524                 /* A buffer large enough for either an MD5 or a SHA1 result. */
525                 u_char buf[SHA1_RESULTLEN];
526
527                 sw->sw_klen = klen;
528                 bcopy(key, sw->sw_octx, klen);
529                 axf->Init(sw->sw_ictx);
530                 axf->Update(sw->sw_ictx, key, klen);
531                 axf->Final(buf, sw->sw_ictx);
532                 break;
533         }
534         default:
535                 kprintf("%s: CRD_F_KEY_EXPLICIT flag given, but algorithm %d "
536                     "doesn't use keys.\n", __func__, axf->type);
537         }
538 }
539
540 /*
541  * Compute keyed-hash authenticator.
542  */
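/*
 * The precomputed inner context is copied into a local ctx, the payload is
 * fed through it with crypto_apply(), and the result is then finalized with
 * the outer context (HMAC), with the stored key appended (KPDK), or as-is
 * (NULL HMAC).  sw_mlen bytes (or the full hash if sw_mlen is 0) are written
 * back at crd_inject.
 */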
543 static int
544 swcr_authcompute(struct cryptodesc *crd, struct swcr_data *sw, caddr_t buf,
545     int flags)
546 {
547         unsigned char aalg[HASH_MAX_LEN];
548         struct auth_hash *axf;
549         union authctx ctx;
550         int err;
551
552         if (sw->sw_ictx == NULL)
553                 return EINVAL;
554
555         axf = sw->sw_axf;
556
557         if (crd->crd_flags & CRD_F_KEY_EXPLICIT)
558                 swcr_authprepare(axf, sw, crd->crd_key, crd->crd_klen);
559
560         bcopy(sw->sw_ictx, &ctx, axf->ctxsize);
561
562         err = crypto_apply(flags, buf, crd->crd_skip, crd->crd_len,
563             (int (*)(void *, void *, unsigned int))axf->Update, (caddr_t)&ctx);
564         if (err)
565                 return err;
566
567         switch (sw->sw_alg) {
568         case CRYPTO_MD5_HMAC:
569         case CRYPTO_SHA1_HMAC:
570         case CRYPTO_SHA2_256_HMAC:
571         case CRYPTO_SHA2_384_HMAC:
572         case CRYPTO_SHA2_512_HMAC:
573         case CRYPTO_RIPEMD160_HMAC:
574                 if (sw->sw_octx == NULL)
575                         return EINVAL;
576
577                 axf->Final(aalg, &ctx);
578                 bcopy(sw->sw_octx, &ctx, axf->ctxsize);
579                 axf->Update(&ctx, aalg, axf->hashsize);
580                 axf->Final(aalg, &ctx);
581                 break;
582
583         case CRYPTO_MD5_KPDK:
584         case CRYPTO_SHA1_KPDK:
585                 if (sw->sw_octx == NULL)
586                         return EINVAL;
587
588                 axf->Update(&ctx, sw->sw_octx, sw->sw_klen);
589                 axf->Final(aalg, &ctx);
590                 break;
591
592         case CRYPTO_NULL_HMAC:
593                 axf->Final(aalg, &ctx);
594                 break;
595         }
596
597         /* Inject the authentication data */
598         crypto_copyback(flags, buf, crd->crd_inject,
599             sw->sw_mlen == 0 ? axf->hashsize : sw->sw_mlen, aalg);
600         return 0;
601 }
602
603 /*
604  * Apply a compression/decompression algorithm
605  */
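/*
 * The xform allocates the output buffer itself and returns its length; the
 * length is recorded in sw_size and later reported to the caller through
 * crp_olen.  When compression fails to shrink the data the output is
 * discarded and the original buffer is left untouched.
 */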
606 static int
607 swcr_compdec(struct cryptodesc *crd, struct swcr_data *sw,
608              caddr_t buf, int flags)
609 {
610         u_int8_t *data, *out;
611         struct comp_algo *cxf;
612         int adj;
613         u_int32_t result;
614
615         cxf = sw->sw_cxf;
616
617         /*
618          * The (de)compression xform must see the whole payload at once,
619          * so if the data is not contiguous (e.g. spread over an mbuf
620          * chain) it is first copied into a temporary buffer.
621          */
622         data = kmalloc(crd->crd_len, M_CRYPTO_DATA, M_INTWAIT);
623         if (data == NULL)
624                 return (EINVAL);
625         crypto_copydata(flags, buf, crd->crd_skip, crd->crd_len, data);
626
627         if (crd->crd_flags & CRD_F_COMP)
628                 result = cxf->compress(data, crd->crd_len, &out);
629         else
630                 result = cxf->decompress(data, crd->crd_len, &out);
631
632         kfree(data, M_CRYPTO_DATA);
633         if (result == 0)
634                 return EINVAL;
635
636         /* Copy back the (de)compressed data; m_copyback() will
637          * extend the mbuf as necessary.
638          */
639         sw->sw_size = result;
640         /* Check the compressed size when doing compression */
641         if (crd->crd_flags & CRD_F_COMP) {
642                 if (result >= crd->crd_len) {
643                         /* Compression was useless, we lost time */
644                         kfree(out, M_CRYPTO_DATA);
645                         return 0;
646                 }
647         }
648
649         crypto_copyback(flags, buf, crd->crd_skip, result, out);
650         if (result < crd->crd_len) {
651                 adj = result - crd->crd_len;
652                 if (flags & CRYPTO_F_IMBUF) {
653                         adj = result - crd->crd_len;
654                         m_adj((struct mbuf *)buf, adj);
655                 } else if (flags & CRYPTO_F_IOV) {
656                         struct uio *uio = (struct uio *)buf;
657                         int ind;
658
659                         adj = crd->crd_len - result;
660                         ind = uio->uio_iovcnt - 1;
661
662                         while (adj > 0 && ind >= 0) {
663                                 if (adj < uio->uio_iov[ind].iov_len) {
664                                         uio->uio_iov[ind].iov_len -= adj;
665                                         break;
666                                 }
667
668                                 adj -= uio->uio_iov[ind].iov_len;
669                                 uio->uio_iov[ind].iov_len = 0;
670                                 ind--;
671                                 uio->uio_iovcnt--;
672                         }
673                 }
674         }
675         kfree(out, M_CRYPTO_DATA);
676         return 0;
677 }
678
679 /*
680  * Generate a new software session.
681  */
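/*
 * Illustrative only: a consumer reaches this through crypto_newsession()
 * with a chain of cryptoini descriptors, along the lines of (field names as
 * in cryptodev.h, key sizes per the xform):
 *
 *      struct cryptoini cri;
 *
 *      bzero(&cri, sizeof(cri));
 *      cri.cri_alg  = CRYPTO_AES_XTS;
 *      cri.cri_klen = 512;             (two AES-256 keys, in bits)
 *      cri.cri_key  = key_material;
 *
 * Each element of the chain becomes one swcr_data below and the resulting
 * chain is installed into a free slot of swcr_sessions[].
 */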
682 static int
683 swcr_newsession(device_t dev, u_int32_t *sid, struct cryptoini *cri)
684 {
685         struct swcr_data *swd_base;
686         struct swcr_data **swd;
687         struct swcr_data **oswd;
688         struct auth_hash *axf;
689         struct enc_xform *txf;
690         struct comp_algo *cxf;
691         u_int32_t i;
692         u_int32_t n;
693         int error;
694
695         if (sid == NULL || cri == NULL)
696                 return EINVAL;
697
698         swd_base = NULL;
699         swd = &swd_base;
700
701         while (cri) {
702                 *swd = kmalloc(sizeof(struct swcr_data),
703                                M_CRYPTO_DATA, M_WAITOK | M_ZERO);
704
705                 switch (cri->cri_alg) {
706                 case CRYPTO_DES_CBC:
707                         txf = &enc_xform_des;
708                         goto enccommon;
709                 case CRYPTO_3DES_CBC:
710                         txf = &enc_xform_3des;
711                         goto enccommon;
712                 case CRYPTO_BLF_CBC:
713                         txf = &enc_xform_blf;
714                         goto enccommon;
715                 case CRYPTO_CAST_CBC:
716                         txf = &enc_xform_cast5;
717                         goto enccommon;
718                 case CRYPTO_SKIPJACK_CBC:
719                         txf = &enc_xform_skipjack;
720                         goto enccommon;
721                 case CRYPTO_RIJNDAEL128_CBC:
722                         txf = &enc_xform_rijndael128;
723                         goto enccommon;
724                 case CRYPTO_AES_XTS:
725                         txf = &enc_xform_aes_xts;
726                         goto enccommon;
727                 case CRYPTO_AES_CTR:
728                         txf = &enc_xform_aes_ctr;
729                         goto enccommon;
730                 case CRYPTO_CAMELLIA_CBC:
731                         txf = &enc_xform_camellia;
732                         goto enccommon;
733                 case CRYPTO_NULL_CBC:
734                         txf = &enc_xform_null;
735                         goto enccommon;
736                 enccommon:
737                         if (cri->cri_key != NULL) {
738                                 error = txf->setkey(&((*swd)->sw_kschedule),
739                                                     cri->cri_key,
740                                                     cri->cri_klen / 8);
741                                 if (error) {
742                                         swcr_freesession_slot(&swd_base, 0);
743                                         return error;
744                                 }
745                         }
746                         (*swd)->sw_exf = txf;
747                         break;
748
749                 case CRYPTO_MD5_HMAC:
750                         axf = &auth_hash_hmac_md5;
751                         goto authcommon;
752                 case CRYPTO_SHA1_HMAC:
753                         axf = &auth_hash_hmac_sha1;
754                         goto authcommon;
755                 case CRYPTO_SHA2_256_HMAC:
756                         axf = &auth_hash_hmac_sha2_256;
757                         goto authcommon;
758                 case CRYPTO_SHA2_384_HMAC:
759                         axf = &auth_hash_hmac_sha2_384;
760                         goto authcommon;
761                 case CRYPTO_SHA2_512_HMAC:
762                         axf = &auth_hash_hmac_sha2_512;
763                         goto authcommon;
764                 case CRYPTO_NULL_HMAC:
765                         axf = &auth_hash_null;
766                         goto authcommon;
767                 case CRYPTO_RIPEMD160_HMAC:
768                         axf = &auth_hash_hmac_ripemd_160;
769                 authcommon:
770                         (*swd)->sw_ictx = kmalloc(axf->ctxsize, M_CRYPTO_DATA,
771                                                   M_WAITOK);
772                         if ((*swd)->sw_ictx == NULL) {
773                                 swcr_freesession_slot(&swd_base, 0);
774                                 return ENOBUFS;
775                         }
776         
777                         (*swd)->sw_octx = kmalloc(axf->ctxsize, M_CRYPTO_DATA,
778                                                   M_WAITOK);
779                         if ((*swd)->sw_octx == NULL) {
780                                 swcr_freesession_slot(&swd_base, 0);
781                                 return ENOBUFS;
782                         }
783         
784                         if (cri->cri_key != NULL) {
785                                 swcr_authprepare(axf, *swd, cri->cri_key,
786                                     cri->cri_klen);
787                         }
788         
789                         (*swd)->sw_mlen = cri->cri_mlen;
790                         (*swd)->sw_axf = axf;
791                         break;
792         
793                 case CRYPTO_MD5_KPDK:
794                         axf = &auth_hash_key_md5;
795                         goto auth2common;
796         
797                 case CRYPTO_SHA1_KPDK:
798                         axf = &auth_hash_key_sha1;
799                 auth2common:
800                         (*swd)->sw_ictx = kmalloc(axf->ctxsize, M_CRYPTO_DATA,
801                                                   M_WAITOK);
802                         if ((*swd)->sw_ictx == NULL) {
803                                 swcr_freesession_slot(&swd_base, 0);
804                                 return ENOBUFS;
805                         }
806         
807                         (*swd)->sw_octx = kmalloc(cri->cri_klen / 8,
808                                                   M_CRYPTO_DATA, M_WAITOK);
809                         if ((*swd)->sw_octx == NULL) {
810                                 swcr_freesession_slot(&swd_base, 0);
811                                 return ENOBUFS;
812                         }
813         
814                         /* Store the key so we can "append" it to the payload */
815                         if (cri->cri_key != NULL) {
816                                 swcr_authprepare(axf, *swd, cri->cri_key,
817                                     cri->cri_klen);
818                         }
819
820                         (*swd)->sw_mlen = cri->cri_mlen;
821                         (*swd)->sw_axf = axf;
822                         break;
823 #ifdef notdef
824                 case CRYPTO_MD5:
825                         axf = &auth_hash_md5;
826                         goto auth3common;
827
828                 case CRYPTO_SHA1:
829                         axf = &auth_hash_sha1;
830                 auth3common:
831                         (*swd)->sw_ictx = kmalloc(axf->ctxsize, M_CRYPTO_DATA,
832                                                   M_WAITOK);
833                         if ((*swd)->sw_ictx == NULL) {
834                                 swcr_freesession_slot(&swd_base, 0);
835                                 return ENOBUFS;
836                         }
837
838                         axf->Init((*swd)->sw_ictx);
839                         (*swd)->sw_mlen = cri->cri_mlen;
840                         (*swd)->sw_axf = axf;
841                         break;
842 #endif
843                 case CRYPTO_DEFLATE_COMP:
844                         cxf = &comp_algo_deflate;
845                         (*swd)->sw_cxf = cxf;
846                         break;
847                 default:
848                         swcr_freesession_slot(&swd_base, 0);
849                         return EINVAL;
850                 }
851         
852                 (*swd)->sw_alg = cri->cri_alg;
853                 cri = cri->cri_next;
854                 swd = &((*swd)->sw_next);
855         }
856
857         for (;;) {
858                 /*
859                  * Atomically allocate a session
860                  */
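                /*
                 * Scan for a free slot starting at swcr_minsesnum while
                 * holding the spinlock.  If the table is full, drop the
                 * lock, allocate a larger array (at least CRYPTO_SW_SESSIONS
                 * entries, otherwise grown to n * 3 / 2) and retry; the new
                 * array is discarded if another thread grew the table first.
                 */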
861                 spin_lock_wr(&swcr_spin);
862                 for (i = swcr_minsesnum; i < swcr_sesnum; ++i) {
863                         if (swcr_sessions[i] == NULL)
864                                 break;
865                 }
866                 if (i < swcr_sesnum) {
867                         swcr_sessions[i] = swd_base;
868                         swcr_minsesnum = i + 1;
869                         spin_unlock_wr(&swcr_spin);
870                         break;
871                 }
872                 n = swcr_sesnum;
873                 spin_unlock_wr(&swcr_spin);
874
875                 /*
876                  * A larger allocation is required, reallocate the array
877                  * and replace, checking for SMP races.
878                  */
879                 if (n < CRYPTO_SW_SESSIONS)
880                         n = CRYPTO_SW_SESSIONS;
881                 else
882                         n = n * 3 / 2;
883                 swd = kmalloc(n * sizeof(struct swcr_data *),
884                               M_CRYPTO_DATA, M_WAITOK | M_ZERO);
885
886                 spin_lock_wr(&swcr_spin);
887                 if (swcr_sesnum >= n) {
888                         spin_unlock_wr(&swcr_spin);
889                         kfree(swd, M_CRYPTO_DATA);
890                 } else if (swcr_sesnum) {
891                         bcopy(swcr_sessions, swd,
892                               swcr_sesnum * sizeof(struct swcr_data *));
893                         oswd = swcr_sessions;
894                         swcr_sessions = swd;
895                         swcr_sesnum = n;
896                         spin_unlock_wr(&swcr_spin);
897                         kfree(oswd, M_CRYPTO_DATA);
898                 } else {
899                         swcr_sessions = swd;
900                         swcr_sesnum = n;
901                         spin_unlock_wr(&swcr_spin);
902                 }
903         }
904
905         *sid = i;
906         return 0;
907 }
908
909 /*
910  * Free a session.
911  */
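/*
 * The session index is the low 32 bits of the tid; the actual teardown is
 * done by swcr_freesession_slot().  Slot 0 is never handed out, so freeing
 * session 0 is silently accepted.
 */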
912 static int
913 swcr_freesession(device_t dev, u_int64_t tid)
914 {
915         u_int32_t sid = CRYPTO_SESID2LID(tid);
916
917         if (sid >= swcr_sesnum || swcr_sessions == NULL ||
918             swcr_sessions[sid] == NULL) {
919                 return EINVAL;
920         }
921
922         /* Silently accept and return */
923         if (sid == 0)
924                 return 0;
925
926         return(swcr_freesession_slot(&swcr_sessions[sid], sid));
927 }
928
929 static
930 int
931 swcr_freesession_slot(struct swcr_data **swdp, u_int32_t sid)
932 {
933         struct enc_xform *txf;
934         struct auth_hash *axf;
935         struct comp_algo *cxf;
936         struct swcr_data *swd;
937         struct swcr_data *swnext;
938
939         /*
940          * Protect session detachment with the spinlock.
941          */
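        /*
         * The chain is unhooked and swcr_minsesnum lowered while holding the
         * lock so the slot can be reused immediately; the per-algorithm
         * cleanup (zeroing key schedules and HMAC contexts) then runs
         * outside the lock.
         */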
942         spin_lock_wr(&swcr_spin);
943         swnext = *swdp;
944         *swdp = NULL;
945         if (sid && swcr_minsesnum > sid)
946                 swcr_minsesnum = sid;
947         spin_unlock_wr(&swcr_spin);
948
949         /*
950          * Clean up at our leisure.
951          */
952         while ((swd = swnext) != NULL) {
953                 swnext = swd->sw_next;
954
955                 swd->sw_next = NULL;
956
957                 switch (swd->sw_alg) {
958                 case CRYPTO_DES_CBC:
959                 case CRYPTO_3DES_CBC:
960                 case CRYPTO_BLF_CBC:
961                 case CRYPTO_CAST_CBC:
962                 case CRYPTO_SKIPJACK_CBC:
963                 case CRYPTO_RIJNDAEL128_CBC:
964                 case CRYPTO_AES_XTS:
965                 case CRYPTO_AES_CTR:
966                 case CRYPTO_CAMELLIA_CBC:
967                 case CRYPTO_NULL_CBC:
968                         txf = swd->sw_exf;
969
970                         if (swd->sw_kschedule)
971                                 txf->zerokey(&(swd->sw_kschedule));
972                         break;
973
974                 case CRYPTO_MD5_HMAC:
975                 case CRYPTO_SHA1_HMAC:
976                 case CRYPTO_SHA2_256_HMAC:
977                 case CRYPTO_SHA2_384_HMAC:
978                 case CRYPTO_SHA2_512_HMAC:
979                 case CRYPTO_RIPEMD160_HMAC:
980                 case CRYPTO_NULL_HMAC:
981                         axf = swd->sw_axf;
982
983                         if (swd->sw_ictx) {
984                                 bzero(swd->sw_ictx, axf->ctxsize);
985                                 kfree(swd->sw_ictx, M_CRYPTO_DATA);
986                         }
987                         if (swd->sw_octx) {
988                                 bzero(swd->sw_octx, axf->ctxsize);
989                                 kfree(swd->sw_octx, M_CRYPTO_DATA);
990                         }
991                         break;
992
993                 case CRYPTO_MD5_KPDK:
994                 case CRYPTO_SHA1_KPDK:
995                         axf = swd->sw_axf;
996
997                         if (swd->sw_ictx) {
998                                 bzero(swd->sw_ictx, axf->ctxsize);
999                                 kfree(swd->sw_ictx, M_CRYPTO_DATA);
1000                         }
1001                         if (swd->sw_octx) {
1002                                 bzero(swd->sw_octx, swd->sw_klen);
1003                                 kfree(swd->sw_octx, M_CRYPTO_DATA);
1004                         }
1005                         break;
1006
1007                 case CRYPTO_MD5:
1008                 case CRYPTO_SHA1:
1009                         axf = swd->sw_axf;
1010
1011                         if (swd->sw_ictx)
1012                                 kfree(swd->sw_ictx, M_CRYPTO_DATA);
1013                         break;
1014
1015                 case CRYPTO_DEFLATE_COMP:
1016                         cxf = swd->sw_cxf;
1017                         break;
1018                 }
1019
1021                 kfree(swd, M_CRYPTO_DATA);
1022         }
1023         return 0;
1024 }
1025
1026 /*
1027  * Process a software request.
1028  */
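/*
 * Walk the descriptor chain of the request, look up the matching swcr_data
 * for each descriptor by algorithm, and dispatch to the cipher, auth or
 * compression handler above.  Failures are reported through crp_etype, and
 * crypto_done() is always called before returning.
 */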
1029 static int
1030 swcr_process(device_t dev, struct cryptop *crp, int hint)
1031 {
1032         struct cryptodesc *crd;
1033         struct swcr_data *sw;
1034         u_int32_t lid;
1035
1036         /* Sanity check */
1037         if (crp == NULL)
1038                 return EINVAL;
1039
1040         if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
1041                 crp->crp_etype = EINVAL;
1042                 goto done;
1043         }
1044
1045         lid = crp->crp_sid & 0xffffffff;
1046         if (lid >= swcr_sesnum || lid == 0 || swcr_sessions[lid] == NULL) {
1047                 crp->crp_etype = ENOENT;
1048                 goto done;
1049         }
1050
1051         /* Go through crypto descriptors, processing as we go */
1052         for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
1053                 /*
1054                  * Find the crypto context.
1055                  *
1056                  * XXX Note that the logic here prevents us from having
1057                  * XXX the same algorithm multiple times in a session
1058                  * XXX (or rather, we can but it won't give us the right
1059                  * XXX results). To do that, we'd need some way of differentiating
1060                  * XXX between the various instances of an algorithm (so we can
1061                  * XXX locate the correct crypto context).
1062                  */
1063                 for (sw = swcr_sessions[lid];
1064                     sw && sw->sw_alg != crd->crd_alg;
1065                     sw = sw->sw_next)
1066                         ;
1067
1068                 /* No such context ? */
1069                 if (sw == NULL) {
1070                         crp->crp_etype = EINVAL;
1071                         goto done;
1072                 }
1073                 switch (sw->sw_alg) {
1074                 case CRYPTO_DES_CBC:
1075                 case CRYPTO_3DES_CBC:
1076                 case CRYPTO_BLF_CBC:
1077                 case CRYPTO_CAST_CBC:
1078                 case CRYPTO_SKIPJACK_CBC:
1079                 case CRYPTO_RIJNDAEL128_CBC:
1080                 case CRYPTO_AES_XTS:
1081                 case CRYPTO_AES_CTR:
1082                 case CRYPTO_CAMELLIA_CBC:
1083                         if ((crp->crp_etype = swcr_encdec(crd, sw,
1084                             crp->crp_buf, crp->crp_flags)) != 0)
1085                                 goto done;
1086                         break;
1087                 case CRYPTO_NULL_CBC:
1088                         crp->crp_etype = 0;
1089                         break;
1090                 case CRYPTO_MD5_HMAC:
1091                 case CRYPTO_SHA1_HMAC:
1092                 case CRYPTO_SHA2_256_HMAC:
1093                 case CRYPTO_SHA2_384_HMAC:
1094                 case CRYPTO_SHA2_512_HMAC:
1095                 case CRYPTO_RIPEMD160_HMAC:
1096                 case CRYPTO_NULL_HMAC:
1097                 case CRYPTO_MD5_KPDK:
1098                 case CRYPTO_SHA1_KPDK:
1099                 case CRYPTO_MD5:
1100                 case CRYPTO_SHA1:
1101                         if ((crp->crp_etype = swcr_authcompute(crd, sw,
1102                             crp->crp_buf, crp->crp_flags)) != 0)
1103                                 goto done;
1104                         break;
1105
1106                 case CRYPTO_DEFLATE_COMP:
1107                         if ((crp->crp_etype = swcr_compdec(crd, sw, 
1108                             crp->crp_buf, crp->crp_flags)) != 0)
1109                                 goto done;
1110                         else
1111                                 crp->crp_olen = (int)sw->sw_size;
1112                         break;
1113
1114                 default:
1115                         /* Unknown/unsupported algorithm */
1116                         crp->crp_etype = EINVAL;
1117                         goto done;
1118                 }
1119         }
1120
1121 done:
1122         crypto_done(crp);
1123         return 0;
1124 }
1125
1126 static void
1127 swcr_identify(driver_t *drv, device_t parent)
1128 {
1129         /* NB: order 10 is so we get attached after h/w devices */
1130         /* XXX: wouldn't bet about this BUS_ADD_CHILD correctness */
1131         if (device_find_child(parent, "cryptosoft", -1) == NULL &&
1132             BUS_ADD_CHILD(parent, parent, 10, "cryptosoft", -1) == 0)
1133                 panic("cryptosoft: could not attach");
1134 }
1135
1136 static int
1137 swcr_probe(device_t dev)
1138 {
1139         device_set_desc(dev, "software crypto");
1140         return (0);
1141 }
1142
1143 static int
1144 swcr_attach(device_t dev)
1145 {
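        /*
         * The ipad/opad pads are filled in once here and reused by
         * swcr_authprepare() to pad short HMAC keys out to the hash block
         * size.
         */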
1146         memset(hmac_ipad_buffer, HMAC_IPAD_VAL, HMAC_MAX_BLOCK_LEN);
1147         memset(hmac_opad_buffer, HMAC_OPAD_VAL, HMAC_MAX_BLOCK_LEN);
1148
1149         swcr_id = crypto_get_driverid(dev, CRYPTOCAP_F_SOFTWARE |
1150                                            CRYPTOCAP_F_SYNC |
1151                                            CRYPTOCAP_F_SMP);
1152         if (swcr_id < 0) {
1153                 device_printf(dev, "cannot initialize!\n");
1154                 return ENOMEM;
1155         }
1156 #define REGISTER(alg) \
1157         crypto_register(swcr_id, alg, 0, 0)
1158         REGISTER(CRYPTO_DES_CBC);
1159         REGISTER(CRYPTO_3DES_CBC);
1160         REGISTER(CRYPTO_BLF_CBC);
1161         REGISTER(CRYPTO_CAST_CBC);
1162         REGISTER(CRYPTO_SKIPJACK_CBC);
1163         REGISTER(CRYPTO_NULL_CBC);
1164         REGISTER(CRYPTO_MD5_HMAC);
1165         REGISTER(CRYPTO_SHA1_HMAC);
1166         REGISTER(CRYPTO_SHA2_256_HMAC);
1167         REGISTER(CRYPTO_SHA2_384_HMAC);
1168         REGISTER(CRYPTO_SHA2_512_HMAC);
1169         REGISTER(CRYPTO_RIPEMD160_HMAC);
1170         REGISTER(CRYPTO_NULL_HMAC);
1171         REGISTER(CRYPTO_MD5_KPDK);
1172         REGISTER(CRYPTO_SHA1_KPDK);
1173         REGISTER(CRYPTO_MD5);
1174         REGISTER(CRYPTO_SHA1);
1175         REGISTER(CRYPTO_RIJNDAEL128_CBC);
1176         REGISTER(CRYPTO_AES_XTS);
1177         REGISTER(CRYPTO_AES_CTR);
1178         REGISTER(CRYPTO_CAMELLIA_CBC);
1179         REGISTER(CRYPTO_DEFLATE_COMP);
1180 #undef REGISTER
1181
1182         return 0;
1183 }
1184
1185 static int
1186 swcr_detach(device_t dev)
1187 {
1188         crypto_unregister_all(swcr_id);
1189         if (swcr_sessions != NULL)
1190                 kfree(swcr_sessions, M_CRYPTO_DATA);
1191         return 0;
1192 }
1193
1194 static device_method_t swcr_methods[] = {
1195         DEVMETHOD(device_identify,      swcr_identify),
1196         DEVMETHOD(device_probe,         swcr_probe),
1197         DEVMETHOD(device_attach,        swcr_attach),
1198         DEVMETHOD(device_detach,        swcr_detach),
1199
1200         DEVMETHOD(cryptodev_newsession, swcr_newsession),
1201         DEVMETHOD(cryptodev_freesession, swcr_freesession),
1202         DEVMETHOD(cryptodev_process,    swcr_process),
1203
1204         {0, 0},
1205 };
1206
1207 static driver_t swcr_driver = {
1208         "cryptosoft",
1209         swcr_methods,
1210         0,              /* NB: no softc */
1211 };
1212 static devclass_t swcr_devclass;
1213
1214 /*
1215  * NB: We explicitly reference the crypto module so we
1216  * get the necessary ordering when built as a loadable
1217  * module.  This is required because we bundle the crypto
1218  * module code together with the cryptosoft driver (otherwise
1219  * normal module dependencies would handle things).
1220  */
1221 extern int crypto_modevent(struct module *, int, void *);
1222 /* XXX where to attach */
1223 DRIVER_MODULE(cryptosoft, nexus, swcr_driver, swcr_devclass, crypto_modevent, 0);
1224 MODULE_VERSION(cryptosoft, 1);
1225 MODULE_DEPEND(cryptosoft, crypto, 1, 1, 1);