/*-
 * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
 * Copyright (c) 2002-2006 Sam Leffler, Errno Consulting
 *
 * This code was written by Angelos D. Keromytis in Athens, Greece, in
 * February 2000. Network Security Technologies Inc. (NSTI) kindly
 * supported the development of this code.
 *
 * Copyright (c) 2000, 2001 Angelos D. Keromytis
 *
 * SMP modifications by Matthew Dillon for the DragonFlyBSD Project
 *
 * Permission to use, copy, and modify this software with or without fee
 * is hereby granted, provided that this entire notice is included in
 * all source code copies of any software which is or includes a copy or
 * modification of this software.
 *
 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
 * PURPOSE.
 *
 * $FreeBSD: src/sys/opencrypto/cryptosoft.c,v 1.23 2009/02/05 17:43:12 imp Exp $
 * $OpenBSD: cryptosoft.c,v 1.35 2002/04/26 08:43:50 deraadt Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/sysctl.h>
#include <sys/errno.h>
#include <sys/endian.h>
#include <sys/random.h>
#include <sys/kernel.h>
#include <sys/uio.h>
#include <sys/spinlock2.h>

#include <crypto/blowfish/blowfish.h>
#include <crypto/sha1.h>
#include <opencrypto/rmd160.h>
#include <opencrypto/cast.h>
#include <opencrypto/skipjack.h>

#include <opencrypto/cryptodev.h>
#include <opencrypto/cryptosoft.h>
#include <opencrypto/xform.h>

#include <sys/kobj.h>
#include <sys/bus.h>
#include "cryptodev_if.h"
static int32_t swcr_id;
static struct swcr_data **swcr_sessions = NULL;
static u_int32_t swcr_sesnum;
static u_int32_t swcr_minsesnum = 1;

static struct spinlock swcr_spin = SPINLOCK_INITIALIZER(swcr_spin);

u_int8_t hmac_ipad_buffer[HMAC_MAX_BLOCK_LEN];
u_int8_t hmac_opad_buffer[HMAC_MAX_BLOCK_LEN];
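/*
 * hmac_ipad_buffer/hmac_opad_buffer hold a full block of the RFC 2104
 * inner/outer pad bytes.  They are filled in swcr_attach() and used by
 * swcr_authprepare() to pad keys shorter than the hash blocksize.
 */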
static	int swcr_encdec(struct cryptodesc *, struct swcr_data *, caddr_t, int);
static	int swcr_authcompute(struct cryptodesc *, struct swcr_data *, caddr_t, int);
static	int swcr_combined(struct cryptop *);
static	int swcr_compdec(struct cryptodesc *, struct swcr_data *, caddr_t, int);
static	int swcr_freesession(device_t dev, u_int64_t tid);
static	int swcr_freesession_slot(struct swcr_data **swdp, u_int32_t sid);
/*
 * Apply a symmetric encryption/decryption algorithm.
 */
static int
swcr_encdec(struct cryptodesc *crd, struct swcr_data *sw, caddr_t buf,
	    int flags)
{
	unsigned char iv[EALG_MAX_BLOCK_LEN], blk[EALG_MAX_BLOCK_LEN], *idat;
	unsigned char *ivp, *nivp, iv2[EALG_MAX_BLOCK_LEN];
	u_int8_t *kschedule;
	u_int8_t *okschedule;
	struct enc_xform *exf;
	int i, k, j, blks, ivlen;
	int error;
	int explicit_kschedule;

	exf = sw->sw_exf;
	blks = exf->blocksize;
	ivlen = exf->ivsize;
	/* Check for non-padded data */
	if (crd->crd_len % blks)
		return EINVAL;

	/* Initialize the IV */
	if (crd->crd_flags & CRD_F_ENCRYPT) {
		/* IV explicitly provided ? */
		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
			bcopy(crd->crd_iv, iv, ivlen);
		else
			karc4rand(iv, ivlen);

		/* Do we need to write the IV */
		if (!(crd->crd_flags & CRD_F_IV_PRESENT))
			crypto_copyback(flags, buf, crd->crd_inject, ivlen, iv);
	} else {	/* Decryption */
		/* IV explicitly provided ? */
		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
			bcopy(crd->crd_iv, iv, ivlen);
		else {
			/* Get IV off buf */
			crypto_copydata(flags, buf, crd->crd_inject, ivlen, iv);
		}
	}
	ivp = iv;
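	/*
	 * The non-reinit paths below implement CBC chaining by hand:
	 * encryption computes C[i] = E_k(P[i] ^ C[i-1]) with C[0] chained
	 * off the IV, and decryption computes P[i] = D_k(C[i]) ^ C[i-1].
	 * ivp always points at the ciphertext block to XOR with next.
	 */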
	/*
	 * The semantics are seriously broken because the session key
	 * storage was never designed for concurrent ops.
	 */
	if (crd->crd_flags & CRD_F_KEY_EXPLICIT) {
		kschedule = NULL;
		explicit_kschedule = 1;
		error = exf->setkey(&kschedule,
				    crd->crd_key, crd->crd_klen / 8);
		if (error)
			return error;
	} else {
		spin_lock(&swcr_spin);
		kschedule = sw->sw_kschedule;
		++sw->sw_kschedule_refs;
		spin_unlock(&swcr_spin);
		explicit_kschedule = 0;
	}

	/*
	 * xforms that provide a reinit method perform all IV
	 * handling themselves.
	 */
	if (exf->reinit)
		exf->reinit(kschedule, iv);
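	/*
	 * Three buffer layouts are handled below (mbuf chain, uio/iovec,
	 * contiguous kernel buffer), all sharing the logic above:
	 * reinit-style transforms are applied block by block as-is,
	 * everything else gets the explicit CBC XOR chaining.
	 */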
	if (flags & CRYPTO_F_IMBUF) {
		struct mbuf *m = (struct mbuf *) buf;

		/* Find beginning of data */
		m = m_getptr(m, crd->crd_skip, &k);
		if (m == NULL) {
			error = EINVAL;
			goto done;
		}

		i = crd->crd_len;

		while (i > 0) {
			/*
			 * If there's insufficient data at the end of
			 * an mbuf, we have to do some copying.
			 */
			if (m->m_len < k + blks && m->m_len != k) {
				m_copydata(m, k, blks, blk);

				/* Actual encryption/decryption */
				if (exf->reinit) {
					if (crd->crd_flags & CRD_F_ENCRYPT) {
						exf->encrypt(kschedule,
							     blk, iv);
					} else {
						exf->decrypt(kschedule,
							     blk, iv);
					}
				} else if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					exf->encrypt(kschedule, blk, iv);

					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					bcopy(blk, iv, blks);
					ivp = iv;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					nivp = (ivp == iv) ? iv2 : iv;
					bcopy(blk, nivp, blks);

					exf->decrypt(kschedule, blk, iv);

					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];
					ivp = nivp;
				}

				/* Copy back decrypted block */
				m_copyback(m, k, blks, blk);

				/* Advance pointer */
				m = m_getptr(m, k + blks, &k);
				if (m == NULL) {
					error = EINVAL;
					goto done;
				}

				i -= blks;

				/* Could be done... */
				if (i == 0)
					break;
			}

			/* Skip possibly empty mbufs */
			if (k == m->m_len) {
				for (m = m->m_next; m && m->m_len == 0;
				     m = m->m_next)
					;
				k = 0;
			}

			/* Sanity check */
			if (m == NULL) {
				error = EINVAL;
				goto done;
			}

			/*
			 * Warning: idat may point to garbage here, but
			 * we only use it in the while() loop, only if
			 * there are indeed enough data.
			 */
			idat = mtod(m, unsigned char *) + k;

			while (m->m_len >= k + blks && i > 0) {
				if (exf->reinit) {
					if (crd->crd_flags & CRD_F_ENCRYPT) {
						exf->encrypt(kschedule,
							     idat, iv);
					} else {
						exf->decrypt(kschedule,
							     idat, iv);
					}
				} else if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					exf->encrypt(kschedule, idat, iv);
					ivp = idat;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block to be used
					 * in next block's processing.
					 */
					nivp = (ivp == iv) ? iv2 : iv;
					bcopy(idat, nivp, blks);

					exf->decrypt(kschedule, idat, iv);

					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];
					ivp = nivp;
				}

				idat += blks;
				k += blks;
				i -= blks;
			}
		}
		error = 0;	/* Done with mbuf encryption/decryption */
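		/*
		 * Note the iv/iv2 double buffering in the decrypt cases
		 * above: the ciphertext block is saved into the idle
		 * buffer (nivp) before exf->decrypt() overwrites it in
		 * place, since that ciphertext is the XOR input for the
		 * following block.  The iovec path below is a
		 * line-for-line analogue of this mbuf loop.
		 */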
	} else if (flags & CRYPTO_F_IOV) {
		struct uio *uio = (struct uio *) buf;
		struct iovec *iov;

		/* Find beginning of data */
		iov = cuio_getptr(uio, crd->crd_skip, &k);
		if (iov == NULL) {
			error = EINVAL;
			goto done;
		}

		i = crd->crd_len;

		while (i > 0) {
			/*
			 * If there's insufficient data at the end of
			 * an iovec, we have to do some copying.
			 */
			if (iov->iov_len < k + blks && iov->iov_len != k) {
				cuio_copydata(uio, k, blks, blk);

				/* Actual encryption/decryption */
				if (exf->reinit) {
					if (crd->crd_flags & CRD_F_ENCRYPT) {
						exf->encrypt(kschedule,
							     blk, iv);
					} else {
						exf->decrypt(kschedule,
							     blk, iv);
					}
				} else if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					exf->encrypt(kschedule, blk, iv);

					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					bcopy(blk, iv, blks);
					ivp = iv;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					nivp = (ivp == iv) ? iv2 : iv;
					bcopy(blk, nivp, blks);

					exf->decrypt(kschedule, blk, iv);

					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];
					ivp = nivp;
				}

				/* Copy back decrypted block */
				cuio_copyback(uio, k, blks, blk);

				/* Advance pointer */
				iov = cuio_getptr(uio, k + blks, &k);
				if (iov == NULL) {
					error = EINVAL;
					goto done;
				}

				i -= blks;

				/* Could be done... */
				if (i == 0)
					break;
			}

			/*
			 * Warning: idat may point to garbage here, but
			 * we only use it in the while() loop, only if
			 * there are indeed enough data.
			 */
			idat = (char *)iov->iov_base + k;

			while (iov->iov_len >= k + blks && i > 0) {
				if (exf->reinit) {
					if (crd->crd_flags & CRD_F_ENCRYPT) {
						exf->encrypt(kschedule,
							     idat, iv);
					} else {
						exf->decrypt(kschedule,
							     idat, iv);
					}
				} else if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					exf->encrypt(kschedule, idat, iv);
					ivp = idat;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block to be used
					 * in next block's processing.
					 */
					nivp = (ivp == iv) ? iv2 : iv;
					bcopy(idat, nivp, blks);

					exf->decrypt(kschedule, idat, iv);

					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];
					ivp = nivp;
				}

				idat += blks;
				k += blks;
				i -= blks;
			}
			if (k == iov->iov_len) {
				iov++;
				k = 0;
			}
		}
		error = 0;	/* Done with iovec encryption/decryption */
	} else {	/* Handle contiguous buffers. */
		if (exf->reinit) {
			for (i = crd->crd_skip;
			     i < crd->crd_skip + crd->crd_len; i += blks) {
				if (crd->crd_flags & CRD_F_ENCRYPT) {
					exf->encrypt(kschedule, buf + i, iv);
				} else {
					exf->decrypt(kschedule, buf + i, iv);
				}
			}
		} else if (crd->crd_flags & CRD_F_ENCRYPT) {
			for (i = crd->crd_skip;
			     i < crd->crd_skip + crd->crd_len; i += blks) {
				/* XOR with the IV/previous block, as appropriate. */
				if (i == crd->crd_skip)
					for (k = 0; k < blks; k++)
						buf[i + k] ^= ivp[k];
				else
					for (k = 0; k < blks; k++)
						buf[i + k] ^= buf[i + k - blks];
				exf->encrypt(kschedule, buf + i, iv);
			}
		} else {	/* Decrypt */
			/*
			 * Start at the end, so we don't need to keep the
			 * encrypted block as the IV for the next block.
			 */
			for (i = crd->crd_skip + crd->crd_len - blks;
			     i >= crd->crd_skip; i -= blks) {
				exf->decrypt(kschedule, buf + i, iv);

				/* XOR with the IV/previous block, as appropriate */
				if (i == crd->crd_skip)
					for (k = 0; k < blks; k++)
						buf[i + k] ^= ivp[k];
				else
					for (k = 0; k < blks; k++)
						buf[i + k] ^= buf[i + k - blks];
			}
		}
		error = 0;	/* Done w/contiguous buffer encrypt/decrypt */
	}

done:
	/*
	 * Cleanup - explicitly replace the session key if requested
	 *	     (horrible semantics for concurrent operation)
	 */
	if (explicit_kschedule) {
		okschedule = NULL;
		spin_lock(&swcr_spin);
		if (sw->sw_kschedule && sw->sw_kschedule_refs == 0) {
			okschedule = sw->sw_kschedule;
			sw->sw_kschedule = kschedule;
		} else {
			okschedule = kschedule;
		}
		spin_unlock(&swcr_spin);
		if (okschedule)
			exf->zerokey(&okschedule);
	} else {
		spin_lock(&swcr_spin);
		--sw->sw_kschedule_refs;
		spin_unlock(&swcr_spin);
	}

	return error;
}
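/*
 * Prepare per-session HMAC contexts for a (possibly new) key.  This
 * precomputes the two partially-keyed states of RFC 2104,
 *	HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m)),
 * so each request only clones sw_ictx/sw_octx instead of re-hashing
 * the padded key; the key buffer itself is XOR'd back to its original
 * value before returning.
 */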
static void
swcr_authprepare(struct auth_hash *axf, struct swcr_data *sw, u_char *key,
		 int klen)
{
	int k;

	klen /= 8;

	switch (axf->type) {
	case CRYPTO_MD5_HMAC:
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_NULL_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
		for (k = 0; k < klen; k++)
			key[k] ^= HMAC_IPAD_VAL;

		axf->Init(sw->sw_ictx);
		axf->Update(sw->sw_ictx, key, klen);
		axf->Update(sw->sw_ictx, hmac_ipad_buffer, axf->blocksize - klen);

		for (k = 0; k < klen; k++)
			key[k] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL);

		axf->Init(sw->sw_octx);
		axf->Update(sw->sw_octx, key, klen);
		axf->Update(sw->sw_octx, hmac_opad_buffer, axf->blocksize - klen);

		for (k = 0; k < klen; k++)
			key[k] ^= HMAC_OPAD_VAL;
		break;
	case CRYPTO_MD5_KPDK:
	case CRYPTO_SHA1_KPDK:
	{
		/* We need a buffer that can hold an md5 and a sha1 result. */
		u_char buf[SHA1_RESULTLEN];

		sw->sw_klen = klen;
		bcopy(key, sw->sw_octx, klen);
		axf->Init(sw->sw_ictx);
		axf->Update(sw->sw_ictx, key, klen);
		axf->Final(buf, sw->sw_ictx);
		break;
	}
	default:
		kprintf("%s: CRD_F_KEY_EXPLICIT flag given, but algorithm %d "
			"doesn't use keys.\n", __func__, axf->type);
		break;
	}
}
/*
 * Compute keyed-hash authenticator.
 */
static int
swcr_authcompute(struct cryptodesc *crd, struct swcr_data *sw, caddr_t buf,
		 int flags)
{
	unsigned char aalg[HASH_MAX_LEN];
	struct auth_hash *axf;
	union authctx ctx;
	int err;

	if (sw->sw_ictx == NULL)
		return EINVAL;

	axf = sw->sw_axf;

	if (crd->crd_flags & CRD_F_KEY_EXPLICIT)
		swcr_authprepare(axf, sw, crd->crd_key, crd->crd_klen);

	bcopy(sw->sw_ictx, &ctx, axf->ctxsize);

	err = crypto_apply(flags, buf, crd->crd_skip, crd->crd_len,
	    (int (*)(void *, void *, unsigned int))axf->Update, (caddr_t)&ctx);
	if (err)
		return err;

	switch (sw->sw_alg) {
	case CRYPTO_MD5_HMAC:
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
		if (sw->sw_octx == NULL)
			return EINVAL;

		axf->Final(aalg, &ctx);
		bcopy(sw->sw_octx, &ctx, axf->ctxsize);
		axf->Update(&ctx, aalg, axf->hashsize);
		axf->Final(aalg, &ctx);
		break;

	case CRYPTO_MD5_KPDK:
	case CRYPTO_SHA1_KPDK:
		if (sw->sw_octx == NULL)
			return EINVAL;

		axf->Update(&ctx, sw->sw_octx, sw->sw_klen);
		axf->Final(aalg, &ctx);
		break;

	case CRYPTO_NULL_HMAC:
		axf->Final(aalg, &ctx);
		break;
	}

	/* Inject the authentication data */
	crypto_copyback(flags, buf, crd->crd_inject,
	    sw->sw_mlen == 0 ? axf->hashsize : sw->sw_mlen, aalg);
	return 0;
}
/*
 * Apply a combined encryption-authentication transformation
 */
static int
swcr_combined(struct cryptop *crp)
{
	uint32_t blkbuf[howmany(EALG_MAX_BLOCK_LEN, sizeof(uint32_t))];
	u_char *blk = (u_char *)blkbuf;
	u_char aalg[HASH_MAX_LEN];
	u_char iv[EALG_MAX_BLOCK_LEN];
	u_int8_t *kschedule;
	union authctx ctx;
	struct cryptodesc *crd, *crda = NULL, *crde = NULL;
	struct swcr_data *sw, *swa, *swe;
	struct auth_hash *axf = NULL;
	struct enc_xform *exf = NULL;
	struct mbuf *m = NULL;
	struct uio *uio = NULL;
	caddr_t buf = (caddr_t)crp->crp_buf;
	uint32_t *blkp;
	int i, blksz, ivlen, outtype, len;

	blksz = 0;
	ivlen = 0;

	for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
		for (sw = swcr_sessions[crp->crp_sid & 0xffffffff];
		     sw && sw->sw_alg != crd->crd_alg;
		     sw = sw->sw_next)
			;
		if (sw == NULL)
			return (EINVAL);

		switch (sw->sw_alg) {
		case CRYPTO_AES_GCM_16:
		case CRYPTO_AES_GMAC:
			swe = sw;
			crde = crd;
			exf = swe->sw_exf;
			ivlen = exf->ivsize;
			break;
		case CRYPTO_AES_128_GMAC:
		case CRYPTO_AES_192_GMAC:
		case CRYPTO_AES_256_GMAC:
			swa = sw;
			crda = crd;
			axf = swa->sw_axf;
			if (swa->sw_ictx == NULL)
				return (EINVAL);
			bcopy(swa->sw_ictx, &ctx, axf->ctxsize);
			blksz = axf->blocksize;
			break;
		default:
			return (EINVAL);
		}
	}
	if (crde == NULL || crda == NULL)
		return (EINVAL);

	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		outtype = CRYPTO_BUF_MBUF;
		m = (struct mbuf *)buf;
	} else {
		outtype = CRYPTO_BUF_IOV;
		uio = (struct uio *)buf;
	}

	/* Initialize the IV */
	if (crde->crd_flags & CRD_F_ENCRYPT) {
		/* IV explicitly provided ? */
		if (crde->crd_flags & CRD_F_IV_EXPLICIT)
			bcopy(crde->crd_iv, iv, ivlen);
		else
			karc4rand(iv, ivlen);

		/* Do we need to write the IV */
		if (!(crde->crd_flags & CRD_F_IV_PRESENT))
			crypto_copyback(crde->crd_flags, buf, crde->crd_inject,
			    ivlen, iv);
	} else {	/* Decryption */
		/* IV explicitly provided ? */
		if (crde->crd_flags & CRD_F_IV_EXPLICIT)
			bcopy(crde->crd_iv, iv, ivlen);
		else {
			/* Get IV off buf */
			crypto_copydata(crde->crd_flags, buf, crde->crd_inject,
			    ivlen, iv);
		}
	}

	/* Supply MAC with IV */
	if (axf->Reinit)
		axf->Reinit(&ctx, iv, ivlen);

	/* Supply MAC with AAD */
	for (i = 0; i < crda->crd_len; i += blksz) {
		len = MIN(crda->crd_len - i, blksz);
		crypto_copydata(crde->crd_flags, buf, crda->crd_skip + i, len,
		    blk);
		axf->Update(&ctx, blk, len);
	}

	spin_lock(&swcr_spin);
	kschedule = sw->sw_kschedule;
	++sw->sw_kschedule_refs;
	spin_unlock(&swcr_spin);

	if (exf->reinit)
		exf->reinit(kschedule, iv);

	/* Do encryption/decryption with MAC */
	for (i = 0; i < crde->crd_len; i += blksz) {
		len = MIN(crde->crd_len - i, blksz);
		if (len < blksz)
			bzero(blk, blksz);
		crypto_copydata(crde->crd_flags, buf, crde->crd_skip + i, len,
		    blk);
		if (crde->crd_flags & CRD_F_ENCRYPT) {
			exf->encrypt(kschedule, blk, iv);
			axf->Update(&ctx, blk, len);
		} else {
			axf->Update(&ctx, blk, len);
			exf->decrypt(kschedule, blk, iv);
		}
		crypto_copyback(crde->crd_flags, buf, crde->crd_skip + i, len,
		    blk);
	}

	/* Do any required special finalization */
	switch (crda->crd_alg) {
	case CRYPTO_AES_128_GMAC:
	case CRYPTO_AES_192_GMAC:
	case CRYPTO_AES_256_GMAC:
		/* length block */
		bzero(blk, blksz);
		blkp = (uint32_t *)blk + 1;
		*blkp = htobe32(crda->crd_len * 8);
		blkp = (uint32_t *)blk + 3;
		*blkp = htobe32(crde->crd_len * 8);
		axf->Update(&ctx, blk, blksz);
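		/*
		 * The block just hashed is GCM's final GHASH length
		 * block: the second 32-bit word carries len(AAD) and the
		 * fourth carries len(C), both as big-endian bit counts.
		 * Writing only the low word of each 64-bit length field
		 * assumes lengths below 2^32 bits, which the int-sized
		 * crd_len guarantees.
		 */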
		break;
	}

	axf->Final(aalg, &ctx);

	/* Inject the authentication data */
	crypto_copyback(crda->crd_flags, crp->crp_buf, crda->crd_inject,
	    axf->blocksize, aalg);

	spin_lock(&swcr_spin);
	--sw->sw_kschedule_refs;
	spin_unlock(&swcr_spin);

	return (0);
}
/*
 * Apply a compression/decompression algorithm
 */
static int
swcr_compdec(struct cryptodesc *crd, struct swcr_data *sw,
	     caddr_t buf, int flags)
{
	u_int8_t *data, *out;
	struct comp_algo *cxf;
	int adj;
	u_int32_t result;

	cxf = sw->sw_cxf;

	/*
	 * We must handle the whole buffer of data in one time
	 * then if there is not all the data in the mbuf, we must
	 * copy in a buffer.
	 */
	data = kmalloc(crd->crd_len, M_CRYPTO_DATA, M_INTWAIT);
	if (data == NULL)
		return (EINVAL);
	crypto_copydata(flags, buf, crd->crd_skip, crd->crd_len, data);

	if (crd->crd_flags & CRD_F_COMP)
		result = cxf->compress(data, crd->crd_len, &out);
	else
		result = cxf->decompress(data, crd->crd_len, &out);

	kfree(data, M_CRYPTO_DATA);
	if (result == 0)
		return EINVAL;

	/*
	 * Copy back the (de)compressed data.  m_copyback is
	 * extending the mbuf as necessary.
	 */
	sw->sw_size = result;
	/* Check the compressed size when doing compression */
	if (crd->crd_flags & CRD_F_COMP) {
		if (result >= crd->crd_len) {
			/* Compression was useless, we lost time */
			kfree(out, M_CRYPTO_DATA);
			return 0;
		}
	}

	crypto_copyback(flags, buf, crd->crd_skip, result, out);
	if (result < crd->crd_len) {
		adj = result - crd->crd_len;
		if (flags & CRYPTO_F_IMBUF) {
			m_adj((struct mbuf *)buf, adj);
		} else if (flags & CRYPTO_F_IOV) {
			struct uio *uio = (struct uio *)buf;
			int ind;

			adj = crd->crd_len - result;
			ind = uio->uio_iovcnt - 1;
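			/*
			 * Trim the slack left by (de)compression off the
			 * tail of the uio in place: walk the iovecs from
			 * the last one backwards, shrinking or emptying
			 * each until adj bytes are gone (the iovec
			 * counterpart of the negative m_adj() above).
			 */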
			while (adj > 0 && ind >= 0) {
				if (adj < uio->uio_iov[ind].iov_len) {
					uio->uio_iov[ind].iov_len -= adj;
					break;
				}

				adj -= uio->uio_iov[ind].iov_len;
				uio->uio_iov[ind].iov_len = 0;
				ind--;
				uio->uio_iovcnt--;
			}
		}
	}
	kfree(out, M_CRYPTO_DATA);
	return 0;
}
/*
 * Generate a new software session.
 */
static int
swcr_newsession(device_t dev, u_int32_t *sid, struct cryptoini *cri)
{
	struct swcr_data *swd_base;
	struct swcr_data **swd;
	struct swcr_data **oswd;
	struct auth_hash *axf;
	struct enc_xform *txf;
	struct comp_algo *cxf;
	u_int32_t i;
	u_int32_t n;
	int error;

	if (sid == NULL || cri == NULL)
		return EINVAL;

	swd_base = NULL;
	swd = &swd_base;

	while (cri) {
		*swd = kmalloc(sizeof(struct swcr_data),
			       M_CRYPTO_DATA, M_WAITOK | M_ZERO);

		switch (cri->cri_alg) {
		case CRYPTO_DES_CBC:
			txf = &enc_xform_des;
			goto enccommon;
		case CRYPTO_3DES_CBC:
			txf = &enc_xform_3des;
			goto enccommon;
		case CRYPTO_BLF_CBC:
			txf = &enc_xform_blf;
			goto enccommon;
		case CRYPTO_CAST_CBC:
			txf = &enc_xform_cast5;
			goto enccommon;
		case CRYPTO_SKIPJACK_CBC:
			txf = &enc_xform_skipjack;
			goto enccommon;
		case CRYPTO_RIJNDAEL128_CBC:
			txf = &enc_xform_rijndael128;
			goto enccommon;
		case CRYPTO_AES_XTS:
			txf = &enc_xform_aes_xts;
			goto enccommon;
		case CRYPTO_AES_CTR:
			txf = &enc_xform_aes_ctr;
			goto enccommon;
		case CRYPTO_AES_GCM_16:
			txf = &enc_xform_aes_gcm;
			goto enccommon;
		case CRYPTO_AES_GMAC:
			txf = &enc_xform_aes_gmac;
			(*swd)->sw_exf = txf;
			break;
		case CRYPTO_CAMELLIA_CBC:
			txf = &enc_xform_camellia;
			goto enccommon;
		case CRYPTO_TWOFISH_CBC:
			txf = &enc_xform_twofish;
			goto enccommon;
		case CRYPTO_SERPENT_CBC:
			txf = &enc_xform_serpent;
			goto enccommon;
		case CRYPTO_TWOFISH_XTS:
			txf = &enc_xform_twofish_xts;
			goto enccommon;
		case CRYPTO_SERPENT_XTS:
			txf = &enc_xform_serpent_xts;
			goto enccommon;
		case CRYPTO_NULL_CBC:
			txf = &enc_xform_null;
			goto enccommon;
		enccommon:
			if (cri->cri_key != NULL) {
				error = txf->setkey(&((*swd)->sw_kschedule),
						    cri->cri_key,
						    cri->cri_klen / 8);
				if (error) {
					swcr_freesession_slot(&swd_base, 0);
					return error;
				}
			}
			(*swd)->sw_exf = txf;
			break;

		case CRYPTO_MD5_HMAC:
			axf = &auth_hash_hmac_md5;
			goto authcommon;
		case CRYPTO_SHA1_HMAC:
			axf = &auth_hash_hmac_sha1;
			goto authcommon;
		case CRYPTO_SHA2_256_HMAC:
			axf = &auth_hash_hmac_sha2_256;
			goto authcommon;
		case CRYPTO_SHA2_384_HMAC:
			axf = &auth_hash_hmac_sha2_384;
			goto authcommon;
		case CRYPTO_SHA2_512_HMAC:
			axf = &auth_hash_hmac_sha2_512;
			goto authcommon;
		case CRYPTO_NULL_HMAC:
			axf = &auth_hash_null;
			goto authcommon;
		case CRYPTO_RIPEMD160_HMAC:
			axf = &auth_hash_hmac_ripemd_160;
		authcommon:
			(*swd)->sw_ictx = kmalloc(axf->ctxsize, M_CRYPTO_DATA,
						  M_WAITOK);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession_slot(&swd_base, 0);
				return ENOBUFS;
			}

			(*swd)->sw_octx = kmalloc(axf->ctxsize, M_CRYPTO_DATA,
						  M_WAITOK);
			if ((*swd)->sw_octx == NULL) {
				swcr_freesession_slot(&swd_base, 0);
				return ENOBUFS;
			}

			if (cri->cri_key != NULL) {
				swcr_authprepare(axf, *swd, cri->cri_key,
						 cri->cri_klen);
			}

			(*swd)->sw_mlen = cri->cri_mlen;
			(*swd)->sw_axf = axf;
			break;

		case CRYPTO_MD5_KPDK:
			axf = &auth_hash_key_md5;
			goto auth2common;
		case CRYPTO_SHA1_KPDK:
			axf = &auth_hash_key_sha1;
		auth2common:
			(*swd)->sw_ictx = kmalloc(axf->ctxsize, M_CRYPTO_DATA,
						  M_WAITOK);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession_slot(&swd_base, 0);
				return ENOBUFS;
			}

			(*swd)->sw_octx = kmalloc(cri->cri_klen / 8,
						  M_CRYPTO_DATA, M_WAITOK);
			if ((*swd)->sw_octx == NULL) {
				swcr_freesession_slot(&swd_base, 0);
				return ENOBUFS;
			}

			/* Store the key so we can "append" it to the payload */
			if (cri->cri_key != NULL) {
				swcr_authprepare(axf, *swd, cri->cri_key,
						 cri->cri_klen);
			}

			(*swd)->sw_mlen = cri->cri_mlen;
			(*swd)->sw_axf = axf;
			break;

		case CRYPTO_MD5:
			axf = &auth_hash_md5;
			goto auth3common;
		case CRYPTO_SHA1:
			axf = &auth_hash_sha1;
		auth3common:
			(*swd)->sw_ictx = kmalloc(axf->ctxsize, M_CRYPTO_DATA,
						  M_WAITOK);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession_slot(&swd_base, 0);
				return ENOBUFS;
			}

			axf->Init((*swd)->sw_ictx);
			(*swd)->sw_mlen = cri->cri_mlen;
			(*swd)->sw_axf = axf;
			break;

		case CRYPTO_AES_128_GMAC:
			axf = &auth_hash_gmac_aes_128;
			goto auth4common;
		case CRYPTO_AES_192_GMAC:
			axf = &auth_hash_gmac_aes_192;
			goto auth4common;
		case CRYPTO_AES_256_GMAC:
			axf = &auth_hash_gmac_aes_256;
		auth4common:
			(*swd)->sw_ictx = kmalloc(axf->ctxsize, M_CRYPTO_DATA,
						  M_WAITOK);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession_slot(&swd_base, 0);
				return ENOBUFS;
			}

			axf->Init((*swd)->sw_ictx);
			axf->Setkey((*swd)->sw_ictx, cri->cri_key,
				    cri->cri_klen / 8);
			(*swd)->sw_axf = axf;
			break;

		case CRYPTO_DEFLATE_COMP:
			cxf = &comp_algo_deflate;
			(*swd)->sw_cxf = cxf;
			break;
		default:
			swcr_freesession_slot(&swd_base, 0);
			return EINVAL;
		}

		(*swd)->sw_alg = cri->cri_alg;
		cri = cri->cri_next;
		swd = &((*swd)->sw_next);
	}
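	/*
	 * Slot allocation below scans under swcr_spin, using
	 * swcr_minsesnum as a low-water hint.  When the table is full it
	 * is regrown outside the lock and swapped in after re-checking
	 * that no other thread grew it first; losing that race simply
	 * frees our copy and retries the scan.
	 */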
	for (;;) {
		/*
		 * Atomically allocate a session
		 */
		spin_lock(&swcr_spin);
		for (i = swcr_minsesnum; i < swcr_sesnum; ++i) {
			if (swcr_sessions[i] == NULL)
				break;
		}
		if (i < swcr_sesnum) {
			swcr_sessions[i] = swd_base;
			swcr_minsesnum = i + 1;
			spin_unlock(&swcr_spin);
			break;
		}
		n = swcr_sesnum;
		spin_unlock(&swcr_spin);

		/*
		 * A larger allocation is required, reallocate the array
		 * and replace, checking for SMP races.
		 */
		if (n < CRYPTO_SW_SESSIONS)
			n = CRYPTO_SW_SESSIONS;
		else
			n = n * 3 / 2;
		swd = kmalloc(n * sizeof(struct swcr_data *),
			      M_CRYPTO_DATA, M_WAITOK | M_ZERO);

		spin_lock(&swcr_spin);
		if (swcr_sesnum >= n) {
			spin_unlock(&swcr_spin);
			kfree(swd, M_CRYPTO_DATA);
		} else if (swcr_sesnum) {
			bcopy(swcr_sessions, swd,
			      swcr_sesnum * sizeof(struct swcr_data *));
			oswd = swcr_sessions;
			swcr_sessions = swd;
			swcr_sesnum = n;
			spin_unlock(&swcr_spin);
			kfree(oswd, M_CRYPTO_DATA);
		} else {
			swcr_sessions = swd;
			swcr_sesnum = n;
			spin_unlock(&swcr_spin);
		}
	}

	*sid = i;
	return 0;
}
/*
 * Free a session.
 */
static int
swcr_freesession(device_t dev, u_int64_t tid)
{
	u_int32_t sid = CRYPTO_SESID2LID(tid);

	if (sid >= swcr_sesnum || swcr_sessions == NULL ||
	    swcr_sessions[sid] == NULL) {
		return EINVAL;
	}

	/* Silently accept and return */
	if (sid == 0)
		return 0;

	return (swcr_freesession_slot(&swcr_sessions[sid], sid));
}
static int
swcr_freesession_slot(struct swcr_data **swdp, u_int32_t sid)
{
	struct enc_xform *txf;
	struct auth_hash *axf;
	struct comp_algo *cxf;
	struct swcr_data *swd;
	struct swcr_data *swnext;

	/*
	 * Protect session detachment with the spinlock.
	 */
	spin_lock(&swcr_spin);
	swnext = *swdp;
	*swdp = NULL;
	if (sid && swcr_minsesnum > sid)
		swcr_minsesnum = sid;
	spin_unlock(&swcr_spin);

	/*
	 * Clean up at our leisure.
	 */
	while ((swd = swnext) != NULL) {
		swnext = swd->sw_next;
		swd->sw_next = NULL;

		switch (swd->sw_alg) {
		case CRYPTO_DES_CBC:
		case CRYPTO_3DES_CBC:
		case CRYPTO_BLF_CBC:
		case CRYPTO_CAST_CBC:
		case CRYPTO_SKIPJACK_CBC:
		case CRYPTO_RIJNDAEL128_CBC:
		case CRYPTO_AES_XTS:
		case CRYPTO_AES_CTR:
		case CRYPTO_AES_GCM_16:
		case CRYPTO_AES_GMAC:
		case CRYPTO_CAMELLIA_CBC:
		case CRYPTO_TWOFISH_CBC:
		case CRYPTO_SERPENT_CBC:
		case CRYPTO_TWOFISH_XTS:
		case CRYPTO_SERPENT_XTS:
		case CRYPTO_NULL_CBC:
			txf = swd->sw_exf;

			if (swd->sw_kschedule)
				txf->zerokey(&(swd->sw_kschedule));
			break;

		case CRYPTO_MD5_HMAC:
		case CRYPTO_SHA1_HMAC:
		case CRYPTO_SHA2_256_HMAC:
		case CRYPTO_SHA2_384_HMAC:
		case CRYPTO_SHA2_512_HMAC:
		case CRYPTO_RIPEMD160_HMAC:
		case CRYPTO_NULL_HMAC:
			axf = swd->sw_axf;

			if (swd->sw_ictx) {
				bzero(swd->sw_ictx, axf->ctxsize);
				kfree(swd->sw_ictx, M_CRYPTO_DATA);
			}
			if (swd->sw_octx) {
				bzero(swd->sw_octx, axf->ctxsize);
				kfree(swd->sw_octx, M_CRYPTO_DATA);
			}
			break;

		case CRYPTO_MD5_KPDK:
		case CRYPTO_SHA1_KPDK:
			axf = swd->sw_axf;

			if (swd->sw_ictx) {
				bzero(swd->sw_ictx, axf->ctxsize);
				kfree(swd->sw_ictx, M_CRYPTO_DATA);
			}
			if (swd->sw_octx) {
				bzero(swd->sw_octx, swd->sw_klen);
				kfree(swd->sw_octx, M_CRYPTO_DATA);
			}
			break;

		case CRYPTO_AES_128_GMAC:
		case CRYPTO_AES_192_GMAC:
		case CRYPTO_AES_256_GMAC:
		case CRYPTO_MD5:
		case CRYPTO_SHA1:
			axf = swd->sw_axf;

			if (swd->sw_ictx) {
				bzero(swd->sw_ictx, axf->ctxsize);
				kfree(swd->sw_ictx, M_CRYPTO_DATA);
			}
			break;

		case CRYPTO_DEFLATE_COMP:
			cxf = swd->sw_cxf;
			break;
		}

		kfree(swd, M_CRYPTO_DATA);
	}
	return 0;
}
/*
 * Process a software request.
 */
static int
swcr_process(device_t dev, struct cryptop *crp, int hint)
{
	struct cryptodesc *crd;
	struct swcr_data *sw;
	u_int32_t lid;

	/* Sanity check */
	if (crp == NULL)
		return EINVAL;

	if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
		crp->crp_etype = EINVAL;
		goto done;
	}

	lid = crp->crp_sid & 0xffffffff;
	if (lid >= swcr_sesnum || lid == 0 || swcr_sessions[lid] == NULL) {
		crp->crp_etype = ENOENT;
		goto done;
	}

	/* Go through crypto descriptors, processing as we go */
	for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
		/*
		 * Find the crypto context.
		 *
		 * XXX Note that the logic here prevents us from having
		 * XXX the same algorithm multiple times in a session
		 * XXX (or rather, we can but it won't give us the right
		 * XXX results).  To do that, we'd need some way of
		 * XXX differentiating between the various instances of
		 * XXX an algorithm (so we can locate the correct crypto
		 * XXX context).
		 */
		for (sw = swcr_sessions[lid];
		     sw && sw->sw_alg != crd->crd_alg;
		     sw = sw->sw_next)
			;

		/* No such context ? */
		if (sw == NULL) {
			crp->crp_etype = EINVAL;
			goto done;
		}
		switch (sw->sw_alg) {
		case CRYPTO_DES_CBC:
		case CRYPTO_3DES_CBC:
		case CRYPTO_BLF_CBC:
		case CRYPTO_CAST_CBC:
		case CRYPTO_SKIPJACK_CBC:
		case CRYPTO_RIJNDAEL128_CBC:
		case CRYPTO_AES_XTS:
		case CRYPTO_AES_CTR:
		case CRYPTO_CAMELLIA_CBC:
		case CRYPTO_TWOFISH_CBC:
		case CRYPTO_SERPENT_CBC:
		case CRYPTO_TWOFISH_XTS:
		case CRYPTO_SERPENT_XTS:
			if ((crp->crp_etype = swcr_encdec(crd, sw,
			    crp->crp_buf, crp->crp_flags)) != 0)
				goto done;
			break;
		case CRYPTO_NULL_CBC:
			crp->crp_etype = 0;
			break;
		case CRYPTO_MD5_HMAC:
		case CRYPTO_SHA1_HMAC:
		case CRYPTO_SHA2_256_HMAC:
		case CRYPTO_SHA2_384_HMAC:
		case CRYPTO_SHA2_512_HMAC:
		case CRYPTO_RIPEMD160_HMAC:
		case CRYPTO_NULL_HMAC:
		case CRYPTO_MD5_KPDK:
		case CRYPTO_SHA1_KPDK:
		case CRYPTO_MD5:
		case CRYPTO_SHA1:
			if ((crp->crp_etype = swcr_authcompute(crd, sw,
			    crp->crp_buf, crp->crp_flags)) != 0)
				goto done;
			break;

		case CRYPTO_AES_GCM_16:
		case CRYPTO_AES_GMAC:
		case CRYPTO_AES_128_GMAC:
		case CRYPTO_AES_192_GMAC:
		case CRYPTO_AES_256_GMAC:
			crp->crp_etype = swcr_combined(crp);
			goto done;

		case CRYPTO_DEFLATE_COMP:
			if ((crp->crp_etype = swcr_compdec(crd, sw,
			    crp->crp_buf, crp->crp_flags)) != 0)
				goto done;
			else
				crp->crp_olen = (int)sw->sw_size;
			break;

		default:
			/* Unknown/unsupported algorithm */
			crp->crp_etype = EINVAL;
			goto done;
		}
	}

done:
	crypto_done(crp);
	return 0;
}
static void
swcr_identify(driver_t *drv, device_t parent)
{
	/* NB: order 10 is so we get attached after h/w devices */
	/* XXX: not certain this BUS_ADD_CHILD usage is correct */
	if (device_find_child(parent, "cryptosoft", -1) == NULL &&
	    BUS_ADD_CHILD(parent, parent, 10, "cryptosoft", -1) == 0)
		panic("cryptosoft: could not attach");
}
static int
swcr_probe(device_t dev)
{
	device_set_desc(dev, "software crypto");
	return (0);
}
static int
swcr_attach(device_t dev)
{
	memset(hmac_ipad_buffer, HMAC_IPAD_VAL, HMAC_MAX_BLOCK_LEN);
	memset(hmac_opad_buffer, HMAC_OPAD_VAL, HMAC_MAX_BLOCK_LEN);
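	/*
	 * 0x36/0x5c (HMAC_IPAD_VAL/HMAC_OPAD_VAL) are the standard
	 * RFC 2104 pad constants; swcr_authprepare() relies on these
	 * buffers to pad short HMAC keys out to the hash blocksize.
	 */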
	swcr_id = crypto_get_driverid(dev, CRYPTOCAP_F_SOFTWARE |
					   CRYPTOCAP_F_SYNC);
	if (swcr_id < 0) {
		device_printf(dev, "cannot initialize!\n");
		return ENOMEM;
	}

#define	REGISTER(alg) \
	crypto_register(swcr_id, alg, 0, 0)
	REGISTER(CRYPTO_DES_CBC);
	REGISTER(CRYPTO_3DES_CBC);
	REGISTER(CRYPTO_BLF_CBC);
	REGISTER(CRYPTO_CAST_CBC);
	REGISTER(CRYPTO_SKIPJACK_CBC);
	REGISTER(CRYPTO_NULL_CBC);
	REGISTER(CRYPTO_MD5_HMAC);
	REGISTER(CRYPTO_SHA1_HMAC);
	REGISTER(CRYPTO_SHA2_256_HMAC);
	REGISTER(CRYPTO_SHA2_384_HMAC);
	REGISTER(CRYPTO_SHA2_512_HMAC);
	REGISTER(CRYPTO_RIPEMD160_HMAC);
	REGISTER(CRYPTO_NULL_HMAC);
	REGISTER(CRYPTO_MD5_KPDK);
	REGISTER(CRYPTO_SHA1_KPDK);
	REGISTER(CRYPTO_MD5);
	REGISTER(CRYPTO_SHA1);
	REGISTER(CRYPTO_RIJNDAEL128_CBC);
	REGISTER(CRYPTO_AES_XTS);
	REGISTER(CRYPTO_AES_CTR);
	REGISTER(CRYPTO_AES_GCM_16);
	REGISTER(CRYPTO_AES_GMAC);
	REGISTER(CRYPTO_AES_128_GMAC);
	REGISTER(CRYPTO_AES_192_GMAC);
	REGISTER(CRYPTO_AES_256_GMAC);
	REGISTER(CRYPTO_CAMELLIA_CBC);
	REGISTER(CRYPTO_TWOFISH_CBC);
	REGISTER(CRYPTO_SERPENT_CBC);
	REGISTER(CRYPTO_TWOFISH_XTS);
	REGISTER(CRYPTO_SERPENT_XTS);
	REGISTER(CRYPTO_DEFLATE_COMP);
#undef REGISTER

	return 0;
}
static int
swcr_detach(device_t dev)
{
	crypto_unregister_all(swcr_id);
	if (swcr_sessions != NULL)
		kfree(swcr_sessions, M_CRYPTO_DATA);
	return 0;
}
static device_method_t swcr_methods[] = {
	DEVMETHOD(device_identify,	swcr_identify),
	DEVMETHOD(device_probe,		swcr_probe),
	DEVMETHOD(device_attach,	swcr_attach),
	DEVMETHOD(device_detach,	swcr_detach),

	DEVMETHOD(cryptodev_newsession,	swcr_newsession),
	DEVMETHOD(cryptodev_freesession,swcr_freesession),
	DEVMETHOD(cryptodev_process,	swcr_process),

	DEVMETHOD_END
};

static driver_t swcr_driver = {
	"cryptosoft",
	swcr_methods,
	0,		/* NB: no softc */
};
static devclass_t swcr_devclass;
/*
 * NB: We explicitly reference the crypto module so we
 * get the necessary ordering when built as a loadable
 * module.  This is required because we bundle the crypto
 * module code together with the cryptosoft driver (otherwise
 * normal module dependencies would handle things).
 */
extern int crypto_modevent(struct module *, int, void *);

/* XXX where to attach */
DRIVER_MODULE(cryptosoft, nexus, swcr_driver, swcr_devclass, crypto_modevent, NULL);
MODULE_VERSION(cryptosoft, 1);
MODULE_DEPEND(cryptosoft, crypto, 1, 1, 1);