/*
 * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
 * Copyright (c) 2002-2006 Sam Leffler, Errno Consulting
 *
 * This code was written by Angelos D. Keromytis in Athens, Greece, in
 * February 2000. Network Security Technologies Inc. (NSTI) kindly
 * supported the development of this code.
 *
 * Copyright (c) 2000, 2001 Angelos D. Keromytis
 *
 * SMP modifications by Matthew Dillon for the DragonFlyBSD Project
 *
 * Permission to use, copy, and modify this software with or without fee
 * is hereby granted, provided that this entire notice is included in
 * all source code copies of any software which is or includes a copy or
 * modification of this software.
 *
 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
 * PURPOSE.
 *
 * $FreeBSD: src/sys/opencrypto/cryptosoft.c,v 1.23 2009/02/05 17:43:12 imp Exp $
 * $OpenBSD: cryptosoft.c,v 1.35 2002/04/26 08:43:50 deraadt Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/sysctl.h>
#include <sys/errno.h>
#include <sys/random.h>
#include <sys/kernel.h>
#include <sys/uio.h>

#include <sys/spinlock2.h>

#include <crypto/blowfish/blowfish.h>
#include <crypto/sha1.h>
#include <opencrypto/rmd160.h>
#include <opencrypto/cast.h>
#include <opencrypto/skipjack.h>

#include <opencrypto/cryptodev.h>
#include <opencrypto/cryptosoft.h>
#include <opencrypto/xform.h>

#include "cryptodev_if.h"
static int32_t swcr_id;
static struct swcr_data **swcr_sessions = NULL;
static u_int32_t swcr_sesnum;
static u_int32_t swcr_minsesnum = 1;

static struct spinlock swcr_spin = SPINLOCK_INITIALIZER(swcr_spin);

u_int8_t hmac_ipad_buffer[HMAC_MAX_BLOCK_LEN];
u_int8_t hmac_opad_buffer[HMAC_MAX_BLOCK_LEN];

static int swcr_encdec(struct cryptodesc *, struct swcr_data *, caddr_t, int);
static int swcr_authcompute(struct cryptodesc *, struct swcr_data *, caddr_t,
			    int);
static int swcr_compdec(struct cryptodesc *, struct swcr_data *, caddr_t, int);
static int swcr_freesession(device_t dev, u_int64_t tid);
static int swcr_freesession_slot(struct swcr_data **swdp, u_int32_t sid);
/*
 * Apply a symmetric encryption/decryption algorithm.
 */
static int
swcr_encdec(struct cryptodesc *crd, struct swcr_data *sw, caddr_t buf,
	    int flags)
{
	unsigned char iv[EALG_MAX_BLOCK_LEN], blk[EALG_MAX_BLOCK_LEN], *idat;
	unsigned char *ivp, piv[EALG_MAX_BLOCK_LEN];
	u_int8_t *kschedule;
	u_int8_t *okschedule;
	struct enc_xform *exf;
	int i, k, j, blks, ivlen;
	int error;
	int explicit_kschedule;

	exf = sw->sw_exf;
	blks = exf->blocksize;
	ivlen = exf->ivsize;

	/* Check for non-padded data */
	if (crd->crd_len % blks)
		return EINVAL;

	/* Initialize the IV */
	if (crd->crd_flags & CRD_F_ENCRYPT) {
		/* IV explicitly provided ? */
		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
			bcopy(crd->crd_iv, iv, ivlen);
		else
			karc4rand(iv, ivlen);

		/* Do we need to write the IV */
		if (!(crd->crd_flags & CRD_F_IV_PRESENT))
			crypto_copyback(flags, buf, crd->crd_inject, ivlen, iv);
	} else {	/* Decryption */
		/* IV explicitly provided ? */
		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
			bcopy(crd->crd_iv, iv, ivlen);
		else {
			/* Get IV off buf */
			crypto_copydata(flags, buf, crd->crd_inject, ivlen, iv);
		}
	}
	ivp = iv;

	/*
	 * xforms that provide a reinit method perform all IV
	 * handling themselves.
	 */
	if (exf->reinit)
		exf->reinit(sw->sw_kschedule, iv);

	/*
	 * The semantics are seriously broken because the session key
	 * storage was never designed for concurrent ops.
	 */
	if (crd->crd_flags & CRD_F_KEY_EXPLICIT) {
		kschedule = NULL;
		explicit_kschedule = 1;
		error = exf->setkey(&kschedule,
				    crd->crd_key, crd->crd_klen / 8);
		if (error)
			goto done;
	} else {
		spin_lock_wr(&swcr_spin);
		kschedule = sw->sw_kschedule;
		++sw->sw_kschedule_refs;
		spin_unlock_wr(&swcr_spin);
		explicit_kschedule = 0;
	}
	if (flags & CRYPTO_F_IMBUF) {
		struct mbuf *m = (struct mbuf *) buf;

		/* Find beginning of data */
		m = m_getptr(m, crd->crd_skip, &k);
		if (m == NULL) {
			error = EINVAL;
			goto done;
		}
		i = crd->crd_len;

		while (i > 0) {
			/*
			 * If there's insufficient data at the end of
			 * an mbuf, we have to do some copying.
			 */
			if (m->m_len < k + blks && m->m_len != k) {
				m_copydata(m, k, blks, blk);

				/* Actual encryption/decryption */
				if (exf->reinit) {
					if (crd->crd_flags & CRD_F_ENCRYPT) {
						exf->encrypt(kschedule, blk);
					} else {
						exf->decrypt(kschedule, blk);
					}
				} else if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];
					exf->encrypt(kschedule, blk);

					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					bcopy(blk, iv, blks);
					ivp = iv;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					if (ivp == iv)
						bcopy(blk, piv, blks);
					else
						bcopy(blk, iv, blks);
					exf->decrypt(kschedule, blk);

					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];
					if (ivp == iv)
						bcopy(piv, iv, blks);
					else
						ivp = iv;
				}

				/* Copy back decrypted block */
				m_copyback(m, k, blks, blk);

				/* Advance pointer */
				m = m_getptr(m, k + blks, &k);
				if (m == NULL) {
					error = EINVAL;
					goto done;
				}
				i -= blks;

				/* Could be done... */
				if (i == 0)
					break;
			}
			/* Skip possibly empty mbufs */
			if (k == m->m_len) {
				for (m = m->m_next; m && m->m_len == 0;
				     m = m->m_next)
					;
				k = 0;
			}

			/* Sanity check */
			if (m == NULL) {
				error = EINVAL;
				goto done;
			}

			/*
			 * Warning: idat may point to garbage here, but
			 * we only use it in the while() loop, only if
			 * there are indeed enough data.
			 */
			idat = mtod(m, unsigned char *) + k;

			while (m->m_len >= k + blks && i > 0) {
				if (exf->reinit) {
					if (crd->crd_flags & CRD_F_ENCRYPT) {
						exf->encrypt(kschedule, idat);
					} else {
						exf->decrypt(kschedule, idat);
					}
				} else if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];
					exf->encrypt(kschedule, idat);
					ivp = idat;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block to be used
					 * in next block's processing.
					 */
					if (ivp == iv)
						bcopy(idat, piv, blks);
					else
						bcopy(idat, iv, blks);
					exf->decrypt(kschedule, idat);

					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];
					if (ivp == iv)
						bcopy(piv, iv, blks);
					else
						ivp = iv;
				}

				idat += blks;
				k += blks;
				i -= blks;
			}
		}
		error = 0;	/* Done with mbuf encryption/decryption */
	} else if (flags & CRYPTO_F_IOV) {
		struct uio *uio = (struct uio *) buf;
		struct iovec *iov;

		/* Find beginning of data */
		iov = cuio_getptr(uio, crd->crd_skip, &k);
		if (iov == NULL) {
			error = EINVAL;
			goto done;
		}
		i = crd->crd_len;

		while (i > 0) {
			/*
			 * If there's insufficient data at the end of
			 * an iovec, we have to do some copying.
			 */
			if (iov->iov_len < k + blks && iov->iov_len != k) {
				cuio_copydata(uio, k, blks, blk);

				/* Actual encryption/decryption */
				if (exf->reinit) {
					if (crd->crd_flags & CRD_F_ENCRYPT) {
						exf->encrypt(kschedule, blk);
					} else {
						exf->decrypt(kschedule, blk);
					}
				} else if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];
					exf->encrypt(kschedule, blk);

					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					bcopy(blk, iv, blks);
					ivp = iv;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					if (ivp == iv)
						bcopy(blk, piv, blks);
					else
						bcopy(blk, iv, blks);
					exf->decrypt(kschedule, blk);

					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];
					if (ivp == iv)
						bcopy(piv, iv, blks);
					else
						ivp = iv;
				}

				/* Copy back decrypted block */
				cuio_copyback(uio, k, blks, blk);

				/* Advance pointer */
				iov = cuio_getptr(uio, k + blks, &k);
				if (iov == NULL) {
					error = EINVAL;
					goto done;
				}
				i -= blks;

				/* Could be done... */
				if (i == 0)
					break;
			}

			/*
			 * Warning: idat may point to garbage here, but
			 * we only use it in the while() loop, only if
			 * there are indeed enough data.
			 */
			idat = (unsigned char *)iov->iov_base + k;

			while (iov->iov_len >= k + blks && i > 0) {
				if (exf->reinit) {
					if (crd->crd_flags & CRD_F_ENCRYPT) {
						exf->encrypt(kschedule, idat);
					} else {
						exf->decrypt(kschedule, idat);
					}
				} else if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];
					exf->encrypt(kschedule, idat);
					ivp = idat;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block to be used
					 * in next block's processing.
					 */
					if (ivp == iv)
						bcopy(idat, piv, blks);
					else
						bcopy(idat, iv, blks);
					exf->decrypt(kschedule, idat);

					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];
					if (ivp == iv)
						bcopy(piv, iv, blks);
					else
						ivp = iv;
				}

				idat += blks;
				k += blks;
				i -= blks;
			}
			if (k == iov->iov_len) {
				iov++;
				k = 0;
			}
		}
		error = 0;	/* Done with iovec encryption/decryption */
	} else {
		/* Contiguous buffer */
		if (crd->crd_flags & CRD_F_ENCRYPT) {
			for (i = crd->crd_skip;
			     i < crd->crd_skip + crd->crd_len; i += blks) {
				/* XOR with the IV/previous block, as appropriate. */
				if (i == crd->crd_skip)
					for (k = 0; k < blks; k++)
						buf[i + k] ^= ivp[k];
				else
					for (k = 0; k < blks; k++)
						buf[i + k] ^= buf[i + k - blks];
				exf->encrypt(kschedule, buf + i);
			}
		} else {	/* Decrypt */
			/*
			 * Start at the end, so we don't need to keep the
			 * encrypted block as the IV for the next block.
			 */
			for (i = crd->crd_skip + crd->crd_len - blks;
			     i >= crd->crd_skip; i -= blks) {
				exf->decrypt(kschedule, buf + i);

				/* XOR with the IV/previous block, as appropriate */
				if (i == crd->crd_skip)
					for (k = 0; k < blks; k++)
						buf[i + k] ^= ivp[k];
				else
					for (k = 0; k < blks; k++)
						buf[i + k] ^= buf[i + k - blks];
			}
		}
		error = 0;	/* Done with contiguous buffer encrypt/decrypt */
	}
done:
	/*
	 * Cleanup - explicitly replace the session key if requested
	 * (horrible semantics for concurrent operation)
	 */
	if (explicit_kschedule) {
		okschedule = kschedule;
		spin_lock_wr(&swcr_spin);
		if (sw->sw_kschedule && sw->sw_kschedule_refs == 0) {
			okschedule = sw->sw_kschedule;
			sw->sw_kschedule = kschedule;
		}
		spin_unlock_wr(&swcr_spin);
		if (okschedule)
			exf->zerokey(&okschedule);
	} else {
		spin_lock_wr(&swcr_spin);
		--sw->sw_kschedule_refs;
		spin_unlock_wr(&swcr_spin);
	}
	return error;
}
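/*
 * Illustrative sketch (not part of the driver): the CBC chaining that
 * swcr_encdec() implements above, shown on a plain contiguous buffer.
 * The toy_encrypt/toy_decrypt stand-ins are hypothetical; any block
 * cipher with an 8-byte block fits.  Decrypting back-to-front, as the
 * contiguous-buffer path does, avoids having to save each ciphertext
 * block (the ivp/piv juggling in the mbuf/iovec paths).
 */
#if 0	/* standalone userland sketch */
#define BLKS	8

static void toy_encrypt(unsigned char *blk)
{ for (int j = 0; j < BLKS; j++) blk[j] += 42; }
static void toy_decrypt(unsigned char *blk)
{ for (int j = 0; j < BLKS; j++) blk[j] -= 42; }

static void
cbc_encrypt(unsigned char *buf, int len, const unsigned char *iv)
{
	const unsigned char *ivp = iv;

	for (int i = 0; i < len; i += BLKS) {
		for (int j = 0; j < BLKS; j++)	/* XOR with IV/previous block */
			buf[i + j] ^= ivp[j];
		toy_encrypt(buf + i);
		ivp = buf + i;			/* ciphertext chains forward */
	}
}

static void
cbc_decrypt(unsigned char *buf, int len, const unsigned char *iv)
{
	/* Back-to-front: earlier ciphertext blocks are still intact. */
	for (int i = len - BLKS; i >= 0; i -= BLKS) {
		const unsigned char *ivp = (i == 0) ? iv : buf + i - BLKS;

		toy_decrypt(buf + i);
		for (int j = 0; j < BLKS; j++)
			buf[i + j] ^= ivp[j];
	}
}
#endif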
/*
 * Prepare the authentication state, e.g. precompute the HMAC inner
 * and outer contexts from the key.
 */
static void
swcr_authprepare(struct auth_hash *axf, struct swcr_data *sw, u_char *key,
		 int klen)
{
	int k;

	klen /= 8;

	switch (axf->type) {
	case CRYPTO_MD5_HMAC:
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_NULL_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
		for (k = 0; k < klen; k++)
			key[k] ^= HMAC_IPAD_VAL;

		axf->Init(sw->sw_ictx);
		axf->Update(sw->sw_ictx, key, klen);
		axf->Update(sw->sw_ictx, hmac_ipad_buffer, axf->blocksize - klen);

		for (k = 0; k < klen; k++)
			key[k] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL);

		axf->Init(sw->sw_octx);
		axf->Update(sw->sw_octx, key, klen);
		axf->Update(sw->sw_octx, hmac_opad_buffer, axf->blocksize - klen);

		for (k = 0; k < klen; k++)
			key[k] ^= HMAC_OPAD_VAL;
		break;
	case CRYPTO_MD5_KPDK:
	case CRYPTO_SHA1_KPDK:
	{
		/* We need a buffer that can hold an md5 and a sha1 result. */
		u_char buf[SHA1_RESULTLEN];

		sw->sw_klen = klen;
		bcopy(key, sw->sw_octx, klen);
		axf->Init(sw->sw_ictx);
		axf->Update(sw->sw_ictx, key, klen);
		axf->Final(buf, sw->sw_ictx);
		break;
	}
	default:
		kprintf("%s: CRD_F_KEY_EXPLICIT flag given, but algorithm %d "
			"doesn't use keys.\n", __func__, axf->type);
		break;
	}
}
/*
 * Compute keyed-hash authenticator.
 */
static int
swcr_authcompute(struct cryptodesc *crd, struct swcr_data *sw, caddr_t buf,
		 int flags)
{
	unsigned char aalg[HASH_MAX_LEN];
	struct auth_hash *axf;
	union authctx ctx;
	int err;

	if (sw->sw_ictx == NULL)
		return EINVAL;

	axf = sw->sw_axf;

	if (crd->crd_flags & CRD_F_KEY_EXPLICIT)
		swcr_authprepare(axf, sw, crd->crd_key, crd->crd_klen);

	bcopy(sw->sw_ictx, &ctx, axf->ctxsize);

	err = crypto_apply(flags, buf, crd->crd_skip, crd->crd_len,
	    (int (*)(void *, void *, unsigned int))axf->Update, (caddr_t)&ctx);
	if (err)
		return err;

	switch (sw->sw_alg) {
	case CRYPTO_MD5_HMAC:
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
		if (sw->sw_octx == NULL)
			return EINVAL;

		axf->Final(aalg, &ctx);
		bcopy(sw->sw_octx, &ctx, axf->ctxsize);
		axf->Update(&ctx, aalg, axf->hashsize);
		axf->Final(aalg, &ctx);
		break;

	case CRYPTO_MD5_KPDK:
	case CRYPTO_SHA1_KPDK:
		if (sw->sw_octx == NULL)
			return EINVAL;

		axf->Update(&ctx, sw->sw_octx, sw->sw_klen);
		axf->Final(aalg, &ctx);
		break;

	case CRYPTO_NULL_HMAC:
		axf->Final(aalg, &ctx);
		break;
	}

	/* Inject the authentication data */
	crypto_copyback(flags, buf, crd->crd_inject,
	    sw->sw_mlen == 0 ? axf->hashsize : sw->sw_mlen, aalg);
	return 0;
}
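/*
 * Illustrative sketch (not part of the driver): the per-request HMAC
 * flow above in miniature.  Copying the precomputed inner/outer states
 * replaces the two key-pad passes, so each request costs one pass over
 * the payload plus one short outer pass; the result is then truncated
 * like sw_mlen does above.  The toy H_* hash is hypothetical (same
 * stand-in as the sketch after swcr_authprepare(), repeated here so
 * the block is self-contained); mlen must not exceed the digest size.
 */
#if 0	/* standalone userland sketch */
#include <string.h>

struct toy_ctx { unsigned long sum; };
static void H_update(struct toy_ctx *c, const unsigned char *p, size_t n)
{ while (n--) c->sum = c->sum * 131 + *p++; }
static void H_final(unsigned char *out, struct toy_ctx *c)
{ memcpy(out, &c->sum, sizeof(c->sum)); }

static void
toy_hmac(const struct toy_ctx *ictx, const struct toy_ctx *octx,
	 const unsigned char *data, size_t len,
	 unsigned char *mac, size_t mlen)
{
	struct toy_ctx ctx;
	unsigned char digest[sizeof(unsigned long)];

	ctx = *ictx;				/* resume H(K ^ ipad) */
	H_update(&ctx, data, len);
	H_final(digest, &ctx);			/* inner digest */

	ctx = *octx;				/* resume H(K ^ opad) */
	H_update(&ctx, digest, sizeof(digest));
	H_final(digest, &ctx);			/* full MAC */

	memcpy(mac, digest, mlen);		/* truncate, as sw_mlen does */
}
#endif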
/*
 * Apply a compression/decompression algorithm
 */
static int
swcr_compdec(struct cryptodesc *crd, struct swcr_data *sw,
	     caddr_t buf, int flags)
{
	u_int8_t *data, *out;
	struct comp_algo *cxf;
	int adj;
	u_int32_t result;

	cxf = sw->sw_cxf;

	/*
	 * We must handle the whole buffer in one pass, so if the data
	 * is not contiguous in the mbuf we must copy it into a
	 * temporary buffer first.
	 */
	data = kmalloc(crd->crd_len, M_CRYPTO_DATA, M_INTWAIT);
	crypto_copydata(flags, buf, crd->crd_skip, crd->crd_len, data);

	if (crd->crd_flags & CRD_F_COMP)
		result = cxf->compress(data, crd->crd_len, &out);
	else
		result = cxf->decompress(data, crd->crd_len, &out);

	kfree(data, M_CRYPTO_DATA);
	if (result == 0)
		return EINVAL;

	/*
	 * Copy back the (de)compressed data.  m_copyback will extend
	 * the mbuf as necessary.
	 */
	sw->sw_size = result;
	/* Check the compressed size when doing compression */
	if (crd->crd_flags & CRD_F_COMP) {
		if (result >= crd->crd_len) {
			/* Compression was useless, we lost time */
			kfree(out, M_CRYPTO_DATA);
			return 0;
		}
	}

	crypto_copyback(flags, buf, crd->crd_skip, result, out);
	if (result < crd->crd_len) {
		if (flags & CRYPTO_F_IMBUF) {
			adj = result - crd->crd_len;
			m_adj((struct mbuf *)buf, adj);
		} else if (flags & CRYPTO_F_IOV) {
			struct uio *uio = (struct uio *)buf;
			int ind;

			adj = crd->crd_len - result;
			ind = uio->uio_iovcnt - 1;

			while (adj > 0 && ind >= 0) {
				if (adj < uio->uio_iov[ind].iov_len) {
					uio->uio_iov[ind].iov_len -= adj;
					break;
				}

				adj -= uio->uio_iov[ind].iov_len;
				uio->uio_iov[ind].iov_len = 0;
				ind--;
				uio->uio_iovcnt--;
			}
		}
	}
	kfree(out, M_CRYPTO_DATA);
	return 0;
}
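/*
 * Illustrative sketch (not part of the driver): trimming the tail of a
 * scatter/gather list after compression shrinks the payload, as the
 * uio branch above does.  Plain C over a hypothetical iovec-like pair
 * array; whole trailing segments are dropped until the residual trim
 * fits inside one segment.
 */
#if 0	/* standalone userland sketch */
#include <stddef.h>

struct seg { void *base; size_t len; };

static void
trim_tail(struct seg *segs, int *nsegs, size_t adj)
{
	int ind = *nsegs - 1;

	while (adj > 0 && ind >= 0) {
		if (adj < segs[ind].len) {
			segs[ind].len -= adj;	/* partial trim, done */
			break;
		}
		adj -= segs[ind].len;		/* drop whole segment */
		segs[ind].len = 0;
		ind--;
		(*nsegs)--;
	}
}
#endif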
/*
 * Generate a new software session.
 */
static int
swcr_newsession(device_t dev, u_int32_t *sid, struct cryptoini *cri)
{
	struct swcr_data *swd_base;
	struct swcr_data **swd;
	struct swcr_data **oswd;
	struct auth_hash *axf;
	struct enc_xform *txf;
	struct comp_algo *cxf;
	u_int32_t i, n;
	int error;

	if (sid == NULL || cri == NULL)
		return EINVAL;

	swd_base = NULL;
	swd = &swd_base;

	while (cri) {
		*swd = kmalloc(sizeof(struct swcr_data),
			       M_CRYPTO_DATA, M_WAITOK | M_ZERO);

		switch (cri->cri_alg) {
		case CRYPTO_DES_CBC:
			txf = &enc_xform_des;
			goto enccommon;
		case CRYPTO_3DES_CBC:
			txf = &enc_xform_3des;
			goto enccommon;
		case CRYPTO_BLF_CBC:
			txf = &enc_xform_blf;
			goto enccommon;
		case CRYPTO_CAST_CBC:
			txf = &enc_xform_cast5;
			goto enccommon;
		case CRYPTO_SKIPJACK_CBC:
			txf = &enc_xform_skipjack;
			goto enccommon;
		case CRYPTO_RIJNDAEL128_CBC:
			txf = &enc_xform_rijndael128;
			goto enccommon;
		case CRYPTO_AES_XTS:
			txf = &enc_xform_aes_xts;
			goto enccommon;
		case CRYPTO_AES_CTR:
			txf = &enc_xform_aes_ctr;
			goto enccommon;
		case CRYPTO_CAMELLIA_CBC:
			txf = &enc_xform_camellia;
			goto enccommon;
		case CRYPTO_NULL_CBC:
			txf = &enc_xform_null;
		enccommon:
			if (cri->cri_key != NULL) {
				error = txf->setkey(&((*swd)->sw_kschedule),
						    cri->cri_key,
						    cri->cri_klen / 8);
				if (error) {
					swcr_freesession_slot(&swd_base, 0);
					return error;
				}
			}
			(*swd)->sw_exf = txf;
			break;

		case CRYPTO_MD5_HMAC:
			axf = &auth_hash_hmac_md5;
			goto authcommon;
		case CRYPTO_SHA1_HMAC:
			axf = &auth_hash_hmac_sha1;
			goto authcommon;
		case CRYPTO_SHA2_256_HMAC:
			axf = &auth_hash_hmac_sha2_256;
			goto authcommon;
		case CRYPTO_SHA2_384_HMAC:
			axf = &auth_hash_hmac_sha2_384;
			goto authcommon;
		case CRYPTO_SHA2_512_HMAC:
			axf = &auth_hash_hmac_sha2_512;
			goto authcommon;
		case CRYPTO_NULL_HMAC:
			axf = &auth_hash_null;
			goto authcommon;
		case CRYPTO_RIPEMD160_HMAC:
			axf = &auth_hash_hmac_ripemd_160;
		authcommon:
			(*swd)->sw_ictx = kmalloc(axf->ctxsize, M_CRYPTO_DATA,
						  M_WAITOK);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession_slot(&swd_base, 0);
				return ENOBUFS;
			}
			(*swd)->sw_octx = kmalloc(axf->ctxsize, M_CRYPTO_DATA,
						  M_WAITOK);
			if ((*swd)->sw_octx == NULL) {
				swcr_freesession_slot(&swd_base, 0);
				return ENOBUFS;
			}
			if (cri->cri_key != NULL) {
				swcr_authprepare(axf, *swd, cri->cri_key,
						 cri->cri_klen);
			}
			(*swd)->sw_mlen = cri->cri_mlen;
			(*swd)->sw_axf = axf;
			break;

		case CRYPTO_MD5_KPDK:
			axf = &auth_hash_key_md5;
			goto auth2common;
		case CRYPTO_SHA1_KPDK:
			axf = &auth_hash_key_sha1;
		auth2common:
			(*swd)->sw_ictx = kmalloc(axf->ctxsize, M_CRYPTO_DATA,
						  M_WAITOK);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession_slot(&swd_base, 0);
				return ENOBUFS;
			}
			(*swd)->sw_octx = kmalloc(cri->cri_klen / 8,
						  M_CRYPTO_DATA, M_WAITOK);
			if ((*swd)->sw_octx == NULL) {
				swcr_freesession_slot(&swd_base, 0);
				return ENOBUFS;
			}
			/* Store the key so we can "append" it to the payload */
			if (cri->cri_key != NULL) {
				swcr_authprepare(axf, *swd, cri->cri_key,
						 cri->cri_klen);
			}
			(*swd)->sw_mlen = cri->cri_mlen;
			(*swd)->sw_axf = axf;
			break;

		case CRYPTO_MD5:
			axf = &auth_hash_md5;
			goto auth3common;
		case CRYPTO_SHA1:
			axf = &auth_hash_sha1;
		auth3common:
			(*swd)->sw_ictx = kmalloc(axf->ctxsize, M_CRYPTO_DATA,
						  M_WAITOK);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession_slot(&swd_base, 0);
				return ENOBUFS;
			}
			axf->Init((*swd)->sw_ictx);
			(*swd)->sw_mlen = cri->cri_mlen;
			(*swd)->sw_axf = axf;
			break;

		case CRYPTO_DEFLATE_COMP:
			cxf = &comp_algo_deflate;
			(*swd)->sw_cxf = cxf;
			break;
		default:
			swcr_freesession_slot(&swd_base, 0);
			return EINVAL;
		}

		(*swd)->sw_alg = cri->cri_alg;
		cri = cri->cri_next;
		swd = &((*swd)->sw_next);
	}
	/*
	 * Atomically allocate a session
	 */
retry:
	spin_lock_wr(&swcr_spin);
	for (i = swcr_minsesnum; i < swcr_sesnum; ++i) {
		if (swcr_sessions[i] == NULL)
			break;
	}
	if (i < swcr_sesnum) {
		swcr_sessions[i] = swd_base;
		swcr_minsesnum = i + 1;
		spin_unlock_wr(&swcr_spin);
		*sid = i;
		return 0;
	}
	spin_unlock_wr(&swcr_spin);

	/*
	 * A larger allocation is required, reallocate the array
	 * and replace, checking for SMP races.
	 */
	n = swcr_sesnum * 2;
	if (n < CRYPTO_SW_SESSIONS)
		n = CRYPTO_SW_SESSIONS;
	swd = kmalloc(n * sizeof(struct swcr_data *),
		      M_CRYPTO_DATA, M_WAITOK | M_ZERO);

	spin_lock_wr(&swcr_spin);
	if (swcr_sesnum >= n) {
		spin_unlock_wr(&swcr_spin);
		kfree(swd, M_CRYPTO_DATA);
	} else if (swcr_sesnum) {
		bcopy(swcr_sessions, swd,
		      swcr_sesnum * sizeof(struct swcr_data *));
		oswd = swcr_sessions;
		swcr_sessions = swd;
		swcr_sesnum = n;
		spin_unlock_wr(&swcr_spin);
		kfree(oswd, M_CRYPTO_DATA);
	} else {
		swcr_sessions = swd;
		swcr_sesnum = n;
		spin_unlock_wr(&swcr_spin);
	}
	goto retry;
}
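/*
 * Illustrative sketch (not part of the driver): the grow-and-swap
 * pattern used above.  The replacement array is allocated outside the
 * lock (a kmalloc(M_WAITOK) may sleep, which is illegal while holding
 * a spinlock), then installed only if no other thread grew the table
 * first; the loser frees its copy and retries.  A hypothetical
 * userland equivalent using pthreads:
 */
#if 0	/* standalone userland sketch */
#include <pthread.h>
#include <stdlib.h>
#include <string.h>

static pthread_mutex_t tbl_lock = PTHREAD_MUTEX_INITIALIZER;
static void **tbl;
static size_t tbl_len;

static void
tbl_grow(size_t want)
{
	void **nt, **ot;

	nt = calloc(want, sizeof(*nt));		/* allocate outside the lock */

	pthread_mutex_lock(&tbl_lock);
	if (tbl_len >= want) {
		/* Lost the race: someone else already grew the table. */
		pthread_mutex_unlock(&tbl_lock);
		free(nt);
		return;
	}
	memcpy(nt, tbl, tbl_len * sizeof(*nt));
	ot = tbl;
	tbl = nt;
	tbl_len = want;
	pthread_mutex_unlock(&tbl_lock);
	free(ot);				/* old array freed outside the lock */
}
#endif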
/*
 * Free a session.
 */
static int
swcr_freesession(device_t dev, u_int64_t tid)
{
	u_int32_t sid = CRYPTO_SESID2LID(tid);

	if (sid >= swcr_sesnum || swcr_sessions == NULL ||
	    swcr_sessions[sid] == NULL) {
		return EINVAL;
	}

	/* Silently accept and return */
	if (sid == 0)
		return 0;

	return(swcr_freesession_slot(&swcr_sessions[sid], sid));
}
static int
swcr_freesession_slot(struct swcr_data **swdp, u_int32_t sid)
{
	struct enc_xform *txf;
	struct auth_hash *axf;
	struct swcr_data *swd;
	struct swcr_data *swnext;

	/*
	 * Protect session detachment with the spinlock.
	 */
	spin_lock_wr(&swcr_spin);
	swnext = *swdp;
	*swdp = NULL;
	if (sid && swcr_minsesnum > sid)
		swcr_minsesnum = sid;
	spin_unlock_wr(&swcr_spin);

	/*
	 * Clean up at our leisure.
	 */
	while ((swd = swnext) != NULL) {
		swnext = swd->sw_next;
		swd->sw_next = NULL;

		switch (swd->sw_alg) {
		case CRYPTO_DES_CBC:
		case CRYPTO_3DES_CBC:
		case CRYPTO_BLF_CBC:
		case CRYPTO_CAST_CBC:
		case CRYPTO_SKIPJACK_CBC:
		case CRYPTO_RIJNDAEL128_CBC:
		case CRYPTO_AES_XTS:
		case CRYPTO_AES_CTR:
		case CRYPTO_CAMELLIA_CBC:
		case CRYPTO_NULL_CBC:
			txf = swd->sw_exf;
			if (swd->sw_kschedule)
				txf->zerokey(&(swd->sw_kschedule));
			break;

		case CRYPTO_MD5_HMAC:
		case CRYPTO_SHA1_HMAC:
		case CRYPTO_SHA2_256_HMAC:
		case CRYPTO_SHA2_384_HMAC:
		case CRYPTO_SHA2_512_HMAC:
		case CRYPTO_RIPEMD160_HMAC:
		case CRYPTO_NULL_HMAC:
			axf = swd->sw_axf;
			if (swd->sw_ictx) {
				bzero(swd->sw_ictx, axf->ctxsize);
				kfree(swd->sw_ictx, M_CRYPTO_DATA);
			}
			if (swd->sw_octx) {
				bzero(swd->sw_octx, axf->ctxsize);
				kfree(swd->sw_octx, M_CRYPTO_DATA);
			}
			break;

		case CRYPTO_MD5_KPDK:
		case CRYPTO_SHA1_KPDK:
			axf = swd->sw_axf;
			if (swd->sw_ictx) {
				bzero(swd->sw_ictx, axf->ctxsize);
				kfree(swd->sw_ictx, M_CRYPTO_DATA);
			}
			if (swd->sw_octx) {
				bzero(swd->sw_octx, swd->sw_klen);
				kfree(swd->sw_octx, M_CRYPTO_DATA);
			}
			break;

		case CRYPTO_MD5:
		case CRYPTO_SHA1:
			if (swd->sw_ictx)
				kfree(swd->sw_ictx, M_CRYPTO_DATA);
			break;

		case CRYPTO_DEFLATE_COMP:
			break;
		}

		kfree(swd, M_CRYPTO_DATA);
	}
	return 0;
}
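/*
 * Illustrative sketch (not part of the driver): the zero-before-free
 * discipline used above for key schedules and HMAC contexts.  In
 * userland a plain memset() before free() can be elided by the
 * optimizer as a dead store; routing the call through a volatile
 * function pointer is the classic portable workaround.
 */
#if 0	/* standalone userland sketch */
#include <stdlib.h>
#include <string.h>

static void *(*volatile wipe)(void *, int, size_t) = memset;

static void
zerofree(void *p, size_t len)
{
	if (p != NULL) {
		wipe(p, 0, len);	/* not optimized away */
		free(p);
	}
}
#endif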
/*
 * Process a software request.
 */
static int
swcr_process(device_t dev, struct cryptop *crp, int hint)
{
	struct cryptodesc *crd;
	struct swcr_data *sw;
	u_int32_t lid;

	/* Sanity check */
	if (crp == NULL)
		return EINVAL;

	if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
		crp->crp_etype = EINVAL;
		goto done;
	}

	lid = crp->crp_sid & 0xffffffff;
	if (lid >= swcr_sesnum || lid == 0 || swcr_sessions[lid] == NULL) {
		crp->crp_etype = ENOENT;
		goto done;
	}

	/* Go through crypto descriptors, processing as we go */
	for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
		/*
		 * Find the crypto context.
		 *
		 * XXX Note that the logic here prevents us from having
		 * XXX the same algorithm multiple times in a session
		 * XXX (or rather, we can but it won't give us the right
		 * XXX results). To do that, we'd need some way of differentiating
		 * XXX between the various instances of an algorithm (so we can
		 * XXX locate the correct crypto context).
		 */
		for (sw = swcr_sessions[lid];
		     sw && sw->sw_alg != crd->crd_alg;
		     sw = sw->sw_next)
			;

		/* No such context ? */
		if (sw == NULL) {
			crp->crp_etype = EINVAL;
			goto done;
		}
		switch (sw->sw_alg) {
		case CRYPTO_DES_CBC:
		case CRYPTO_3DES_CBC:
		case CRYPTO_BLF_CBC:
		case CRYPTO_CAST_CBC:
		case CRYPTO_SKIPJACK_CBC:
		case CRYPTO_RIJNDAEL128_CBC:
		case CRYPTO_AES_XTS:
		case CRYPTO_AES_CTR:
		case CRYPTO_CAMELLIA_CBC:
			if ((crp->crp_etype = swcr_encdec(crd, sw,
			    crp->crp_buf, crp->crp_flags)) != 0)
				goto done;
			break;
		case CRYPTO_NULL_CBC:
			crp->crp_etype = 0;
			break;
		case CRYPTO_MD5_HMAC:
		case CRYPTO_SHA1_HMAC:
		case CRYPTO_SHA2_256_HMAC:
		case CRYPTO_SHA2_384_HMAC:
		case CRYPTO_SHA2_512_HMAC:
		case CRYPTO_RIPEMD160_HMAC:
		case CRYPTO_NULL_HMAC:
		case CRYPTO_MD5_KPDK:
		case CRYPTO_SHA1_KPDK:
		case CRYPTO_MD5:
		case CRYPTO_SHA1:
			if ((crp->crp_etype = swcr_authcompute(crd, sw,
			    crp->crp_buf, crp->crp_flags)) != 0)
				goto done;
			break;

		case CRYPTO_DEFLATE_COMP:
			if ((crp->crp_etype = swcr_compdec(crd, sw,
			    crp->crp_buf, crp->crp_flags)) != 0)
				goto done;
			else
				crp->crp_olen = (int)sw->sw_size;
			break;

		default:
			/* Unknown/unsupported algorithm */
			crp->crp_etype = EINVAL;
			goto done;
		}
	}

done:
	crypto_done(crp);
	return 0;
}
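/*
 * Illustrative sketch (not part of the driver): roughly how an
 * opencrypto consumer would exercise this path with the era's
 * crypto_getreq()/crypto_dispatch() API.  The buffer layout assumed
 * here (an 8-byte IV slot at the front of the mbuf, payload after it)
 * and the callback body are hypothetical; error handling is elided.
 */
#if 0	/* in-kernel consumer sketch */
static int
example_done(struct cryptop *crp)
{
	/* inspect crp->crp_etype, then crypto_freereq(crp) */
	return 0;
}

static void
example_submit(u_int64_t sid, struct mbuf *m, int len)
{
	struct cryptop *crp;
	struct cryptodesc *crd;

	crp = crypto_getreq(1);			/* one descriptor */
	crd = crp->crp_desc;
	crd->crd_alg = CRYPTO_3DES_CBC;
	crd->crd_inject = 0;			/* IV written at offset 0 */
	crd->crd_skip = 8;			/* payload follows the IV */
	crd->crd_len = len - 8;			/* must be a multiple of 8 */
	crd->crd_flags = CRD_F_ENCRYPT;		/* driver generates the IV */

	crp->crp_sid = sid;			/* from crypto_newsession() */
	crp->crp_ilen = len;
	crp->crp_flags = CRYPTO_F_IMBUF;
	crp->crp_buf = (caddr_t)m;
	crp->crp_callback = example_done;

	crypto_dispatch(crp);			/* lands in swcr_process() */
}
#endif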
static void
swcr_identify(driver_t *drv, device_t parent)
{
	/* NB: order 10 is so we get attached after h/w devices */
	/* XXX: not sure this BUS_ADD_CHILD usage is correct */
	if (device_find_child(parent, "cryptosoft", -1) == NULL &&
	    BUS_ADD_CHILD(parent, parent, 10, "cryptosoft", -1) == 0)
		panic("cryptosoft: could not attach");
}

static int
swcr_probe(device_t dev)
{
	device_set_desc(dev, "software crypto");
	return (0);
}

static int
swcr_attach(device_t dev)
{
	memset(hmac_ipad_buffer, HMAC_IPAD_VAL, HMAC_MAX_BLOCK_LEN);
	memset(hmac_opad_buffer, HMAC_OPAD_VAL, HMAC_MAX_BLOCK_LEN);

	swcr_id = crypto_get_driverid(dev, CRYPTOCAP_F_SOFTWARE |
					   CRYPTOCAP_F_SYNC);
	if (swcr_id < 0) {
		device_printf(dev, "cannot initialize!\n");
		return ENOMEM;
	}
#define	REGISTER(alg) \
	crypto_register(swcr_id, alg, 0, 0)
	REGISTER(CRYPTO_DES_CBC);
	REGISTER(CRYPTO_3DES_CBC);
	REGISTER(CRYPTO_BLF_CBC);
	REGISTER(CRYPTO_CAST_CBC);
	REGISTER(CRYPTO_SKIPJACK_CBC);
	REGISTER(CRYPTO_NULL_CBC);
	REGISTER(CRYPTO_MD5_HMAC);
	REGISTER(CRYPTO_SHA1_HMAC);
	REGISTER(CRYPTO_SHA2_256_HMAC);
	REGISTER(CRYPTO_SHA2_384_HMAC);
	REGISTER(CRYPTO_SHA2_512_HMAC);
	REGISTER(CRYPTO_RIPEMD160_HMAC);
	REGISTER(CRYPTO_NULL_HMAC);
	REGISTER(CRYPTO_MD5_KPDK);
	REGISTER(CRYPTO_SHA1_KPDK);
	REGISTER(CRYPTO_MD5);
	REGISTER(CRYPTO_SHA1);
	REGISTER(CRYPTO_RIJNDAEL128_CBC);
	REGISTER(CRYPTO_AES_XTS);
	REGISTER(CRYPTO_AES_CTR);
	REGISTER(CRYPTO_CAMELLIA_CBC);
	REGISTER(CRYPTO_DEFLATE_COMP);
#undef REGISTER

	return 0;
}

static int
swcr_detach(device_t dev)
{
	crypto_unregister_all(swcr_id);
	if (swcr_sessions != NULL)
		kfree(swcr_sessions, M_CRYPTO_DATA);
	return 0;
}
static device_method_t swcr_methods[] = {
	DEVMETHOD(device_identify,	swcr_identify),
	DEVMETHOD(device_probe,		swcr_probe),
	DEVMETHOD(device_attach,	swcr_attach),
	DEVMETHOD(device_detach,	swcr_detach),

	DEVMETHOD(cryptodev_newsession,	swcr_newsession),
	DEVMETHOD(cryptodev_freesession,swcr_freesession),
	DEVMETHOD(cryptodev_process,	swcr_process),

	DEVMETHOD_END
};

static driver_t swcr_driver = {
	"cryptosoft",
	swcr_methods,
	0,		/* NB: no softc */
};
static devclass_t swcr_devclass;
/*
 * NB: We explicitly reference the crypto module so we
 * get the necessary ordering when built as a loadable
 * module.  This is required because we bundle the crypto
 * module code together with the cryptosoft driver (otherwise
 * normal module dependencies would handle things).
 */
extern int crypto_modevent(struct module *, int, void *);

/* XXX where to attach */
DRIVER_MODULE(cryptosoft, nexus, swcr_driver, swcr_devclass, crypto_modevent, 0);
MODULE_VERSION(cryptosoft, 1);
MODULE_DEPEND(cryptosoft, crypto, 1, 1, 1);