1 /* $FreeBSD: src/sys/opencrypto/cryptosoft.c,v 1.2.2.1 2002/11/21 23:34:23 sam Exp $ */
2 /* $DragonFly: src/sys/opencrypto/cryptosoft.c,v 1.6 2007/12/04 09:11:12 hasso Exp $ */
3 /* $OpenBSD: cryptosoft.c,v 1.35 2002/04/26 08:43:50 deraadt Exp $ */
6 * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
8 * This code was written by Angelos D. Keromytis in Athens, Greece, in
9 * February 2000. Network Security Technologies Inc. (NSTI) kindly
10 * supported the development of this code.
12 * Copyright (c) 2000, 2001 Angelos D. Keromytis
14 * Permission to use, copy, and modify this software with or without fee
15 * is hereby granted, provided that this entire notice is included in
16 * all source code copies of any software which is or includes a copy or
17 * modification of this software.
19 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
20 * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
21 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
22 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
26 #include <sys/param.h>
27 #include <sys/systm.h>
28 #include <sys/malloc.h>
30 #include <sys/sysctl.h>
31 #include <sys/errno.h>
32 #include <sys/random.h>
33 #include <sys/kernel.h>
36 #include <crypto/blowfish/blowfish.h>
37 #include <crypto/cast128/cast128.h>
38 #include <crypto/sha1.h>
39 #include <opencrypto/rmd160.h>
40 #include <opencrypto/skipjack.h>
43 #include <opencrypto/cryptodev.h>
44 #include <opencrypto/cryptosoft.h>
45 #include <opencrypto/xform.h>
/*
 * HMAC inner-pad constant (RFC 2104): one hash block (64 bytes) of
 * HMAC_IPAD_VAL (0x36).  swcr_newsession() XORs the key into itself and
 * hashes this buffer's tail to pad the inner context to a full block.
 * NOTE(review): the initializer's closing brace is absent from this dump.
 */
47 u_int8_t hmac_ipad_buffer[64] = {
48 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
49 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
50 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
51 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
52 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
53 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
54 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
55 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36
/*
 * HMAC outer-pad constant (RFC 2104): one hash block (64 bytes) of
 * HMAC_OPAD_VAL (0x5C), used the same way as hmac_ipad_buffer but for
 * the outer hash context.
 * NOTE(review): the initializer's closing brace is absent from this dump.
 */
58 u_int8_t hmac_opad_buffer[64] = {
59 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
60 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
61 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
62 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
63 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
64 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
65 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
66 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C
/*
 * Global session table for the software driver.  swcr_sessions[sid] heads
 * a singly linked list of swcr_data (one node per algorithm in the
 * session, chained via sw_next); swcr_sesnum is the current table size.
 * Slot 0 is deliberately never handed out (see swcr_newsession()).
 */
70 struct swcr_data **swcr_sessions = NULL;
71 u_int32_t swcr_sesnum = 0;
/*
 * COPYBACK/COPYDATA -- dispatch a copy to/from the request buffer on its
 * type: mbuf chains go through m_copyback()/m_copydata(), anything else is
 * treated as a struct uio and goes through the cuio_* equivalents.
 *
 * Fix: every macro parameter and the whole expansion are now parenthesized
 * so the macros expand safely when arguments are expressions and when the
 * expansion appears inside a larger expression (the original left both
 * unparenthesized).  The expression (ternary) form is kept so existing
 * statement-context callers are unaffected.
 */
#define COPYBACK(x, a, b, c, d) \
	((x) == CRYPTO_BUF_MBUF ? \
	    m_copyback((struct mbuf *)(a), (b), (c), (d)) : \
	    cuio_copyback((struct uio *)(a), (b), (c), (d)))
#define COPYDATA(x, a, b, c, d) \
	((x) == CRYPTO_BUF_MBUF ? \
	    m_copydata((struct mbuf *)(a), (b), (c), (d)) : \
	    cuio_copydata((struct uio *)(a), (b), (c), (d)))
/*
 * Forward declarations for the driver's internal workers and the three
 * entry points registered with the opencrypto framework (newsession /
 * freesession / process).
 */
81 static int swcr_encdec(struct cryptodesc *, struct swcr_data *, caddr_t, int);
82 static int swcr_authcompute(struct cryptop *crp, struct cryptodesc *crd,
83 struct swcr_data *sw, caddr_t buf, int outtype);
84 static int swcr_compdec(struct cryptodesc *, struct swcr_data *, caddr_t, int);
85 static int swcr_process(void *, struct cryptop *, int);
86 static int swcr_newsession(void *, u_int32_t *, struct cryptoini *);
87 static int swcr_freesession(void *, u_int64_t);
/*
 * m_apply() walks an mbuf chain and invokes f on each contiguous data
 * region; swcr_authcompute() uses it below to hash mbuf payloads without
 * linearizing them first.
 */
90 * NB: These came over from openbsd and are kept private
91 * to the crypto code for now.
93 extern int m_apply(struct mbuf *m, int off, int len,
94 int (*f)(caddr_t, caddr_t, unsigned int), caddr_t fstate);
97 * Apply a symmetric encryption/decryption algorithm.
/*
 * swcr_encdec --
 *	Run the session's block cipher (sw->sw_exf) over crd->crd_len bytes
 *	of `buf`, chaining blocks CBC-style by hand (XOR with IV/previous
 *	block is explicit in the code below).  Three buffer layouts are
 *	handled: contiguous memory, mbuf chains and struct uio iovecs.
 *
 *	NOTE(review): this dump is missing interior lines of the original
 *	function (e.g. the `exf = sw->sw_exf` assignment, several returns
 *	and closing braces); comments describe only what is visible.
 */
100 swcr_encdec(struct cryptodesc *crd, struct swcr_data *sw, caddr_t buf,
103 unsigned char iv[EALG_MAX_BLOCK_LEN], blk[EALG_MAX_BLOCK_LEN], *idat;
104 unsigned char *ivp, piv[EALG_MAX_BLOCK_LEN];
105 struct enc_xform *exf;
/* Cipher block size; crd_len must be a whole number of blocks. */
109 blks = exf->blocksize;
111 /* Check for non-padded data */
112 if (crd->crd_len % blks)
115 /* Initialize the IV */
116 if (crd->crd_flags & CRD_F_ENCRYPT) {
117 /* IV explicitly provided ? */
118 if (crd->crd_flags & CRD_F_IV_EXPLICIT)
119 bcopy(crd->crd_iv, iv, blks);
/* Otherwise synthesize an IV from repeated karc4random() words. */
123 i + sizeof (u_int32_t) < EALG_MAX_BLOCK_LEN;
124 i += sizeof (u_int32_t)) {
125 u_int32_t temp = karc4random();
127 bcopy(&temp, iv + i, sizeof(u_int32_t));
130 * What if the block size is not a multiple
131 * of sizeof (u_int32_t), which is the size of
132 * what karc4random() returns ?
134 if (EALG_MAX_BLOCK_LEN % sizeof (u_int32_t) != 0) {
135 u_int32_t temp = karc4random();
137 bcopy (&temp, iv + i,
138 EALG_MAX_BLOCK_LEN - i);
142 /* Do we need to write the IV */
/* Unless the caller says the IV is already in the buffer, inject it. */
143 if (!(crd->crd_flags & CRD_F_IV_PRESENT)) {
144 COPYBACK(outtype, buf, crd->crd_inject, blks, iv);
147 } else { /* Decryption */
148 /* IV explicitly provided ? */
149 if (crd->crd_flags & CRD_F_IV_EXPLICIT)
150 bcopy(crd->crd_iv, iv, blks);
/* Otherwise read the IV out of the buffer at crd_inject. */
153 COPYDATA(outtype, buf, crd->crd_inject, blks, iv);
/*
 * Per-operation key override: discard the old schedule and rebuild it.
 * crd_klen appears to be in bits (divided by 8 for bytes) -- TODO confirm.
 */
157 if (crd->crd_flags & CRD_F_KEY_EXPLICIT) {
160 if (sw->sw_kschedule)
161 exf->zerokey(&(sw->sw_kschedule));
162 error = exf->setkey(&sw->sw_kschedule,
163 crd->crd_key, crd->crd_klen / 8);
/* --- Case 1: flat contiguous buffer, encrypt/decrypt in place. --- */
170 if (outtype == CRYPTO_BUF_CONTIG) {
171 if (crd->crd_flags & CRD_F_ENCRYPT) {
172 for (i = crd->crd_skip;
173 i < crd->crd_skip + crd->crd_len; i += blks) {
174 /* XOR with the IV/previous block, as appropriate. */
175 if (i == crd->crd_skip)
176 for (k = 0; k < blks; k++)
177 buf[i + k] ^= ivp[k];
179 for (k = 0; k < blks; k++)
180 buf[i + k] ^= buf[i + k - blks];
181 exf->encrypt(sw->sw_kschedule, buf + i);
183 } else { /* Decrypt */
185 * Start at the end, so we don't need to keep the encrypted
186 * block as the IV for the next block.
188 for (i = crd->crd_skip + crd->crd_len - blks;
189 i >= crd->crd_skip; i -= blks) {
190 exf->decrypt(sw->sw_kschedule, buf + i);
192 /* XOR with the IV/previous block, as appropriate */
193 if (i == crd->crd_skip)
194 for (k = 0; k < blks; k++)
195 buf[i + k] ^= ivp[k];
197 for (k = 0; k < blks; k++)
198 buf[i + k] ^= buf[i + k - blks];
/* --- Case 2: mbuf chain; blocks straddling mbufs are bounced via blk[]. --- */
203 } else if (outtype == CRYPTO_BUF_MBUF) {
204 struct mbuf *m = (struct mbuf *) buf;
206 /* Find beginning of data */
207 m = m_getptr(m, crd->crd_skip, &k);
215 * If there's insufficient data at the end of
216 * an mbuf, we have to do some copying.
218 if (m->m_len < k + blks && m->m_len != k) {
219 m_copydata(m, k, blks, blk);
221 /* Actual encryption/decryption */
222 if (crd->crd_flags & CRD_F_ENCRYPT) {
223 /* XOR with previous block */
224 for (j = 0; j < blks; j++)
227 exf->encrypt(sw->sw_kschedule, blk);
230 * Keep encrypted block for XOR'ing
233 bcopy(blk, iv, blks);
235 } else { /* decrypt */
237 * Keep encrypted block for XOR'ing
/* piv holds the ciphertext so it can become the IV for the next block. */
241 bcopy(blk, piv, blks);
243 bcopy(blk, iv, blks);
245 exf->decrypt(sw->sw_kschedule, blk);
247 /* XOR with previous block */
248 for (j = 0; j < blks; j++)
252 bcopy(piv, iv, blks);
257 /* Copy back decrypted block */
258 m_copyback(m, k, blks, blk);
260 /* Advance pointer */
261 m = m_getptr(m, k + blks, &k);
267 /* Could be done... */
272 /* Skip possibly empty mbufs */
274 for (m = m->m_next; m && m->m_len == 0;
285 * Warning: idat may point to garbage here, but
286 * we only use it in the while() loop, only if
287 * there are indeed enough data.
289 idat = mtod(m, unsigned char *) + k;
/* Fast path: process whole blocks in place while they fit in this mbuf. */
291 while (m->m_len >= k + blks && i > 0) {
292 if (crd->crd_flags & CRD_F_ENCRYPT) {
293 /* XOR with previous block/IV */
294 for (j = 0; j < blks; j++)
297 exf->encrypt(sw->sw_kschedule, idat);
299 } else { /* decrypt */
301 * Keep encrypted block to be used
302 * in next block's processing.
305 bcopy(idat, piv, blks);
307 bcopy(idat, iv, blks);
309 exf->decrypt(sw->sw_kschedule, idat);
311 /* XOR with previous block/IV */
312 for (j = 0; j < blks; j++)
316 bcopy(piv, iv, blks);
327 return 0; /* Done with mbuf encryption/decryption */
/* --- Case 3: struct uio; mirrors the mbuf path using cuio_* helpers. --- */
328 } else if (outtype == CRYPTO_BUF_IOV) {
329 struct uio *uio = (struct uio *) buf;
332 /* Find beginning of data */
333 iov = cuio_getptr(uio, crd->crd_skip, &k);
341 * If there's insufficient data at the end of
342 * an iovec, we have to do some copying.
344 if (iov->iov_len < k + blks && iov->iov_len != k) {
345 cuio_copydata(uio, k, blks, blk);
347 /* Actual encryption/decryption */
348 if (crd->crd_flags & CRD_F_ENCRYPT) {
349 /* XOR with previous block */
350 for (j = 0; j < blks; j++)
353 exf->encrypt(sw->sw_kschedule, blk);
356 * Keep encrypted block for XOR'ing
359 bcopy(blk, iv, blks);
361 } else { /* decrypt */
363 * Keep encrypted block for XOR'ing
367 bcopy(blk, piv, blks);
369 bcopy(blk, iv, blks);
371 exf->decrypt(sw->sw_kschedule, blk);
373 /* XOR with previous block */
374 for (j = 0; j < blks; j++)
378 bcopy(piv, iv, blks);
383 /* Copy back decrypted block */
384 cuio_copyback(uio, k, blks, blk);
386 /* Advance pointer */
387 iov = cuio_getptr(uio, k + blks, &k);
393 /* Could be done... */
399 * Warning: idat may point to garbage here, but
400 * we only use it in the while() loop, only if
401 * there are indeed enough data.
403 idat = (char *)iov->iov_base + k;
405 while (iov->iov_len >= k + blks && i > 0) {
406 if (crd->crd_flags & CRD_F_ENCRYPT) {
407 /* XOR with previous block/IV */
408 for (j = 0; j < blks; j++)
411 exf->encrypt(sw->sw_kschedule, idat);
413 } else { /* decrypt */
415 * Keep encrypted block to be used
416 * in next block's processing.
419 bcopy(idat, piv, blks);
421 bcopy(idat, iv, blks);
423 exf->decrypt(sw->sw_kschedule, idat);
425 /* XOR with previous block/IV */
426 for (j = 0; j < blks; j++)
430 bcopy(piv, iv, blks);
441 return 0; /* Done with mbuf encryption/decryption */
449 * Compute keyed-hash authenticator.
/*
 * swcr_authcompute --
 *	Compute the keyed-hash authenticator for one descriptor: clone the
 *	precomputed inner context (sw->sw_ictx), hash the payload, finish
 *	per the algorithm family, then write the result at crd_inject.
 *	NOTE(review): interior lines (error returns, break statements,
 *	CRYPTO_BUF_IOV handling) are missing from this dump.
 */
452 swcr_authcompute(struct cryptop *crp, struct cryptodesc *crd,
453 struct swcr_data *sw, caddr_t buf, int outtype)
455 unsigned char aalg[AALG_MAX_RESULT_LEN];
456 struct auth_hash *axf;
460 if (sw->sw_ictx == 0)
/* Start from the precomputed (key-mixed) inner context. */
465 bcopy(sw->sw_ictx, &ctx, axf->ctxsize);
468 case CRYPTO_BUF_CONTIG:
469 axf->Update(&ctx, buf + crd->crd_skip, crd->crd_len);
471 case CRYPTO_BUF_MBUF:
/* Hash the mbuf chain in place via m_apply(), no linearization. */
472 err = m_apply((struct mbuf *) buf, crd->crd_skip, crd->crd_len,
473 (int (*)(caddr_t, caddr_t, unsigned int)) axf->Update,
/* Finalization depends on the construction: */
483 switch (sw->sw_alg) {
484 case CRYPTO_MD5_HMAC:
485 case CRYPTO_SHA1_HMAC:
486 case CRYPTO_SHA2_HMAC:
487 case CRYPTO_RIPEMD160_HMAC:
488 if (sw->sw_octx == NULL)
/* HMAC: outer hash over the inner digest (RFC 2104). */
491 axf->Final(aalg, &ctx);
492 bcopy(sw->sw_octx, &ctx, axf->ctxsize);
493 axf->Update(&ctx, aalg, axf->hashsize);
494 axf->Final(aalg, &ctx);
497 case CRYPTO_MD5_KPDK:
498 case CRYPTO_SHA1_KPDK:
499 if (sw->sw_octx == NULL)
/* KPDK: append the raw key (stashed in sw_octx) and finish. */
502 axf->Update(&ctx, sw->sw_octx, sw->sw_klen);
503 axf->Final(aalg, &ctx);
506 case CRYPTO_NULL_HMAC:
507 axf->Final(aalg, &ctx);
511 /* Inject the authentication data */
512 if (outtype == CRYPTO_BUF_CONTIG)
513 bcopy(aalg, buf + crd->crd_inject, axf->authsize);
515 m_copyback((struct mbuf *) buf, crd->crd_inject,
516 axf->authsize, aalg);
521 * Apply a compression/decompression algorithm
/*
 * swcr_compdec --
 *	Apply deflate-style (de)compression: linearize crd_len bytes into a
 *	temporary buffer, run sw->sw_cxf's (de)compress which allocates the
 *	output, copy the result back and shrink the buffer if the data got
 *	smaller.  Result size is stashed in sw->sw_size for swcr_process().
 *	NOTE(review): interior lines (error returns, else/brace closures)
 *	are missing from this dump.
 */
524 swcr_compdec(struct cryptodesc *crd, struct swcr_data *sw,
525 caddr_t buf, int outtype)
527 u_int8_t *data, *out;
528 struct comp_algo *cxf;
534 /* We must handle the whole buffer of data in one time
535 * then if there is not all the data in the mbuf, we must
539 MALLOC(data, u_int8_t *, crd->crd_len, M_CRYPTO_DATA, M_NOWAIT);
542 COPYDATA(outtype, buf, crd->crd_skip, crd->crd_len, data);
544 if (crd->crd_flags & CRD_F_COMP)
545 result = cxf->compress(data, crd->crd_len, &out);
547 result = cxf->decompress(data, crd->crd_len, &out);
/* The linearized input is no longer needed. */
549 FREE(data, M_CRYPTO_DATA);
553 /* Copy back the (de)compressed data. m_copyback is
554 * extending the mbuf as necessary.
556 sw->sw_size = result;
557 /* Check the compressed size when doing compression */
558 if (crd->crd_flags & CRD_F_COMP) {
559 if (result > crd->crd_len) {
560 /* Compression was useless, we lost time */
561 FREE(out, M_CRYPTO_DATA);
566 COPYBACK(outtype, buf, crd->crd_skip, result, out);
/* Data shrank: trim the tail of the request buffer. */
567 if (result < crd->crd_len) {
568 adj = result - crd->crd_len;
569 if (outtype == CRYPTO_BUF_MBUF) {
/* NOTE(review): adj is recomputed here, redundantly, with the same value. */
570 adj = result - crd->crd_len;
/* Negative adj trims from the mbuf chain's tail (m_adj semantics). */
571 m_adj((struct mbuf *)buf, adj);
573 struct uio *uio = (struct uio *)buf;
/* For uio, walk iovecs backwards shaving `adj` bytes off the end. */
576 adj = crd->crd_len - result;
577 ind = uio->uio_iovcnt - 1;
579 while (adj > 0 && ind >= 0) {
580 if (adj < uio->uio_iov[ind].iov_len) {
581 uio->uio_iov[ind].iov_len -= adj;
585 adj -= uio->uio_iov[ind].iov_len;
586 uio->uio_iov[ind].iov_len = 0;
592 FREE(out, M_CRYPTO_DATA);
597 * Generate a new software session.
/*
 * swcr_newsession --
 *	Allocate a software session: find (or grow the table to create) a
 *	free slot >= 1, then build one swcr_data node per cryptoini in the
 *	`cri` chain -- cipher key schedules, precomputed HMAC inner/outer
 *	contexts, stored KPDK keys, or a compressor binding.
 *	NOTE(review): interior lines (returns, table-doubling branch,
 *	several case/default closures) are missing from this dump.
 */
600 swcr_newsession(void *arg, u_int32_t *sid, struct cryptoini *cri)
602 struct swcr_data **swd;
603 struct auth_hash *axf;
604 struct enc_xform *txf;
605 struct comp_algo *cxf;
609 if (sid == NULL || cri == NULL)
/* Scan for a free slot; slot 0 is reserved. */
613 for (i = 1; i < swcr_sesnum; i++)
614 if (swcr_sessions[i] == NULL)
617 i = 1; /* NB: to silence compiler warning */
/* No free slot (or first use): (re)allocate the session table. */
619 if (swcr_sessions == NULL || i == swcr_sesnum) {
620 if (swcr_sessions == NULL) {
621 i = 1; /* We leave swcr_sessions[0] empty */
622 swcr_sesnum = CRYPTO_SW_SESSIONS;
626 swd = kmalloc(swcr_sesnum * sizeof(struct swcr_data *),
627 M_CRYPTO_DATA, M_NOWAIT|M_ZERO);
629 /* Reset session number */
630 if (swcr_sesnum == CRYPTO_SW_SESSIONS)
637 /* Copy existing sessions */
639 bcopy(swcr_sessions, swd,
640 (swcr_sesnum / 2) * sizeof(struct swcr_data *));
641 kfree(swcr_sessions, M_CRYPTO_DATA);
/* Build the per-algorithm chain into slot i. */
647 swd = &swcr_sessions[i];
651 MALLOC(*swd, struct swcr_data *, sizeof(struct swcr_data),
652 M_CRYPTO_DATA, M_NOWAIT|M_ZERO);
/* On any failure below, tear down the partial session. */
654 swcr_freesession(NULL, i);
658 switch (cri->cri_alg) {
660 txf = &enc_xform_des;
662 case CRYPTO_3DES_CBC:
663 txf = &enc_xform_3des;
666 txf = &enc_xform_blf;
668 case CRYPTO_CAST_CBC:
669 txf = &enc_xform_cast5;
671 case CRYPTO_SKIPJACK_CBC:
672 txf = &enc_xform_skipjack;
674 case CRYPTO_RIJNDAEL128_CBC:
675 txf = &enc_xform_rijndael128;
677 case CRYPTO_NULL_CBC:
678 txf = &enc_xform_null;
/* Ciphers: expand the key schedule (cri_klen in bits) and bind the xform. */
681 error = txf->setkey(&((*swd)->sw_kschedule),
682 cri->cri_key, cri->cri_klen / 8);
684 swcr_freesession(NULL, i);
687 (*swd)->sw_exf = txf;
690 case CRYPTO_MD5_HMAC:
691 axf = &auth_hash_hmac_md5_96;
693 case CRYPTO_SHA1_HMAC:
694 axf = &auth_hash_hmac_sha1_96;
696 case CRYPTO_SHA2_HMAC:
/* SHA-2 variant chosen by key length in bits. */
697 if (cri->cri_klen == 256)
698 axf = &auth_hash_hmac_sha2_256;
699 else if (cri->cri_klen == 384)
700 axf = &auth_hash_hmac_sha2_384;
701 else if (cri->cri_klen == 512)
702 axf = &auth_hash_hmac_sha2_512;
704 swcr_freesession(NULL, i);
708 case CRYPTO_NULL_HMAC:
709 axf = &auth_hash_null;
711 case CRYPTO_RIPEMD160_HMAC:
712 axf = &auth_hash_hmac_ripemd_160_96;
714 (*swd)->sw_ictx = kmalloc(axf->ctxsize, M_CRYPTO_DATA,
716 if ((*swd)->sw_ictx == NULL) {
717 swcr_freesession(NULL, i);
721 (*swd)->sw_octx = kmalloc(axf->ctxsize, M_CRYPTO_DATA,
723 if ((*swd)->sw_octx == NULL) {
724 swcr_freesession(NULL, i);
/*
 * Precompute the HMAC inner/outer contexts (RFC 2104): XOR the key
 * with ipad, hash it plus ipad padding into sw_ictx; re-XOR to opad,
 * hash into sw_octx.
 */
728 for (k = 0; k < cri->cri_klen / 8; k++)
729 cri->cri_key[k] ^= HMAC_IPAD_VAL;
731 axf->Init((*swd)->sw_ictx);
732 axf->Update((*swd)->sw_ictx, cri->cri_key,
734 axf->Update((*swd)->sw_ictx, hmac_ipad_buffer,
735 HMAC_BLOCK_LEN - (cri->cri_klen / 8));
737 for (k = 0; k < cri->cri_klen / 8; k++)
738 cri->cri_key[k] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL);
740 axf->Init((*swd)->sw_octx);
741 axf->Update((*swd)->sw_octx, cri->cri_key,
743 axf->Update((*swd)->sw_octx, hmac_opad_buffer,
744 HMAC_BLOCK_LEN - (cri->cri_klen / 8));
/* Final XOR with opad restores the caller's key bytes unchanged. */
746 for (k = 0; k < cri->cri_klen / 8; k++)
747 cri->cri_key[k] ^= HMAC_OPAD_VAL;
748 (*swd)->sw_axf = axf;
751 case CRYPTO_MD5_KPDK:
752 axf = &auth_hash_key_md5;
755 case CRYPTO_SHA1_KPDK:
756 axf = &auth_hash_key_sha1;
758 (*swd)->sw_ictx = kmalloc(axf->ctxsize, M_CRYPTO_DATA,
760 if ((*swd)->sw_ictx == NULL) {
761 swcr_freesession(NULL, i);
765 /* Store the key so we can "append" it to the payload */
766 (*swd)->sw_octx = kmalloc(cri->cri_klen / 8, M_CRYPTO_DATA,
768 if ((*swd)->sw_octx == NULL) {
769 swcr_freesession(NULL, i);
773 (*swd)->sw_klen = cri->cri_klen / 8;
774 bcopy(cri->cri_key, (*swd)->sw_octx, cri->cri_klen / 8);
/* KPDK: key is hashed up front; the stored copy is appended at auth time. */
775 axf->Init((*swd)->sw_ictx);
776 axf->Update((*swd)->sw_ictx, cri->cri_key,
778 axf->Final(NULL, (*swd)->sw_ictx);
779 (*swd)->sw_axf = axf;
/* Plain (un-keyed) digests. */
783 axf = &auth_hash_md5;
787 axf = &auth_hash_sha1;
789 (*swd)->sw_ictx = kmalloc(axf->ctxsize, M_CRYPTO_DATA,
791 if ((*swd)->sw_ictx == NULL) {
792 swcr_freesession(NULL, i);
796 axf->Init((*swd)->sw_ictx);
797 (*swd)->sw_axf = axf;
800 case CRYPTO_DEFLATE_COMP:
801 cxf = &comp_algo_deflate;
802 (*swd)->sw_cxf = cxf;
805 swcr_freesession(NULL, i);
809 (*swd)->sw_alg = cri->cri_alg;
/* Advance to chain the next algorithm's node. */
811 swd = &((*swd)->sw_next);
/*
 * swcr_freesession --
 *	Tear down session `sid`: walk its swcr_data chain, scrubbing key
 *	material (zerokey / bzero-before-kfree) per algorithm class before
 *	freeing each node.
 *	NOTE(review): interior lines (returns, break statements, axf/txf
 *	assignments) are missing from this dump.
 */
820 swcr_freesession(void *arg, u_int64_t tid)
822 struct swcr_data *swd;
823 struct enc_xform *txf;
824 struct auth_hash *axf;
825 struct comp_algo *cxf;
826 u_int32_t sid = ((u_int32_t) tid) & 0xffffffff;
/*
 * NOTE(review): valid indices are 1..swcr_sesnum-1, but this check
 * admits sid == swcr_sesnum, reading one past the table -- looks like
 * it should be `sid >= swcr_sesnum` (cf. swcr_process). Confirm.
 */
828 if (sid > swcr_sesnum || swcr_sessions == NULL ||
829 swcr_sessions[sid] == NULL)
832 /* Silently accept and return */
836 while ((swd = swcr_sessions[sid]) != NULL) {
837 swcr_sessions[sid] = swd->sw_next;
839 switch (swd->sw_alg) {
841 case CRYPTO_3DES_CBC:
843 case CRYPTO_CAST_CBC:
844 case CRYPTO_SKIPJACK_CBC:
845 case CRYPTO_RIJNDAEL128_CBC:
846 case CRYPTO_NULL_CBC:
/* Ciphers: let the xform scrub and free its key schedule. */
849 if (swd->sw_kschedule)
850 txf->zerokey(&(swd->sw_kschedule));
853 case CRYPTO_MD5_HMAC:
854 case CRYPTO_SHA1_HMAC:
855 case CRYPTO_SHA2_HMAC:
856 case CRYPTO_RIPEMD160_HMAC:
857 case CRYPTO_NULL_HMAC:
/* HMAC: wipe both precomputed contexts before freeing. */
861 bzero(swd->sw_ictx, axf->ctxsize);
862 kfree(swd->sw_ictx, M_CRYPTO_DATA);
865 bzero(swd->sw_octx, axf->ctxsize);
866 kfree(swd->sw_octx, M_CRYPTO_DATA);
870 case CRYPTO_MD5_KPDK:
871 case CRYPTO_SHA1_KPDK:
/* KPDK: sw_octx holds the raw key (sw_klen bytes) -- wipe it too. */
875 bzero(swd->sw_ictx, axf->ctxsize);
876 kfree(swd->sw_ictx, M_CRYPTO_DATA);
879 bzero(swd->sw_octx, swd->sw_klen);
880 kfree(swd->sw_octx, M_CRYPTO_DATA);
889 kfree(swd->sw_ictx, M_CRYPTO_DATA);
892 case CRYPTO_DEFLATE_COMP:
897 FREE(swd, M_CRYPTO_DATA);
903 * Process a software request.
/*
 * swcr_process --
 *	Driver entry point for a crypto request: validate it, resolve the
 *	session, classify the buffer type, then run each descriptor through
 *	the matching worker (encdec / authcompute / compdec).  Errors are
 *	reported via crp->crp_etype.
 *	NOTE(review): interior lines (crypto_done() calls, break/goto
 *	statements, brace closures) are missing from this dump.
 */
906 swcr_process(void *arg, struct cryptop *crp, int hint)
908 struct cryptodesc *crd;
909 struct swcr_data *sw;
917 if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
918 crp->crp_etype = EINVAL;
/* Low 32 bits of the session id index the session table. */
922 lid = crp->crp_sid & 0xffffffff;
923 if (lid >= swcr_sesnum || lid == 0 || swcr_sessions[lid] == NULL) {
924 crp->crp_etype = ENOENT;
/* Classify the request buffer once, up front. */
928 if (crp->crp_flags & CRYPTO_F_IMBUF) {
929 type = CRYPTO_BUF_MBUF;
930 } else if (crp->crp_flags & CRYPTO_F_IOV) {
931 type = CRYPTO_BUF_IOV;
933 type = CRYPTO_BUF_CONTIG;
936 /* Go through crypto descriptors, processing as we go */
937 for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
939 * Find the crypto context.
941 * XXX Note that the logic here prevents us from having
942 * XXX the same algorithm multiple times in a session
943 * XXX (or rather, we can but it won't give us the right
944 * XXX results). To do that, we'd need some way of differentiating
945 * XXX between the various instances of an algorithm (so we can
946 * XXX locate the correct crypto context).
948 for (sw = swcr_sessions[lid];
949 sw && sw->sw_alg != crd->crd_alg;
953 /* No such context ? */
955 crp->crp_etype = EINVAL;
958 switch (sw->sw_alg) {
960 case CRYPTO_3DES_CBC:
962 case CRYPTO_CAST_CBC:
963 case CRYPTO_SKIPJACK_CBC:
964 case CRYPTO_RIJNDAEL128_CBC:
965 if ((crp->crp_etype = swcr_encdec(crd, sw,
966 crp->crp_buf, type)) != 0)
/* NULL cipher: nothing to do. */
969 case CRYPTO_NULL_CBC:
972 case CRYPTO_MD5_HMAC:
973 case CRYPTO_SHA1_HMAC:
974 case CRYPTO_SHA2_HMAC:
975 case CRYPTO_RIPEMD160_HMAC:
976 case CRYPTO_NULL_HMAC:
977 case CRYPTO_MD5_KPDK:
978 case CRYPTO_SHA1_KPDK:
981 if ((crp->crp_etype = swcr_authcompute(crp, crd, sw,
982 crp->crp_buf, type)) != 0)
986 case CRYPTO_DEFLATE_COMP:
987 if ((crp->crp_etype = swcr_compdec(crd, sw,
988 crp->crp_buf, type)) != 0)
/* Report the (de)compressed size produced by swcr_compdec(). */
991 crp->crp_olen = (int)sw->sw_size;
995 /* Unknown/unsupported algorithm */
996 crp->crp_etype = EINVAL;
/*
 * swcr_init --
 *	Boot-time registration: obtain a software driver id, panic if none,
 *	register DES_CBC with the three driver callbacks, then register the
 *	remaining algorithms (callbacks shared via the first registration).
 *	NOTE(review): the function's signature line and some error checks
 *	are missing from this dump.
 */
1007 * Initialize the driver, called from the kernel main().
1012 swcr_id = crypto_get_driverid(CRYPTOCAP_F_SOFTWARE);
1014 panic("Software crypto device cannot initialize!");
1015 crypto_register(swcr_id, CRYPTO_DES_CBC,
1016 0, 0, swcr_newsession, swcr_freesession, swcr_process, NULL);
1017 #define REGISTER(alg) \
1018 crypto_register(swcr_id, alg, 0,0,NULL,NULL,NULL,NULL)
1019 REGISTER(CRYPTO_3DES_CBC);
1020 REGISTER(CRYPTO_BLF_CBC);
1021 REGISTER(CRYPTO_CAST_CBC);
1022 REGISTER(CRYPTO_SKIPJACK_CBC);
1023 REGISTER(CRYPTO_NULL_CBC);
1024 REGISTER(CRYPTO_MD5_HMAC);
1025 REGISTER(CRYPTO_SHA1_HMAC);
1026 REGISTER(CRYPTO_SHA2_HMAC);
1027 REGISTER(CRYPTO_RIPEMD160_HMAC);
1028 REGISTER(CRYPTO_NULL_HMAC);
1029 REGISTER(CRYPTO_MD5_KPDK);
1030 REGISTER(CRYPTO_SHA1_KPDK);
1031 REGISTER(CRYPTO_MD5);
1032 REGISTER(CRYPTO_SHA1);
1033 REGISTER(CRYPTO_RIJNDAEL128_CBC);
1034 REGISTER(CRYPTO_DEFLATE_COMP);
/* Hook driver init into kernel startup ordering. */
1037 SYSINIT(cryptosoft_init, SI_SUB_PSEUDO, SI_ORDER_ANY, swcr_init, NULL)