1 /* $FreeBSD: src/sys/opencrypto/cryptosoft.c,v 1.2.2.1 2002/11/21 23:34:23 sam Exp $ */
2 /* $DragonFly: src/sys/opencrypto/cryptosoft.c,v 1.2 2003/06/17 04:28:54 dillon Exp $ */
3 /* $OpenBSD: cryptosoft.c,v 1.35 2002/04/26 08:43:50 deraadt Exp $ */
6 * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
8 * This code was written by Angelos D. Keromytis in Athens, Greece, in
9 * February 2000. Network Security Technologies Inc. (NSTI) kindly
10 * supported the development of this code.
12 * Copyright (c) 2000, 2001 Angelos D. Keromytis
14 * Permission to use, copy, and modify this software with or without fee
15 * is hereby granted, provided that this entire notice is included in
16 * all source code copies of any software which is or includes a copy or
17 * modification of this software.
19 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
20 * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
21 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
22 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
26 #include <sys/param.h>
27 #include <sys/systm.h>
28 #include <sys/malloc.h>
30 #include <sys/sysctl.h>
31 #include <sys/errno.h>
32 #include <sys/random.h>
33 #include <sys/kernel.h>
36 #include <crypto/blowfish/blowfish.h>
37 #include <crypto/cast128/cast128.h>
38 #include <crypto/sha1.h>
39 #include <opencrypto/rmd160.h>
40 #include <opencrypto/skipjack.h>
43 #include <opencrypto/cryptodev.h>
44 #include <opencrypto/cryptosoft.h>
45 #include <opencrypto/xform.h>
/*
 * HMAC inner-padding constant: 64 bytes of 0x36, XOR'ed/appended to the
 * key when priming the inner hash context (see swcr_newsession below,
 * where keys shorter than HMAC_BLOCK_LEN are padded with this buffer).
 * NOTE(review): extract is missing lines (closing "};" not visible here).
 */
47 u_int8_t hmac_ipad_buffer[64] = {
48 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
49 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
50 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
51 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
52 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
53 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
54 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
55 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36
/*
 * HMAC outer-padding constant: 64 bytes of 0x5C, used when priming the
 * outer hash context in swcr_newsession for keys shorter than
 * HMAC_BLOCK_LEN.
 * NOTE(review): extract is missing lines (closing "};" not visible here).
 */
58 u_int8_t hmac_opad_buffer[64] = {
59 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
60 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
61 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
62 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
63 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
64 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
65 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
66 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C
/*
 * Table of live software sessions, indexed by session id, plus its
 * current allocated length.  Slot 0 is intentionally left unused
 * (see swcr_newsession: "We leave swcr_sessions[0] empty").
 */
70 struct swcr_data **swcr_sessions = NULL;
71 u_int32_t swcr_sesnum = 0;
/*
 * Dispatch a bulk copy to/from the request buffer based on its type:
 * mbuf chain vs. uio/iovec.  (x) is the CRYPTO_BUF_* type, (a) the
 * buffer, (b) the offset, (c) the length, (d) the data pointer.
 *
 * Fix: the original expansion was an unparenthesized conditional
 * expression with unparenthesized arguments, so the ternary could bind
 * to surrounding operators at a use site and low-precedence argument
 * expressions could mis-group.  Parenthesize the whole expansion and
 * every argument.
 */
#define COPYBACK(x, a, b, c, d) \
	((x) == CRYPTO_BUF_MBUF ? m_copyback((struct mbuf *)(a),(b),(c),(d)) \
	    : cuio_copyback((struct uio *)(a),(b),(c),(d)))
#define COPYDATA(x, a, b, c, d) \
	((x) == CRYPTO_BUF_MBUF ? m_copydata((struct mbuf *)(a),(b),(c),(d)) \
	    : cuio_copydata((struct uio *)(a),(b),(c),(d)))
/*
 * Forward declarations for the driver's internal worker routines and
 * the three entry points registered with the crypto framework
 * (newsession / freesession / process).
 */
81 static int swcr_encdec(struct cryptodesc *, struct swcr_data *, caddr_t, int);
82 static int swcr_authcompute(struct cryptop *crp, struct cryptodesc *crd,
83 struct swcr_data *sw, caddr_t buf, int outtype);
84 static int swcr_compdec(struct cryptodesc *, struct swcr_data *, caddr_t, int);
85 static int swcr_process(void *, struct cryptop *, int);
86 static int swcr_newsession(void *, u_int32_t *, struct cryptoini *);
87 static int swcr_freesession(void *, u_int64_t);
90 * NB: These came over from openbsd and are kept private
91 * to the crypto code for now.
/*
 * m_apply(): walk an mbuf chain from offset `off` for `len` bytes,
 * invoking `f` on each contiguous region (used below to feed mbuf data
 * into a hash Update function without copying).  Prototype only; the
 * implementation lives elsewhere.
 */
93 extern int m_apply(struct mbuf *m, int off, int len,
94 int (*f)(caddr_t, caddr_t, unsigned int), caddr_t fstate);
97 * Apply a symmetric encryption/decryption algorithm.
/*
 * Apply a symmetric block cipher (CBC-style chaining) to the request
 * buffer, in place.  Handles three buffer layouts: contiguous memory,
 * mbuf chains and uio/iovec arrays.  Returns 0 on success, or an errno
 * value (the original returns EINVAL for non-block-multiple lengths —
 * the early-return lines are not all visible in this extract).
 *
 * NOTE(review): this extract is missing interior lines (the original
 * line numbers embedded in the text are non-contiguous), including
 * several declarations, loop setups and closing braces.  Do not assume
 * the visible code is complete; verify against the full file.
 */
100 swcr_encdec(struct cryptodesc *crd, struct swcr_data *sw, caddr_t buf,
103 unsigned char iv[EALG_MAX_BLOCK_LEN], blk[EALG_MAX_BLOCK_LEN], *idat;
104 unsigned char *ivp, piv[EALG_MAX_BLOCK_LEN];
105 struct enc_xform *exf;
109 blks = exf->blocksize;
111 /* Check for non-padded data */
112 if (crd->crd_len % blks)
115 /* Initialize the IV */
116 if (crd->crd_flags & CRD_F_ENCRYPT) {
117 /* IV explicitly provided ? */
118 if (crd->crd_flags & CRD_F_IV_EXPLICIT)
119 bcopy(crd->crd_iv, iv, blks);
/* Otherwise generate a random IV one 32-bit word at a time. */
123 i + sizeof (u_int32_t) < EALG_MAX_BLOCK_LEN;
124 i += sizeof (u_int32_t)) {
125 u_int32_t temp = arc4random();
127 bcopy(&temp, iv + i, sizeof(u_int32_t));
130 * What if the block size is not a multiple
131 * of sizeof (u_int32_t), which is the size of
132 * what arc4random() returns ?
134 if (EALG_MAX_BLOCK_LEN % sizeof (u_int32_t) != 0) {
135 u_int32_t temp = arc4random();
137 bcopy (&temp, iv + i,
138 EALG_MAX_BLOCK_LEN - i);
142 /* Do we need to write the IV */
143 if (!(crd->crd_flags & CRD_F_IV_PRESENT)) {
144 COPYBACK(outtype, buf, crd->crd_inject, blks, iv);
147 } else { /* Decryption */
148 /* IV explicitly provided ? */
149 if (crd->crd_flags & CRD_F_IV_EXPLICIT)
150 bcopy(crd->crd_iv, iv, blks);
/* Otherwise read the IV out of the buffer at crd_inject. */
153 COPYDATA(outtype, buf, crd->crd_inject, blks, iv);
/*
 * Case 1: contiguous buffer — XOR/encrypt (or decrypt/XOR) directly
 * in place, chaining each block with the previous ciphertext block.
 */
159 if (outtype == CRYPTO_BUF_CONTIG) {
160 if (crd->crd_flags & CRD_F_ENCRYPT) {
161 for (i = crd->crd_skip;
162 i < crd->crd_skip + crd->crd_len; i += blks) {
163 /* XOR with the IV/previous block, as appropriate. */
164 if (i == crd->crd_skip)
165 for (k = 0; k < blks; k++)
166 buf[i + k] ^= ivp[k];
168 for (k = 0; k < blks; k++)
169 buf[i + k] ^= buf[i + k - blks];
170 exf->encrypt(sw->sw_kschedule, buf + i);
172 } else { /* Decrypt */
174 * Start at the end, so we don't need to keep the encrypted
175 * block as the IV for the next block.
177 for (i = crd->crd_skip + crd->crd_len - blks;
178 i >= crd->crd_skip; i -= blks) {
179 exf->decrypt(sw->sw_kschedule, buf + i);
181 /* XOR with the IV/previous block, as appropriate */
182 if (i == crd->crd_skip)
183 for (k = 0; k < blks; k++)
184 buf[i + k] ^= ivp[k];
186 for (k = 0; k < blks; k++)
187 buf[i + k] ^= buf[i + k - blks];
/*
 * Case 2: mbuf chain — blocks that straddle an mbuf boundary are
 * bounced through the local `blk` buffer; fully-contained runs are
 * processed in place via `idat`.
 */
192 } else if (outtype == CRYPTO_BUF_MBUF) {
193 struct mbuf *m = (struct mbuf *) buf;
195 /* Find beginning of data */
196 m = m_getptr(m, crd->crd_skip, &k);
204 * If there's insufficient data at the end of
205 * an mbuf, we have to do some copying.
207 if (m->m_len < k + blks && m->m_len != k) {
208 m_copydata(m, k, blks, blk);
210 /* Actual encryption/decryption */
211 if (crd->crd_flags & CRD_F_ENCRYPT) {
212 /* XOR with previous block */
213 for (j = 0; j < blks; j++)
216 exf->encrypt(sw->sw_kschedule, blk);
219 * Keep encrypted block for XOR'ing
222 bcopy(blk, iv, blks);
224 } else { /* decrypt */
226 * Keep encrypted block for XOR'ing
230 bcopy(blk, piv, blks);
232 bcopy(blk, iv, blks);
234 exf->decrypt(sw->sw_kschedule, blk);
236 /* XOR with previous block */
237 for (j = 0; j < blks; j++)
241 bcopy(piv, iv, blks);
246 /* Copy back decrypted block */
247 m_copyback(m, k, blks, blk);
249 /* Advance pointer */
250 m = m_getptr(m, k + blks, &k);
256 /* Could be done... */
261 /* Skip possibly empty mbufs */
263 for (m = m->m_next; m && m->m_len == 0;
274 * Warning: idat may point to garbage here, but
275 * we only use it in the while() loop, only if
276 * there are indeed enough data.
278 idat = mtod(m, unsigned char *) + k;
280 while (m->m_len >= k + blks && i > 0) {
281 if (crd->crd_flags & CRD_F_ENCRYPT) {
282 /* XOR with previous block/IV */
283 for (j = 0; j < blks; j++)
286 exf->encrypt(sw->sw_kschedule, idat);
288 } else { /* decrypt */
290 * Keep encrypted block to be used
291 * in next block's processing.
294 bcopy(idat, piv, blks);
296 bcopy(idat, iv, blks);
298 exf->decrypt(sw->sw_kschedule, idat);
300 /* XOR with previous block/IV */
301 for (j = 0; j < blks; j++)
305 bcopy(piv, iv, blks);
316 return 0; /* Done with mbuf encryption/decryption */
/*
 * Case 3: uio/iovec — same bounce-buffer strategy as the mbuf case,
 * but advancing with cuio_getptr/cuio_copydata/cuio_copyback.
 */
317 } else if (outtype == CRYPTO_BUF_IOV) {
318 struct uio *uio = (struct uio *) buf;
321 /* Find beginning of data */
322 iov = cuio_getptr(uio, crd->crd_skip, &k);
330 * If there's insufficient data at the end of
331 * an iovec, we have to do some copying.
333 if (iov->iov_len < k + blks && iov->iov_len != k) {
334 cuio_copydata(uio, k, blks, blk);
336 /* Actual encryption/decryption */
337 if (crd->crd_flags & CRD_F_ENCRYPT) {
338 /* XOR with previous block */
339 for (j = 0; j < blks; j++)
342 exf->encrypt(sw->sw_kschedule, blk);
345 * Keep encrypted block for XOR'ing
348 bcopy(blk, iv, blks);
350 } else { /* decrypt */
352 * Keep encrypted block for XOR'ing
356 bcopy(blk, piv, blks);
358 bcopy(blk, iv, blks);
360 exf->decrypt(sw->sw_kschedule, blk);
362 /* XOR with previous block */
363 for (j = 0; j < blks; j++)
367 bcopy(piv, iv, blks);
372 /* Copy back decrypted block */
373 cuio_copyback(uio, k, blks, blk);
375 /* Advance pointer */
376 iov = cuio_getptr(uio, k + blks, &k);
382 /* Could be done... */
388 * Warning: idat may point to garbage here, but
389 * we only use it in the while() loop, only if
390 * there are indeed enough data.
392 idat = (char *)iov->iov_base + k;
394 while (iov->iov_len >= k + blks && i > 0) {
395 if (crd->crd_flags & CRD_F_ENCRYPT) {
396 /* XOR with previous block/IV */
397 for (j = 0; j < blks; j++)
400 exf->encrypt(sw->sw_kschedule, idat);
402 } else { /* decrypt */
404 * Keep encrypted block to be used
405 * in next block's processing.
408 bcopy(idat, piv, blks);
410 bcopy(idat, iv, blks);
412 exf->decrypt(sw->sw_kschedule, idat);
414 /* XOR with previous block/IV */
415 for (j = 0; j < blks; j++)
419 bcopy(piv, iv, blks);
/* NOTE(review): comment says "mbuf" but this is the IOV path. */
430 return 0; /* Done with mbuf encryption/decryption */
438 * Compute keyed-hash authenticator.
/*
 * Compute a keyed-hash (HMAC / KPDK / plain digest) authenticator over
 * crd_len bytes starting at crd_skip, and write the result back into
 * the buffer at crd_inject.  The per-session precomputed inner/outer
 * contexts (sw_ictx/sw_octx, built in swcr_newsession) are copied into
 * a local context so the session state is never consumed.
 *
 * NOTE(review): this extract is missing interior lines (local `ctx`
 * and `err` declarations, several returns/breaks and the IOV Update
 * branch are not visible); verify against the full file.
 */
441 swcr_authcompute(struct cryptop *crp, struct cryptodesc *crd,
442 struct swcr_data *sw, caddr_t buf, int outtype)
444 unsigned char aalg[AALG_MAX_RESULT_LEN];
445 struct auth_hash *axf;
449 if (sw->sw_ictx == 0)
/* Start from the precomputed (keyed) inner context. */
454 bcopy(sw->sw_ictx, &ctx, axf->ctxsize);
457 case CRYPTO_BUF_CONTIG:
458 axf->Update(&ctx, buf + crd->crd_skip, crd->crd_len);
460 case CRYPTO_BUF_MBUF:
/* Feed mbuf data into Update piecewise, without copying. */
461 err = m_apply((struct mbuf *) buf, crd->crd_skip, crd->crd_len,
462 (int (*)(caddr_t, caddr_t, unsigned int)) axf->Update,
472 switch (sw->sw_alg) {
473 case CRYPTO_MD5_HMAC:
474 case CRYPTO_SHA1_HMAC:
475 case CRYPTO_SHA2_HMAC:
476 case CRYPTO_RIPEMD160_HMAC:
477 if (sw->sw_octx == NULL)
/* HMAC: finish inner hash, then hash its output with the outer ctx. */
480 axf->Final(aalg, &ctx);
481 bcopy(sw->sw_octx, &ctx, axf->ctxsize);
482 axf->Update(&ctx, aalg, axf->hashsize);
483 axf->Final(aalg, &ctx);
486 case CRYPTO_MD5_KPDK:
487 case CRYPTO_SHA1_KPDK:
488 if (sw->sw_octx == NULL)
/* KPDK: append the raw key (stashed in sw_octx) and finalize. */
491 axf->Update(&ctx, sw->sw_octx, sw->sw_klen);
492 axf->Final(aalg, &ctx);
495 case CRYPTO_NULL_HMAC:
496 axf->Final(aalg, &ctx);
500 /* Inject the authentication data */
501 if (outtype == CRYPTO_BUF_CONTIG)
502 bcopy(aalg, buf + crd->crd_inject, axf->authsize);
504 m_copyback((struct mbuf *) buf, crd->crd_inject,
505 axf->authsize, aalg);
510 * Apply a compression/decompression algorithm
/*
 * Apply a (de)compression transform: copy the input region into a
 * temporary contiguous buffer, run the comp_algo callback, copy the
 * result back, and shrink the destination (m_adj for mbufs, iov_len
 * trimming for uio) when the output is shorter than the input.  The
 * result length is recorded in sw->sw_size for swcr_process to report
 * via crp_olen.
 *
 * NOTE(review): this extract is missing interior lines (allocation
 * failure checks, `result == 0` handling, several returns/braces);
 * verify against the full file.
 */
513 swcr_compdec(struct cryptodesc *crd, struct swcr_data *sw,
514 caddr_t buf, int outtype)
516 u_int8_t *data, *out;
517 struct comp_algo *cxf;
523 /* We must handle the whole buffer of data in one time
524 * then if there is not all the data in the mbuf, we must
528 MALLOC(data, u_int8_t *, crd->crd_len, M_CRYPTO_DATA, M_NOWAIT);
531 COPYDATA(outtype, buf, crd->crd_skip, crd->crd_len, data);
533 if (crd->crd_flags & CRD_F_COMP)
534 result = cxf->compress(data, crd->crd_len, &out);
536 result = cxf->decompress(data, crd->crd_len, &out);
/* The staging input copy is no longer needed. */
538 FREE(data, M_CRYPTO_DATA);
542 /* Copy back the (de)compressed data. m_copyback is
543 * extending the mbuf as necessary.
545 sw->sw_size = result;
546 /* Check the compressed size when doing compression */
547 if (crd->crd_flags & CRD_F_COMP) {
548 if (result > crd->crd_len) {
549 /* Compression was useless, we lost time */
550 FREE(out, M_CRYPTO_DATA);
555 COPYBACK(outtype, buf, crd->crd_skip, result, out);
556 if (result < crd->crd_len) {
557 adj = result - crd->crd_len;
558 if (outtype == CRYPTO_BUF_MBUF) {
/* NOTE(review): adj recomputed here — redundant with the line above. */
559 adj = result - crd->crd_len;
560 m_adj((struct mbuf *)buf, adj);
562 struct uio *uio = (struct uio *)buf;
/* For IOV, trim iovecs from the tail until `adj` bytes are removed. */
565 adj = crd->crd_len - result;
566 ind = uio->uio_iovcnt - 1;
568 while (adj > 0 && ind >= 0) {
569 if (adj < uio->uio_iov[ind].iov_len) {
570 uio->uio_iov[ind].iov_len -= adj;
574 adj -= uio->uio_iov[ind].iov_len;
575 uio->uio_iov[ind].iov_len = 0;
581 FREE(out, M_CRYPTO_DATA);
586 * Generate a new software session.
/*
 * Generate a new software session: find (or grow) a free slot in
 * swcr_sessions, then walk the cryptoini chain allocating one
 * swcr_data per requested algorithm — cipher key schedules for the
 * *_CBC transforms, precomputed ipad/opad hash contexts for the HMACs,
 * a stashed key for the KPDK variants, a plain context for MD5/SHA1,
 * and the deflate comp_algo.  On any failure the partially-built
 * session is torn down via swcr_freesession.
 *
 * NOTE(review): this extract is missing interior lines (several
 * `break`s, error returns, the session-table doubling branch and
 * closing braces); the original numbering is non-contiguous.  Verify
 * against the full file before changing logic here.
 */
589 swcr_newsession(void *arg, u_int32_t *sid, struct cryptoini *cri)
591 struct swcr_data **swd;
592 struct auth_hash *axf;
593 struct enc_xform *txf;
594 struct comp_algo *cxf;
598 if (sid == NULL || cri == NULL)
/* Look for the first free slot; slot 0 is reserved/unused. */
602 for (i = 1; i < swcr_sesnum; i++)
603 if (swcr_sessions[i] == NULL)
606 i = 1; /* NB: to silence compiler warning */
608 if (swcr_sessions == NULL || i == swcr_sesnum) {
609 if (swcr_sessions == NULL) {
610 i = 1; /* We leave swcr_sessions[0] empty */
611 swcr_sesnum = CRYPTO_SW_SESSIONS;
/* Allocate a (new or doubled) session table, zero-filled. */
615 swd = malloc(swcr_sesnum * sizeof(struct swcr_data *),
616 M_CRYPTO_DATA, M_NOWAIT|M_ZERO);
618 /* Reset session number */
619 if (swcr_sesnum == CRYPTO_SW_SESSIONS)
626 /* Copy existing sessions */
628 bcopy(swcr_sessions, swd,
629 (swcr_sesnum / 2) * sizeof(struct swcr_data *));
630 free(swcr_sessions, M_CRYPTO_DATA);
636 swd = &swcr_sessions[i];
/* One swcr_data per cryptoini entry, chained through sw_next. */
640 MALLOC(*swd, struct swcr_data *, sizeof(struct swcr_data),
641 M_CRYPTO_DATA, M_NOWAIT|M_ZERO);
643 swcr_freesession(NULL, i);
647 switch (cri->cri_alg) {
649 txf = &enc_xform_des;
651 case CRYPTO_3DES_CBC:
652 txf = &enc_xform_3des;
655 txf = &enc_xform_blf;
657 case CRYPTO_CAST_CBC:
658 txf = &enc_xform_cast5;
660 case CRYPTO_SKIPJACK_CBC:
661 txf = &enc_xform_skipjack;
663 case CRYPTO_RIJNDAEL128_CBC:
664 txf = &enc_xform_rijndael128;
666 case CRYPTO_NULL_CBC:
667 txf = &enc_xform_null;
/* Expand the key schedule for the selected cipher transform. */
670 error = txf->setkey(&((*swd)->sw_kschedule),
671 cri->cri_key, cri->cri_klen / 8);
673 swcr_freesession(NULL, i);
676 (*swd)->sw_exf = txf;
679 case CRYPTO_MD5_HMAC:
680 axf = &auth_hash_hmac_md5_96;
682 case CRYPTO_SHA1_HMAC:
683 axf = &auth_hash_hmac_sha1_96;
685 case CRYPTO_SHA2_HMAC:
/* SHA-2 variant is selected by key length in bits. */
686 if (cri->cri_klen == 256)
687 axf = &auth_hash_hmac_sha2_256;
688 else if (cri->cri_klen == 384)
689 axf = &auth_hash_hmac_sha2_384;
690 else if (cri->cri_klen == 512)
691 axf = &auth_hash_hmac_sha2_512;
693 swcr_freesession(NULL, i);
697 case CRYPTO_NULL_HMAC:
698 axf = &auth_hash_null;
700 case CRYPTO_RIPEMD160_HMAC:
701 axf = &auth_hash_hmac_ripemd_160_96;
703 (*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
705 if ((*swd)->sw_ictx == NULL) {
706 swcr_freesession(NULL, i);
710 (*swd)->sw_octx = malloc(axf->ctxsize, M_CRYPTO_DATA,
712 if ((*swd)->sw_octx == NULL) {
713 swcr_freesession(NULL, i);
/*
 * Standard HMAC precompute: XOR the key with ipad, hash key+ipad
 * padding into sw_ictx; then flip to opad and do the same for
 * sw_octx; finally undo the XOR so cri_key is left unmodified.
 */
717 for (k = 0; k < cri->cri_klen / 8; k++)
718 cri->cri_key[k] ^= HMAC_IPAD_VAL;
720 axf->Init((*swd)->sw_ictx);
721 axf->Update((*swd)->sw_ictx, cri->cri_key,
723 axf->Update((*swd)->sw_ictx, hmac_ipad_buffer,
724 HMAC_BLOCK_LEN - (cri->cri_klen / 8));
726 for (k = 0; k < cri->cri_klen / 8; k++)
727 cri->cri_key[k] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL);
729 axf->Init((*swd)->sw_octx);
730 axf->Update((*swd)->sw_octx, cri->cri_key,
732 axf->Update((*swd)->sw_octx, hmac_opad_buffer,
733 HMAC_BLOCK_LEN - (cri->cri_klen / 8));
735 for (k = 0; k < cri->cri_klen / 8; k++)
736 cri->cri_key[k] ^= HMAC_OPAD_VAL;
737 (*swd)->sw_axf = axf;
740 case CRYPTO_MD5_KPDK:
741 axf = &auth_hash_key_md5;
744 case CRYPTO_SHA1_KPDK:
745 axf = &auth_hash_key_sha1;
747 (*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
749 if ((*swd)->sw_ictx == NULL) {
750 swcr_freesession(NULL, i);
754 /* Store the key so we can "append" it to the payload */
755 (*swd)->sw_octx = malloc(cri->cri_klen / 8, M_CRYPTO_DATA,
757 if ((*swd)->sw_octx == NULL) {
758 swcr_freesession(NULL, i);
762 (*swd)->sw_klen = cri->cri_klen / 8;
763 bcopy(cri->cri_key, (*swd)->sw_octx, cri->cri_klen / 8);
764 axf->Init((*swd)->sw_ictx);
765 axf->Update((*swd)->sw_ictx, cri->cri_key,
767 axf->Final(NULL, (*swd)->sw_ictx);
768 (*swd)->sw_axf = axf;
772 axf = &auth_hash_md5;
776 axf = &auth_hash_sha1;
/* Plain (un-keyed) digest: just an initialized context. */
778 (*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
780 if ((*swd)->sw_ictx == NULL) {
781 swcr_freesession(NULL, i);
785 axf->Init((*swd)->sw_ictx);
786 (*swd)->sw_axf = axf;
789 case CRYPTO_DEFLATE_COMP:
790 cxf = &comp_algo_deflate;
791 (*swd)->sw_cxf = cxf;
794 swcr_freesession(NULL, i);
798 (*swd)->sw_alg = cri->cri_alg;
800 swd = &((*swd)->sw_next);
/*
 * Free a software session: walk the swcr_data chain for the slot,
 * zeroing key schedules and hash contexts before releasing them
 * (sensitive material is wiped with bzero prior to free), then free
 * each chain element.  The low 32 bits of `tid` are the session id.
 *
 * NOTE(review): this extract is missing interior lines (return
 * statements, some txf/axf assignments, closing braces); the bounds
 * check visible at line 817 uses `>` where `>=` would be expected for
 * an index into a table of swcr_sesnum entries — confirm against the
 * full file before concluding it is a bug.
 */
809 swcr_freesession(void *arg, u_int64_t tid)
811 struct swcr_data *swd;
812 struct enc_xform *txf;
813 struct auth_hash *axf;
814 struct comp_algo *cxf;
815 u_int32_t sid = ((u_int32_t) tid) & 0xffffffff;
817 if (sid > swcr_sesnum || swcr_sessions == NULL ||
818 swcr_sessions[sid] == NULL)
821 /* Silently accept and return */
825 while ((swd = swcr_sessions[sid]) != NULL) {
826 swcr_sessions[sid] = swd->sw_next;
828 switch (swd->sw_alg) {
830 case CRYPTO_3DES_CBC:
832 case CRYPTO_CAST_CBC:
833 case CRYPTO_SKIPJACK_CBC:
834 case CRYPTO_RIJNDAEL128_CBC:
835 case CRYPTO_NULL_CBC:
/* Ciphers: let the transform wipe and free its key schedule. */
838 if (swd->sw_kschedule)
839 txf->zerokey(&(swd->sw_kschedule));
842 case CRYPTO_MD5_HMAC:
843 case CRYPTO_SHA1_HMAC:
844 case CRYPTO_SHA2_HMAC:
845 case CRYPTO_RIPEMD160_HMAC:
846 case CRYPTO_NULL_HMAC:
/* HMACs: zero both keyed contexts before freeing. */
850 bzero(swd->sw_ictx, axf->ctxsize);
851 free(swd->sw_ictx, M_CRYPTO_DATA);
854 bzero(swd->sw_octx, axf->ctxsize);
855 free(swd->sw_octx, M_CRYPTO_DATA);
859 case CRYPTO_MD5_KPDK:
860 case CRYPTO_SHA1_KPDK:
/* KPDK: sw_octx holds the raw key (sw_klen bytes) — wipe it. */
864 bzero(swd->sw_ictx, axf->ctxsize);
865 free(swd->sw_ictx, M_CRYPTO_DATA);
868 bzero(swd->sw_octx, swd->sw_klen);
869 free(swd->sw_octx, M_CRYPTO_DATA);
878 free(swd->sw_ictx, M_CRYPTO_DATA);
881 case CRYPTO_DEFLATE_COMP:
886 FREE(swd, M_CRYPTO_DATA);
892 * Process a software request.
/*
 * Driver "process" entry point: validate the request, determine the
 * buffer layout (mbuf / iovec / contiguous), and for each descriptor
 * in the request find the matching per-session context and dispatch to
 * swcr_encdec, swcr_authcompute or swcr_compdec.  Errors are reported
 * through crp->crp_etype; completion presumably goes through
 * crypto_done() on lines not visible in this extract.
 *
 * NOTE(review): this extract is missing interior lines (goto/done
 * labels, `break`s, the crypto_done call and closing braces); verify
 * against the full file.
 */
895 swcr_process(void *arg, struct cryptop *crp, int hint)
897 struct cryptodesc *crd;
898 struct swcr_data *sw;
906 if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
907 crp->crp_etype = EINVAL;
/* Session id is the low 32 bits of crp_sid; slot 0 is invalid. */
911 lid = crp->crp_sid & 0xffffffff;
912 if (lid >= swcr_sesnum || lid == 0 || swcr_sessions[lid] == NULL) {
913 crp->crp_etype = ENOENT;
917 if (crp->crp_flags & CRYPTO_F_IMBUF) {
918 type = CRYPTO_BUF_MBUF;
919 } else if (crp->crp_flags & CRYPTO_F_IOV) {
920 type = CRYPTO_BUF_IOV;
922 type = CRYPTO_BUF_CONTIG;
925 /* Go through crypto descriptors, processing as we go */
926 for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
928 * Find the crypto context.
930 * XXX Note that the logic here prevents us from having
931 * XXX the same algorithm multiple times in a session
932 * XXX (or rather, we can but it won't give us the right
933 * XXX results). To do that, we'd need some way of differentiating
934 * XXX between the various instances of an algorithm (so we can
935 * XXX locate the correct crypto context).
937 for (sw = swcr_sessions[lid];
938 sw && sw->sw_alg != crd->crd_alg;
942 /* No such context ? */
944 crp->crp_etype = EINVAL;
947 switch (sw->sw_alg) {
949 case CRYPTO_3DES_CBC:
951 case CRYPTO_CAST_CBC:
952 case CRYPTO_SKIPJACK_CBC:
953 case CRYPTO_RIJNDAEL128_CBC:
954 if ((crp->crp_etype = swcr_encdec(crd, sw,
955 crp->crp_buf, type)) != 0)
958 case CRYPTO_NULL_CBC:
961 case CRYPTO_MD5_HMAC:
962 case CRYPTO_SHA1_HMAC:
963 case CRYPTO_SHA2_HMAC:
964 case CRYPTO_RIPEMD160_HMAC:
965 case CRYPTO_NULL_HMAC:
966 case CRYPTO_MD5_KPDK:
967 case CRYPTO_SHA1_KPDK:
970 if ((crp->crp_etype = swcr_authcompute(crp, crd, sw,
971 crp->crp_buf, type)) != 0)
975 case CRYPTO_DEFLATE_COMP:
976 if ((crp->crp_etype = swcr_compdec(crd, sw,
977 crp->crp_buf, type)) != 0)
/* Report the (de)compressed output length back to the caller. */
980 crp->crp_olen = (int)sw->sw_size;
984 /* Unknown/unsupported algorithm */
985 crp->crp_etype = EINVAL;
996 * Initialize the driver, called from the kernel main().
/*
 * Driver initialization, run at boot via the SYSINIT below: obtain a
 * driver id from the crypto framework, register CRYPTO_DES_CBC with
 * the three entry points (newsession/freesession/process), and
 * register the remaining algorithms (the framework reuses the entry
 * points given on the first registration, so REGISTER passes NULLs).
 *
 * NOTE(review): the function's signature line is missing from this
 * extract (only the body is visible).
 */
1001 swcr_id = crypto_get_driverid(CRYPTOCAP_F_SOFTWARE);
1003 panic("Software crypto device cannot initialize!");
1004 crypto_register(swcr_id, CRYPTO_DES_CBC,
1005 0, 0, swcr_newsession, swcr_freesession, swcr_process, NULL);
1006 #define REGISTER(alg) \
1007 crypto_register(swcr_id, alg, 0,0,NULL,NULL,NULL,NULL)
1008 REGISTER(CRYPTO_3DES_CBC);
1009 REGISTER(CRYPTO_BLF_CBC);
1010 REGISTER(CRYPTO_CAST_CBC);
1011 REGISTER(CRYPTO_SKIPJACK_CBC);
1012 REGISTER(CRYPTO_NULL_CBC);
1013 REGISTER(CRYPTO_MD5_HMAC);
1014 REGISTER(CRYPTO_SHA1_HMAC);
1015 REGISTER(CRYPTO_SHA2_HMAC);
1016 REGISTER(CRYPTO_RIPEMD160_HMAC);
1017 REGISTER(CRYPTO_NULL_HMAC);
1018 REGISTER(CRYPTO_MD5_KPDK);
1019 REGISTER(CRYPTO_SHA1_KPDK);
1020 REGISTER(CRYPTO_MD5);
1021 REGISTER(CRYPTO_SHA1);
1022 REGISTER(CRYPTO_RIJNDAEL128_CBC);
1023 REGISTER(CRYPTO_DEFLATE_COMP);
1026 SYSINIT(cryptosoft_init, SI_SUB_PSEUDO, SI_ORDER_ANY, swcr_init, NULL)