/*
 * Copyright (c) 2003 Sam Leffler, Errno Consulting
 * Copyright (c) 2003 Global Technology Associates, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/safe/safe.c,v 1.22 2011/06/12 23:33:08 delphij Exp $
 */

/*
 * SafeNet SafeXcel-1141 hardware crypto accelerator
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/sysctl.h>
#include <sys/endian.h>

#include <crypto/sha1.h>
#include <opencrypto/cryptodev.h>
#include <opencrypto/cryptosoft.h>
#include <sys/random.h>

#include "cryptodev_if.h"

#include <bus/pci/pcivar.h>
#include <bus/pci/pcireg.h>

#include <dev/crypto/rndtest/rndtest.h>

#include <dev/crypto/safe/safereg.h>
#include <dev/crypto/safe/safevar.h>
/*
 * Prototypes and count for the pci_device structure
 */
static	int safe_probe(device_t);
static	int safe_attach(device_t);
static	int safe_detach(device_t);
static	int safe_suspend(device_t);
static	int safe_resume(device_t);
static	int safe_shutdown(device_t);

static	int safe_newsession(device_t, u_int32_t *, struct cryptoini *);
static	int safe_freesession(device_t, u_int64_t);
static	int safe_process(device_t, struct cryptop *, int);
static device_method_t safe_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		safe_probe),
	DEVMETHOD(device_attach,	safe_attach),
	DEVMETHOD(device_detach,	safe_detach),
	DEVMETHOD(device_suspend,	safe_suspend),
	DEVMETHOD(device_resume,	safe_resume),
	DEVMETHOD(device_shutdown,	safe_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* crypto device methods */
	DEVMETHOD(cryptodev_newsession,	safe_newsession),
	DEVMETHOD(cryptodev_freesession,safe_freesession),
	DEVMETHOD(cryptodev_process,	safe_process),

	DEVMETHOD_END
};

static driver_t safe_driver = {
	"safe",
	safe_methods,
	sizeof (struct safe_softc)
};
static devclass_t safe_devclass;

DRIVER_MODULE(safe, pci, safe_driver, safe_devclass, NULL, NULL);
MODULE_DEPEND(safe, crypto, 1, 1, 1);
MODULE_DEPEND(safe, rndtest, 1, 1, 1);
static	void safe_intr(void *);
static	void safe_callback(struct safe_softc *, struct safe_ringentry *);
static	void safe_feed(struct safe_softc *, struct safe_ringentry *);
static	void safe_mcopy(struct mbuf *, struct mbuf *, u_int);
#ifndef SAFE_NO_RNG
static	void safe_rng_init(struct safe_softc *);
static	void safe_rng(void *);
#endif /* SAFE_NO_RNG */
static	int safe_dma_malloc(struct safe_softc *, bus_size_t,
	    struct safe_dma_alloc *, int);
#define	safe_dma_sync(_dma, _flags) \
	bus_dmamap_sync((_dma)->dma_tag, (_dma)->dma_map, (_flags))
static	void safe_dma_free(struct safe_softc *, struct safe_dma_alloc *);
static	int safe_dmamap_aligned(const struct safe_operand *);
static	int safe_dmamap_uniform(const struct safe_operand *);
static	void safe_reset_board(struct safe_softc *);
static	void safe_init_board(struct safe_softc *);
static	void safe_init_pciregs(device_t dev);
static	void safe_cleanchip(struct safe_softc *);
static	void safe_totalreset(struct safe_softc *);

static	int safe_free_entry(struct safe_softc *, struct safe_ringentry *);

SYSCTL_NODE(_hw, OID_AUTO, safe, CTLFLAG_RD, 0, "SafeNet driver parameters");

#ifdef SAFE_DEBUG
static	void safe_dump_dmastatus(struct safe_softc *, const char *);
static	void safe_dump_ringstate(struct safe_softc *, const char *);
static	void safe_dump_intrstate(struct safe_softc *, const char *);
static	void safe_dump_request(struct safe_softc *, const char *,
	    struct safe_ringentry *);

static	struct safe_softc *safec;	/* for use by hw.safe.dump */

static	int safe_debug = 0;
SYSCTL_INT(_hw_safe, OID_AUTO, debug, CTLFLAG_RW, &safe_debug,
	    0, "control debugging msgs");
#define	DPRINTF(_x)	if (safe_debug) kprintf _x
#else
#define	DPRINTF(_x)
#endif
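/*
 * NB: DPRINTF takes a single parenthesized argument so the varargs
 * survive macro expansion; usage sketch (double parens required):
 *
 *	DPRINTF(("%s: dmastat %08x\n", __func__, stat));
 *
 * With SAFE_DEBUG unset the whole call compiles away.
 */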
#define	READ_REG(sc,r) \
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (r))

#define	WRITE_REG(sc,reg,val) \
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, reg, val)

struct safe_stats safestats;
SYSCTL_STRUCT(_hw_safe, OID_AUTO, stats, CTLFLAG_RD, &safestats,
	    safe_stats, "driver statistics");
#ifndef SAFE_NO_RNG
static	int safe_rnginterval = 1;		/* poll once a second */
SYSCTL_INT(_hw_safe, OID_AUTO, rnginterval, CTLFLAG_RW, &safe_rnginterval,
	    0, "RNG polling interval (secs)");
static	int safe_rngbufsize = 16;		/* 64 bytes each poll */
SYSCTL_INT(_hw_safe, OID_AUTO, rngbufsize, CTLFLAG_RW, &safe_rngbufsize,
	    0, "RNG polling buffer size (32-bit words)");
static	int safe_rngmaxalarm = 8;		/* max alarms before reset */
SYSCTL_INT(_hw_safe, OID_AUTO, rngmaxalarm, CTLFLAG_RW, &safe_rngmaxalarm,
	    0, "RNG max alarms before reset");
#endif /* SAFE_NO_RNG */
static int
safe_probe(device_t dev)
{
	if (pci_get_vendor(dev) == PCI_VENDOR_SAFENET &&
	    pci_get_device(dev) == PCI_PRODUCT_SAFEXCEL)
		return (BUS_PROBE_DEFAULT);
	return (ENXIO);
}

static const char *
safe_partname(struct safe_softc *sc)
{
	/* XXX sprintf numbers when not decoded */
	switch (pci_get_vendor(sc->sc_dev)) {
	case PCI_VENDOR_SAFENET:
		switch (pci_get_device(sc->sc_dev)) {
		case PCI_PRODUCT_SAFEXCEL: return "SafeNet SafeXcel-1141";
		}
		return "SafeNet unknown-part";
	}
	return "Unknown-vendor unknown-part";
}

#ifndef SAFE_NO_RNG
static void
default_harvest(struct rndtest_state *rsp, void *buf, u_int count)
{
	add_buffer_randomness_src(buf, count, RAND_SRC_SAFE);
}
#endif /* SAFE_NO_RNG */
static int
safe_attach(device_t dev)
{
	struct safe_softc *sc = device_get_softc(dev);
	u_int32_t raddr;
	u_int32_t cmd, i, devinfo;
	int rid;

	bzero(sc, sizeof (*sc));
	sc->sc_dev = dev;

	/* XXX handle power management */

	cmd = pci_read_config(dev, PCIR_COMMAND, 4);
	cmd |= PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN;
	pci_write_config(dev, PCIR_COMMAND, cmd, 4);
	cmd = pci_read_config(dev, PCIR_COMMAND, 4);

	if (!(cmd & PCIM_CMD_MEMEN)) {
		device_printf(dev, "failed to enable memory mapping\n");
		goto bad;
	}

	if (!(cmd & PCIM_CMD_BUSMASTEREN)) {
		device_printf(dev, "failed to enable bus mastering\n");
		goto bad;
	}

	/*
	 * Setup memory-mapping of PCI registers.
	 */
	rid = BS_BAR;
	sc->sc_sr = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->sc_sr == NULL) {
		device_printf(dev, "cannot map register space\n");
		goto bad;
	}
	sc->sc_st = rman_get_bustag(sc->sc_sr);
	sc->sc_sh = rman_get_bushandle(sc->sc_sr);

	/*
	 * Arrange interrupt line.
	 */
	rid = 0;
	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE|RF_ACTIVE);
	if (sc->sc_irq == NULL) {
		device_printf(dev, "could not map interrupt\n");
		goto bad1;
	}
	/*
	 * NB: Network code assumes we are blocked with splimp()
	 *     so make sure the IRQ is mapped appropriately.
	 */
	if (bus_setup_intr(dev, sc->sc_irq, INTR_MPSAFE,
	    safe_intr, sc, &sc->sc_ih, NULL)) {
		device_printf(dev, "could not establish interrupt\n");
		goto bad2;
	}

	sc->sc_cid = crypto_get_driverid(dev, CRYPTOCAP_F_HARDWARE);
	if (sc->sc_cid < 0) {
		device_printf(dev, "could not get crypto driver id\n");
		goto bad3;
	}

	sc->sc_chiprev = READ_REG(sc, SAFE_DEVINFO) &
	    (SAFE_DEVINFO_REV_MAJ | SAFE_DEVINFO_REV_MIN);

	/*
	 * Setup DMA descriptor area.
	 */
	if (bus_dma_tag_create(NULL,		/* parent */
	    1,					/* alignment */
	    SAFE_DMA_BOUNDARY,			/* boundary */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    SAFE_MAX_DMA,			/* maxsize */
	    SAFE_MAX_PART,			/* nsegments */
	    SAFE_MAX_SSIZE,			/* maxsegsize */
	    BUS_DMA_ALLOCNOW,			/* flags */
	    &sc->sc_srcdmat)) {
		device_printf(dev, "cannot allocate DMA tag\n");
		goto bad4;
	}
	if (bus_dma_tag_create(NULL,		/* parent */
	    sizeof(u_int32_t),			/* alignment */
	    SAFE_MAX_DSIZE,			/* boundary */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    SAFE_MAX_DMA,			/* maxsize */
	    SAFE_MAX_PART,			/* nsegments */
	    SAFE_MAX_DSIZE,			/* maxsegsize */
	    BUS_DMA_ALLOCNOW,			/* flags */
	    &sc->sc_dstdmat)) {
		device_printf(dev, "cannot allocate DMA tag\n");
		goto bad4;
	}

	/*
	 * Allocate packet engine descriptors.
	 */
	if (safe_dma_malloc(sc,
	    SAFE_MAX_NQUEUE * sizeof (struct safe_ringentry),
	    &sc->sc_ringalloc, 0)) {
		device_printf(dev, "cannot allocate PE descriptor ring\n");
		bus_dma_tag_destroy(sc->sc_srcdmat);
		goto bad4;
	}
	/*
	 * Hookup the static portion of all our data structures.
	 */
	sc->sc_ring = (struct safe_ringentry *) sc->sc_ringalloc.dma_vaddr;
	sc->sc_ringtop = sc->sc_ring + SAFE_MAX_NQUEUE;
	sc->sc_front = sc->sc_ring;
	sc->sc_back = sc->sc_ring;
	raddr = sc->sc_ringalloc.dma_paddr;
	bzero(sc->sc_ring, SAFE_MAX_NQUEUE * sizeof(struct safe_ringentry));
	for (i = 0; i < SAFE_MAX_NQUEUE; i++) {
		struct safe_ringentry *re = &sc->sc_ring[i];

		re->re_desc.d_sa = raddr +
		    offsetof(struct safe_ringentry, re_sa);
		re->re_sa.sa_staterec = raddr +
		    offsetof(struct safe_ringentry, re_sastate);

		raddr += sizeof (struct safe_ringentry);
	}
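	/*
	 * NB: the loop above wires each ring entry to itself: d_sa and
	 * sa_staterec get the *bus* addresses of the entry's own SA and
	 * state record, derived from the one contiguous ring allocation.
	 * For entry i the arithmetic is effectively
	 *
	 *	d_sa = dma_paddr + i * sizeof(struct safe_ringentry)
	 *	       + offsetof(struct safe_ringentry, re_sa)
	 *
	 * so no per-request address setup is needed at submit time.
	 */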
	lockinit(&sc->sc_ringlock, "packet engine ring", 0, LK_CANRECURSE);

	/*
	 * Allocate scatter and gather particle descriptors.
	 */
	if (safe_dma_malloc(sc, SAFE_TOTAL_SPART * sizeof (struct safe_pdesc),
	    &sc->sc_spalloc, 0)) {
		device_printf(dev, "cannot allocate source particle "
		    "descriptor ring\n");
		lockuninit(&sc->sc_ringlock);
		safe_dma_free(sc, &sc->sc_ringalloc);
		bus_dma_tag_destroy(sc->sc_srcdmat);
		goto bad4;
	}
	sc->sc_spring = (struct safe_pdesc *) sc->sc_spalloc.dma_vaddr;
	sc->sc_springtop = sc->sc_spring + SAFE_TOTAL_SPART;
	sc->sc_spfree = sc->sc_spring;
	bzero(sc->sc_spring, SAFE_TOTAL_SPART * sizeof(struct safe_pdesc));

	if (safe_dma_malloc(sc, SAFE_TOTAL_DPART * sizeof (struct safe_pdesc),
	    &sc->sc_dpalloc, 0)) {
		device_printf(dev, "cannot allocate destination particle "
		    "descriptor ring\n");
		lockuninit(&sc->sc_ringlock);
		safe_dma_free(sc, &sc->sc_spalloc);
		safe_dma_free(sc, &sc->sc_ringalloc);
		bus_dma_tag_destroy(sc->sc_dstdmat);
		goto bad4;
	}
	sc->sc_dpring = (struct safe_pdesc *) sc->sc_dpalloc.dma_vaddr;
	sc->sc_dpringtop = sc->sc_dpring + SAFE_TOTAL_DPART;
	sc->sc_dpfree = sc->sc_dpring;
	bzero(sc->sc_dpring, SAFE_TOTAL_DPART * sizeof(struct safe_pdesc));
	device_printf(sc->sc_dev, "%s", safe_partname(sc));

	devinfo = READ_REG(sc, SAFE_DEVINFO);
	if (devinfo & SAFE_DEVINFO_RNG) {
		sc->sc_flags |= SAFE_FLAGS_RNG;
		kprintf(" rng");
	}
	if (devinfo & SAFE_DEVINFO_PKEY) {
#if 0
		kprintf(" key");
		sc->sc_flags |= SAFE_FLAGS_KEY;
		crypto_kregister(sc->sc_cid, CRK_MOD_EXP, 0);
		crypto_kregister(sc->sc_cid, CRK_MOD_EXP_CRT, 0);
#endif
	}
	if (devinfo & SAFE_DEVINFO_DES) {
		kprintf(" des/3des");
		crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0);
		crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0);
	}
	if (devinfo & SAFE_DEVINFO_AES) {
		kprintf(" aes");
		crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0);
	}
	if (devinfo & SAFE_DEVINFO_MD5) {
		kprintf(" md5");
		crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0);
	}
	if (devinfo & SAFE_DEVINFO_SHA1) {
		kprintf(" sha1");
		crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0);
	}
	kprintf(" null");
	crypto_register(sc->sc_cid, CRYPTO_NULL_CBC, 0, 0);
	crypto_register(sc->sc_cid, CRYPTO_NULL_HMAC, 0, 0);
	/* XXX other supported algorithms */
	kprintf("\n");
	safe_reset_board(sc);			/* reset h/w */
	safe_init_pciregs(dev);			/* init pci settings */
	safe_init_board(sc);			/* init h/w */

#ifndef SAFE_NO_RNG
	if (sc->sc_flags & SAFE_FLAGS_RNG) {
#ifdef SAFE_RNDTEST
		sc->sc_rndtest = rndtest_attach(dev);
		if (sc->sc_rndtest)
			sc->sc_harvest = rndtest_harvest;
		else
			sc->sc_harvest = default_harvest;
#else
		sc->sc_harvest = default_harvest;
#endif
		safe_rng_init(sc);

		callout_init_mp(&sc->sc_rngto);
		callout_reset(&sc->sc_rngto, hz*safe_rnginterval, safe_rng, sc);
	}
#endif /* SAFE_NO_RNG */

	safec = sc;			/* for use by hw.safe.dump */

	return (0);
bad4:
	crypto_unregister_all(sc->sc_cid);
bad3:
	bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
bad2:
	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq);
bad1:
	bus_release_resource(dev, SYS_RES_MEMORY, BS_BAR, sc->sc_sr);
bad:
	return (ENXIO);
}
/*
 * Detach a device that successfully probed.
 */
static int
safe_detach(device_t dev)
{
	struct safe_softc *sc = device_get_softc(dev);

	/* XXX wait/abort active ops */

	WRITE_REG(sc, SAFE_HI_MASK, 0);		/* disable interrupts */

	callout_stop(&sc->sc_rngto);

	crypto_unregister_all(sc->sc_cid);

#ifdef SAFE_RNDTEST
	if (sc->sc_rndtest)
		rndtest_detach(sc->sc_rndtest);
#endif

	safe_cleanchip(sc);
	safe_dma_free(sc, &sc->sc_dpalloc);
	safe_dma_free(sc, &sc->sc_spalloc);
	lockuninit(&sc->sc_ringlock);
	safe_dma_free(sc, &sc->sc_ringalloc);

	bus_generic_detach(dev);
	bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq);

	bus_dma_tag_destroy(sc->sc_srcdmat);
	bus_dma_tag_destroy(sc->sc_dstdmat);
	bus_release_resource(dev, SYS_RES_MEMORY, BS_BAR, sc->sc_sr);

	return (0);
}
/*
 * Stop all chip i/o so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static int
safe_shutdown(device_t dev)
{
#ifdef notyet
	safe_stop(device_get_softc(dev));
#endif
	return (0);
}

/*
 * Device suspend routine.
 */
static int
safe_suspend(device_t dev)
{
	struct safe_softc *sc = device_get_softc(dev);

#ifdef notyet
	/* XXX stop the device and save PCI settings */
#endif
	sc->sc_suspended = 1;

	return (0);
}

static int
safe_resume(device_t dev)
{
	struct safe_softc *sc = device_get_softc(dev);

#ifdef notyet
	/* XXX restore PCI settings and start the device */
#endif
	sc->sc_suspended = 0;

	return (0);
}
/*
 * SafeXcel Interrupt routine
 */
static void
safe_intr(void *arg)
{
	struct safe_softc *sc = arg;
	volatile u_int32_t stat;

	stat = READ_REG(sc, SAFE_HM_STAT);
	if (stat == 0)			/* shared irq, not for us */
		return;

	WRITE_REG(sc, SAFE_HI_CLR, stat);	/* IACK */

	if ((stat & SAFE_INT_PE_DDONE)) {
		/*
		 * Descriptor(s) done; scan the ring and
		 * process completed operations.
		 */
		lockmgr(&sc->sc_ringlock, LK_EXCLUSIVE);
		while (sc->sc_back != sc->sc_front) {
			struct safe_ringentry *re = sc->sc_back;
#ifdef SAFE_DEBUG
			if (safe_debug) {
				safe_dump_ringstate(sc, __func__);
				safe_dump_request(sc, __func__, re);
			}
#endif
			/*
			 * safe_process marks ring entries that were allocated
			 * but not used with a csr of zero.  This ensures the
			 * ring front pointer never needs to be set backwards
			 * in the event that an entry is allocated but not used
			 * because of a setup error.
			 */
			if (re->re_desc.d_csr != 0) {
				if (!SAFE_PE_CSR_IS_DONE(re->re_desc.d_csr))
					break;
				if (!SAFE_PE_LEN_IS_DONE(re->re_desc.d_len))
					break;
				sc->sc_nqchip--;
				safe_callback(sc, re);
			}
			if (++(sc->sc_back) == sc->sc_ringtop)
				sc->sc_back = sc->sc_ring;
		}
		lockmgr(&sc->sc_ringlock, LK_RELEASE);
	}

	/*
	 * Check to see if we got any DMA Error
	 */
	if (stat & SAFE_INT_PE_ERROR) {
		DPRINTF(("dmaerr dmastat %08x\n",
		    READ_REG(sc, SAFE_PE_DMASTAT)));
		safestats.st_dmaerr++;
		safe_totalreset(sc);
	}

	if (sc->sc_needwakeup) {	/* XXX check high watermark */
		int wakeup = sc->sc_needwakeup & (CRYPTO_SYMQ|CRYPTO_ASYMQ);
		DPRINTF(("%s: wakeup crypto %x\n", __func__,
		    sc->sc_needwakeup));
		sc->sc_needwakeup &= ~wakeup;
		crypto_unblock(sc->sc_cid, wakeup);
	}
}
/*
 * safe_feed() - post a request to chip
 */
static void
safe_feed(struct safe_softc *sc, struct safe_ringentry *re)
{
	bus_dmamap_sync(sc->sc_srcdmat, re->re_src_map, BUS_DMASYNC_PREWRITE);
	if (re->re_dst_map != NULL)
		bus_dmamap_sync(sc->sc_dstdmat, re->re_dst_map,
		    BUS_DMASYNC_PREREAD);
	/* XXX have no smaller granularity */
	safe_dma_sync(&sc->sc_ringalloc,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	safe_dma_sync(&sc->sc_spalloc, BUS_DMASYNC_PREWRITE);
	safe_dma_sync(&sc->sc_dpalloc, BUS_DMASYNC_PREWRITE);

#ifdef SAFE_DEBUG
	if (safe_debug) {
		safe_dump_ringstate(sc, __func__);
		safe_dump_request(sc, __func__, re);
	}
#endif
	sc->sc_nqchip++;
	if (sc->sc_nqchip > safestats.st_maxqchip)
		safestats.st_maxqchip = sc->sc_nqchip;
	/* poke h/w to check descriptor ring, any value can be written */
	WRITE_REG(sc, SAFE_HI_RD_DESCR, 0);
}
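/*
 * NB: the SAFE_HI_RD_DESCR write above is only a doorbell; it
 * prompts the packet engine to rescan the descriptor ring for
 * entries whose CSR/len words are marked ready.  Completion comes
 * back via SAFE_INT_PE_DDONE, after which safe_intr() walks from
 * sc_back toward sc_front using SAFE_PE_CSR_IS_DONE and
 * SAFE_PE_LEN_IS_DONE.
 */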
static void
safe_setup_enckey(struct safe_session *ses, caddr_t key)
{
	int i;

	bcopy(key, ses->ses_key, ses->ses_klen / 8);

	/* PE is little-endian, ensure proper byte order */
	for (i = 0; i < nitems(ses->ses_key); i++)
		ses->ses_key[i] = htole32(ses->ses_key[i]);
}
static void
safe_setup_mackey(struct safe_session *ses, int algo, caddr_t key, int klen)
{
	MD5_CTX md5ctx;
	SHA1_CTX sha1ctx;
	int i;

	for (i = 0; i < klen; i++)
		key[i] ^= HMAC_IPAD_VAL;

	if (algo == CRYPTO_MD5_HMAC) {
		MD5Init(&md5ctx);
		MD5Update(&md5ctx, key, klen);
		MD5Update(&md5ctx, hmac_ipad_buffer, MD5_HMAC_BLOCK_LEN - klen);
		bcopy(&md5ctx.A, ses->ses_hminner, sizeof(md5ctx.A) * 4);
	} else {
		SHA1Init(&sha1ctx);
		SHA1Update(&sha1ctx, key, klen);
		SHA1Update(&sha1ctx, hmac_ipad_buffer,
		    SHA1_HMAC_BLOCK_LEN - klen);
		bcopy(sha1ctx.h.b32, ses->ses_hminner, sizeof(sha1ctx.h.b32));
	}

	for (i = 0; i < klen; i++)
		key[i] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL);

	if (algo == CRYPTO_MD5_HMAC) {
		MD5Init(&md5ctx);
		MD5Update(&md5ctx, key, klen);
		MD5Update(&md5ctx, hmac_opad_buffer, MD5_HMAC_BLOCK_LEN - klen);
		bcopy(&md5ctx.A, ses->ses_hmouter, sizeof(md5ctx.A) * 4);
	} else {
		SHA1Init(&sha1ctx);
		SHA1Update(&sha1ctx, key, klen);
		SHA1Update(&sha1ctx, hmac_opad_buffer,
		    SHA1_HMAC_BLOCK_LEN - klen);
		bcopy(sha1ctx.h.b32, ses->ses_hmouter, sizeof(sha1ctx.h.b32));
	}

	for (i = 0; i < klen; i++)
		key[i] ^= HMAC_OPAD_VAL;

	/* PE is little-endian, ensure proper byte order */
	for (i = 0; i < nitems(ses->ses_hminner); i++) {
		ses->ses_hminner[i] = htole32(ses->ses_hminner[i]);
		ses->ses_hmouter[i] = htole32(ses->ses_hmouter[i]);
	}
}
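/*
 * The code above is the standard HMAC key schedule,
 * HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m)): hash one block
 * of key XOR ipad and save the compression-function midstate as
 * ses_hminner, likewise with opad for ses_hmouter.  The hardware
 * later resumes from those midstates (SAFE_SA_CMD0_HSLD_SA) and
 * never sees the raw key.  Equivalent sketch for the inner MD5
 * midstate, assuming a 64-byte block and zero-padded key:
 *
 *	u_char blk[64];
 *	bzero(blk, sizeof(blk));
 *	bcopy(key, blk, klen);
 *	for (i = 0; i < sizeof(blk); i++)
 *		blk[i] ^= HMAC_IPAD_VAL;
 *	MD5Init(&md5ctx);
 *	MD5Update(&md5ctx, blk, sizeof(blk));
 *	bcopy(&md5ctx.A, ses->ses_hminner, 16);
 *
 * hmac_ipad_buffer/hmac_opad_buffer supply the padding bytes in the
 * in-place version used above.
 */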
/*
 * Allocate a new 'session' and return an encoded session id.  'sidp'
 * contains our registration id, and should contain an encoded session
 * id on successful allocation.
 */
static int
safe_newsession(device_t dev, u_int32_t *sidp, struct cryptoini *cri)
{
	struct safe_softc *sc = device_get_softc(dev);
	struct cryptoini *c, *encini = NULL, *macini = NULL;
	struct safe_session *ses = NULL;
	int sesn;

	if (sidp == NULL || cri == NULL || sc == NULL)
		return (EINVAL);

	for (c = cri; c != NULL; c = c->cri_next) {
		if (c->cri_alg == CRYPTO_MD5_HMAC ||
		    c->cri_alg == CRYPTO_SHA1_HMAC ||
		    c->cri_alg == CRYPTO_NULL_HMAC) {
			if (macini)
				return (EINVAL);
			macini = c;
		} else if (c->cri_alg == CRYPTO_DES_CBC ||
		    c->cri_alg == CRYPTO_3DES_CBC ||
		    c->cri_alg == CRYPTO_AES_CBC ||
		    c->cri_alg == CRYPTO_NULL_CBC) {
			if (encini)
				return (EINVAL);
			encini = c;
		} else
			return (EINVAL);
	}
	if (encini == NULL && macini == NULL)
		return (EINVAL);
	if (encini) {			/* validate key length */
		switch (encini->cri_alg) {
		case CRYPTO_DES_CBC:
			if (encini->cri_klen != 64)
				return (EINVAL);
			break;
		case CRYPTO_3DES_CBC:
			if (encini->cri_klen != 192)
				return (EINVAL);
			break;
		case CRYPTO_AES_CBC:
			if (encini->cri_klen != 128 &&
			    encini->cri_klen != 192 &&
			    encini->cri_klen != 256)
				return (EINVAL);
			break;
		}
	}

	if (sc->sc_sessions == NULL) {
		ses = sc->sc_sessions = (struct safe_session *)kmalloc(
		    sizeof(struct safe_session), M_DEVBUF, M_NOWAIT);
		if (ses == NULL)
			return (ENOMEM);
		sesn = 0;
		sc->sc_nsessions = 1;
	} else {
		for (sesn = 0; sesn < sc->sc_nsessions; sesn++) {
			if (sc->sc_sessions[sesn].ses_used == 0) {
				ses = &sc->sc_sessions[sesn];
				break;
			}
		}

		if (ses == NULL) {
			sesn = sc->sc_nsessions;
			ses = (struct safe_session *)kmalloc((sesn + 1) *
			    sizeof(struct safe_session), M_DEVBUF, M_NOWAIT);
			if (ses == NULL)
				return (ENOMEM);
			bcopy(sc->sc_sessions, ses, sesn *
			    sizeof(struct safe_session));
			bzero(sc->sc_sessions, sesn *
			    sizeof(struct safe_session));
			kfree(sc->sc_sessions, M_DEVBUF);
			sc->sc_sessions = ses;
			ses = &sc->sc_sessions[sesn];
			sc->sc_nsessions++;
		}
	}

	bzero(ses, sizeof(struct safe_session));
	ses->ses_used = 1;

	if (encini) {
		/* get an IV */
		/* XXX may read fewer than requested */
		read_random(ses->ses_iv, sizeof(ses->ses_iv));

		ses->ses_klen = encini->cri_klen;
		if (encini->cri_key != NULL)
			safe_setup_enckey(ses, encini->cri_key);
	}

	if (macini) {
		ses->ses_mlen = macini->cri_mlen;
		if (ses->ses_mlen == 0) {
			if (macini->cri_alg == CRYPTO_MD5_HMAC)
				ses->ses_mlen = MD5_HASH_LEN;
			else
				ses->ses_mlen = SHA1_HASH_LEN;
		}

		if (macini->cri_key != NULL) {
			safe_setup_mackey(ses, macini->cri_alg, macini->cri_key,
			    macini->cri_klen / 8);
		}
	}

	*sidp = SAFE_SID(device_get_unit(sc->sc_dev), sesn);
	return (0);
}
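/*
 * NB: SAFE_SID() packs the device unit and the session index into
 * the id returned through *sidp; SAFE_SESSION() (see safe_process
 * and safe_freesession) recovers the index.  Sketch:
 *
 *	sid = SAFE_SID(device_get_unit(sc->sc_dev), sesn);
 *	sesn = SAFE_SESSION(sid);
 *
 * The exact bit layout lives in safevar.h.
 */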
/*
 * Deallocate a session.
 */
static int
safe_freesession(device_t dev, u_int64_t tid)
{
	struct safe_softc *sc = device_get_softc(dev);
	int session, ret;
	u_int32_t sid = ((u_int32_t) tid) & 0xffffffff;

	if (sc == NULL)
		return (EINVAL);

	session = SAFE_SESSION(sid);
	if (session < sc->sc_nsessions) {
		bzero(&sc->sc_sessions[session],
		    sizeof(sc->sc_sessions[session]));
		ret = 0;
	} else
		ret = EINVAL;
	return (ret);
}
static void
safe_op_cb(void *arg, bus_dma_segment_t *seg, int nsegs, bus_size_t mapsize,
    int error)
{
	struct safe_operand *op = arg;

	DPRINTF(("%s: mapsize %u nsegs %d error %d\n", __func__,
	    (u_int) mapsize, nsegs, error));
	if (error != 0)
		return;
	op->mapsize = mapsize;
	op->nsegs = nsegs;
	bcopy(seg, op->segs, nsegs * sizeof (seg[0]));
}
static int
safe_process(device_t dev, struct cryptop *crp, int hint)
{
	struct safe_softc *sc = device_get_softc(dev);
	int err = 0, i, nicealign, uniform;
	struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
	int bypass, oplen, ivsize;
	caddr_t iv;
	int16_t coffset;
	struct safe_session *ses;
	struct safe_ringentry *re;
	struct safe_sarec *sa;
	struct safe_pdesc *pd;
	u_int32_t cmd0, cmd1, staterec;

	if (crp == NULL || crp->crp_callback == NULL || sc == NULL) {
		safestats.st_invalid++;
		return (EINVAL);
	}
	if (SAFE_SESSION(crp->crp_sid) >= sc->sc_nsessions) {
		safestats.st_badsession++;
		return (EINVAL);
	}

	lockmgr(&sc->sc_ringlock, LK_EXCLUSIVE);
	if (sc->sc_front == sc->sc_back && sc->sc_nqchip != 0) {
		safestats.st_ringfull++;
		sc->sc_needwakeup |= CRYPTO_SYMQ;
		lockmgr(&sc->sc_ringlock, LK_RELEASE);
		return (ERESTART);
	}
	re = sc->sc_front;

	staterec = re->re_sa.sa_staterec;	/* save */
	/* NB: zero everything but the PE descriptor */
	bzero(&re->re_sa, sizeof(struct safe_ringentry) - sizeof(re->re_desc));
	re->re_sa.sa_staterec = staterec;	/* restore */

	re->re_crp = crp;
	re->re_sesn = SAFE_SESSION(crp->crp_sid);

	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		re->re_src_m = (struct mbuf *)crp->crp_buf;
		re->re_dst_m = (struct mbuf *)crp->crp_buf;
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		re->re_src_io = (struct uio *)crp->crp_buf;
		re->re_dst_io = (struct uio *)crp->crp_buf;
	} else {
		safestats.st_badflags++;
		err = EINVAL;
		goto errout;	/* XXX we don't handle contiguous blocks! */
	}

	sa = &re->re_sa;
	ses = &sc->sc_sessions[re->re_sesn];

	crd1 = crp->crp_desc;
	if (crd1 == NULL) {
		safestats.st_nodesc++;
		err = EINVAL;
		goto errout;
	}
	crd2 = crd1->crd_next;
	cmd0 = SAFE_SA_CMD0_BASIC;	/* basic group operation */
	cmd1 = 0;
	if (crd2 == NULL) {
		if (crd1->crd_alg == CRYPTO_MD5_HMAC ||
		    crd1->crd_alg == CRYPTO_SHA1_HMAC ||
		    crd1->crd_alg == CRYPTO_NULL_HMAC) {
			maccrd = crd1;
			enccrd = NULL;
			cmd0 |= SAFE_SA_CMD0_OP_HASH;
		} else if (crd1->crd_alg == CRYPTO_DES_CBC ||
		    crd1->crd_alg == CRYPTO_3DES_CBC ||
		    crd1->crd_alg == CRYPTO_AES_CBC ||
		    crd1->crd_alg == CRYPTO_NULL_CBC) {
			maccrd = NULL;
			enccrd = crd1;
			cmd0 |= SAFE_SA_CMD0_OP_CRYPT;
		} else {
			safestats.st_badalg++;
			err = EINVAL;
			goto errout;
		}
	} else {
		if ((crd1->crd_alg == CRYPTO_MD5_HMAC ||
		     crd1->crd_alg == CRYPTO_SHA1_HMAC ||
		     crd1->crd_alg == CRYPTO_NULL_HMAC) &&
		    (crd2->crd_alg == CRYPTO_DES_CBC ||
		     crd2->crd_alg == CRYPTO_3DES_CBC ||
		     crd2->crd_alg == CRYPTO_AES_CBC ||
		     crd2->crd_alg == CRYPTO_NULL_CBC) &&
		    ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) {
			maccrd = crd1;
			enccrd = crd2;
		} else if ((crd1->crd_alg == CRYPTO_DES_CBC ||
		     crd1->crd_alg == CRYPTO_3DES_CBC ||
		     crd1->crd_alg == CRYPTO_AES_CBC ||
		     crd1->crd_alg == CRYPTO_NULL_CBC) &&
		    (crd2->crd_alg == CRYPTO_MD5_HMAC ||
		     crd2->crd_alg == CRYPTO_SHA1_HMAC ||
		     crd2->crd_alg == CRYPTO_NULL_HMAC) &&
		    (crd1->crd_flags & CRD_F_ENCRYPT)) {
			enccrd = crd1;
			maccrd = crd2;
		} else {
			safestats.st_badalg++;
			err = EINVAL;
			goto errout;
		}
		cmd0 |= SAFE_SA_CMD0_OP_BOTH;
	}
	if (enccrd) {
		if (enccrd->crd_flags & CRD_F_KEY_EXPLICIT)
			safe_setup_enckey(ses, enccrd->crd_key);

		if (enccrd->crd_alg == CRYPTO_DES_CBC) {
			cmd0 |= SAFE_SA_CMD0_DES;
			cmd1 |= SAFE_SA_CMD1_CBC;
			ivsize = 2*sizeof(u_int32_t);
		} else if (enccrd->crd_alg == CRYPTO_3DES_CBC) {
			cmd0 |= SAFE_SA_CMD0_3DES;
			cmd1 |= SAFE_SA_CMD1_CBC;
			ivsize = 2*sizeof(u_int32_t);
		} else if (enccrd->crd_alg == CRYPTO_AES_CBC) {
			cmd0 |= SAFE_SA_CMD0_AES;
			cmd1 |= SAFE_SA_CMD1_CBC;
			if (ses->ses_klen == 128)
				cmd1 |= SAFE_SA_CMD1_AES128;
			else if (ses->ses_klen == 192)
				cmd1 |= SAFE_SA_CMD1_AES192;
			else
				cmd1 |= SAFE_SA_CMD1_AES256;
			ivsize = 4*sizeof(u_int32_t);
		} else {
			cmd0 |= SAFE_SA_CMD0_CRYPT_NULL;
			ivsize = 0;
		}

		/*
		 * Setup encrypt/decrypt state.  When using basic ops
		 * we can't use an inline IV because hash/crypt offset
		 * must be from the end of the IV to the start of the
		 * crypt data and this leaves out the preceding header
		 * from the hash calculation.  Instead we place the IV
		 * in the state record and set the hash/crypt offset to
		 * copy both the header+IV.
		 */
		if (enccrd->crd_flags & CRD_F_ENCRYPT) {
			cmd0 |= SAFE_SA_CMD0_OUTBOUND;

			if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
				iv = enccrd->crd_iv;
			else
				iv = (caddr_t) ses->ses_iv;
			if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0) {
				crypto_copyback(crp->crp_flags, crp->crp_buf,
				    enccrd->crd_inject, ivsize, iv);
			}
			bcopy(iv, re->re_sastate.sa_saved_iv, ivsize);
			cmd0 |= SAFE_SA_CMD0_IVLD_STATE | SAFE_SA_CMD0_SAVEIV;
			re->re_flags |= SAFE_QFLAGS_COPYOUTIV;
		} else {
			cmd0 |= SAFE_SA_CMD0_INBOUND;

			if (enccrd->crd_flags & CRD_F_IV_EXPLICIT) {
				bcopy(enccrd->crd_iv,
				    re->re_sastate.sa_saved_iv, ivsize);
			} else {
				crypto_copydata(crp->crp_flags, crp->crp_buf,
				    enccrd->crd_inject, ivsize,
				    (caddr_t)re->re_sastate.sa_saved_iv);
			}
			cmd0 |= SAFE_SA_CMD0_IVLD_STATE;
		}
		/*
		 * For basic encryption use the zero pad algorithm.
		 * This pads results to an 8-byte boundary and
		 * suppresses padding verification for inbound (i.e.
		 * decrypt) operations.
		 *
		 * NB: Not sure if the 8-byte pad boundary is a problem.
		 */
		cmd0 |= SAFE_SA_CMD0_PAD_ZERO;

		/* XXX assert key bufs have the same size */
		bcopy(ses->ses_key, sa->sa_key, sizeof(sa->sa_key));
	}
	if (maccrd) {
		if (maccrd->crd_flags & CRD_F_KEY_EXPLICIT) {
			safe_setup_mackey(ses, maccrd->crd_alg,
			    maccrd->crd_key, maccrd->crd_klen / 8);
		}

		if (maccrd->crd_alg == CRYPTO_MD5_HMAC) {
			cmd0 |= SAFE_SA_CMD0_MD5;
			cmd1 |= SAFE_SA_CMD1_HMAC;	/* NB: enable HMAC */
		} else if (maccrd->crd_alg == CRYPTO_SHA1_HMAC) {
			cmd0 |= SAFE_SA_CMD0_SHA1;
			cmd1 |= SAFE_SA_CMD1_HMAC;	/* NB: enable HMAC */
		} else {
			cmd0 |= SAFE_SA_CMD0_HASH_NULL;
		}
		/*
		 * Digest data is loaded from the SA and the hash
		 * result is saved to the state block where we
		 * retrieve it for return to the caller.
		 */
		/* XXX assert digest bufs have the same size */
		bcopy(ses->ses_hminner, sa->sa_indigest,
		    sizeof(sa->sa_indigest));
		bcopy(ses->ses_hmouter, sa->sa_outdigest,
		    sizeof(sa->sa_outdigest));

		cmd0 |= SAFE_SA_CMD0_HSLD_SA | SAFE_SA_CMD0_SAVEHASH;
		re->re_flags |= SAFE_QFLAGS_COPYOUTICV;
	}
	if (enccrd && maccrd) {
		/*
		 * The offset from hash data to the start of
		 * crypt data is the difference in the skips.
		 */
		bypass = maccrd->crd_skip;
		coffset = enccrd->crd_skip - maccrd->crd_skip;
		if (coffset < 0) {
			DPRINTF(("%s: hash does not precede crypt; "
			    "mac skip %u enc skip %u\n",
			    __func__, maccrd->crd_skip, enccrd->crd_skip));
			safestats.st_skipmismatch++;
			err = EINVAL;
			goto errout;
		}
		oplen = enccrd->crd_skip + enccrd->crd_len;
		if (maccrd->crd_skip + maccrd->crd_len != oplen) {
			DPRINTF(("%s: hash amount %u != crypt amount %u\n",
			    __func__, maccrd->crd_skip + maccrd->crd_len,
			    oplen));
			safestats.st_lenmismatch++;
			err = EINVAL;
			goto errout;
		}
#ifdef SAFE_DEBUG
		if (safe_debug) {
			kprintf("mac: skip %d, len %d, inject %d\n",
			    maccrd->crd_skip, maccrd->crd_len,
			    maccrd->crd_inject);
			kprintf("enc: skip %d, len %d, inject %d\n",
			    enccrd->crd_skip, enccrd->crd_len,
			    enccrd->crd_inject);
			kprintf("bypass %d coffset %d oplen %d\n",
			    bypass, coffset, oplen);
		}
#endif
		if (coffset & 3) {	/* offset must be 32-bit aligned */
			DPRINTF(("%s: coffset %u misaligned\n",
			    __func__, coffset));
			safestats.st_coffmisaligned++;
			err = EINVAL;
			goto errout;
		}
		coffset >>= 2;
		if (coffset > 255) {	/* offset must be <256 dwords */
			DPRINTF(("%s: coffset %u too big\n",
			    __func__, coffset));
			safestats.st_cofftoobig++;
			err = EINVAL;
			goto errout;
		}
		/*
		 * Tell the hardware to copy the header to the output.
		 * The header is defined as the data from the end of
		 * the bypass to the start of data to be encrypted.
		 * Typically this is the inline IV.  Note that you need
		 * to do this even if src+dst are the same; it appears
		 * that w/o this bit the crypted data is written
		 * immediately after the bypass data.
		 */
		cmd1 |= SAFE_SA_CMD1_HDRCOPY;
		/*
		 * Disable IP header mutable bit handling.  This is
		 * needed to get correct HMAC calculations.
		 */
		cmd1 |= SAFE_SA_CMD1_MUTABLE;
	} else {
		if (enccrd) {
			bypass = enccrd->crd_skip;
			oplen = bypass + enccrd->crd_len;
		} else {
			bypass = maccrd->crd_skip;
			oplen = bypass + maccrd->crd_len;
		}
		coffset = 0;
	}
	/* XXX verify multiple of 4 when using s/g */
	if (bypass > 96) {		/* bypass offset must be <= 96 bytes */
		DPRINTF(("%s: bypass %u too big\n", __func__, bypass));
		safestats.st_bypasstoobig++;
		err = EINVAL;
		goto errout;
	}
	if (bus_dmamap_create(sc->sc_srcdmat, BUS_DMA_NOWAIT, &re->re_src_map)) {
		safestats.st_nomap++;
		err = ENOMEM;
		goto errout;
	}
	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		if (bus_dmamap_load_mbuf(sc->sc_srcdmat, re->re_src_map,
		    re->re_src_m, safe_op_cb,
		    &re->re_src, BUS_DMA_NOWAIT) != 0) {
			bus_dmamap_destroy(sc->sc_srcdmat, re->re_src_map);
			re->re_src_map = NULL;
			safestats.st_noload++;
			err = ENOMEM;
			goto errout;
		}
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		if (bus_dmamap_load_uio(sc->sc_srcdmat, re->re_src_map,
		    re->re_src_io, safe_op_cb,
		    &re->re_src, BUS_DMA_NOWAIT) != 0) {
			bus_dmamap_destroy(sc->sc_srcdmat, re->re_src_map);
			re->re_src_map = NULL;
			safestats.st_noload++;
			err = ENOMEM;
			goto errout;
		}
	}
	nicealign = safe_dmamap_aligned(&re->re_src);
	uniform = safe_dmamap_uniform(&re->re_src);

	DPRINTF(("src nicealign %u uniform %u nsegs %u\n",
	    nicealign, uniform, re->re_src.nsegs));
	if (re->re_src.nsegs > 1) {
		re->re_desc.d_src = sc->sc_spalloc.dma_paddr +
		    ((caddr_t) sc->sc_spfree - (caddr_t) sc->sc_spring);
		for (i = 0; i < re->re_src_nsegs; i++) {
			/* NB: no need to check if there's space */
			pd = sc->sc_spfree;
			if (++(sc->sc_spfree) == sc->sc_springtop)
				sc->sc_spfree = sc->sc_spring;

			KASSERT((pd->pd_flags&3) == 0 ||
			    (pd->pd_flags&3) == SAFE_PD_DONE,
			    ("bogus source particle descriptor; flags %x",
			    pd->pd_flags));
			pd->pd_addr = re->re_src_segs[i].ds_addr;
			pd->pd_size = re->re_src_segs[i].ds_len;
			pd->pd_flags = SAFE_PD_READY;
		}
		cmd0 |= SAFE_SA_CMD0_IGATHER;
	} else {
		/*
		 * No need for gather, reference the operand directly.
		 */
		re->re_desc.d_src = re->re_src_segs[0].ds_addr;
	}
	if (enccrd == NULL && maccrd != NULL) {
		/*
		 * Hash op; no destination needed.
		 */
	} else {
		if (crp->crp_flags & CRYPTO_F_IOV) {
			if (!nicealign) {
				safestats.st_iovmisaligned++;
				err = EINVAL;
				goto errout;
			}
			if (uniform != 1) {
				/*
				 * Source is not suitable for direct use as
				 * the destination.  Create a new scatter/gather
				 * list based on the destination requirements
				 * and check if that's ok.
				 */
				if (bus_dmamap_create(sc->sc_dstdmat,
				    BUS_DMA_NOWAIT, &re->re_dst_map)) {
					safestats.st_nomap++;
					err = ENOMEM;
					goto errout;
				}
				if (bus_dmamap_load_uio(sc->sc_dstdmat,
				    re->re_dst_map, re->re_dst_io,
				    safe_op_cb, &re->re_dst,
				    BUS_DMA_NOWAIT) != 0) {
					bus_dmamap_destroy(sc->sc_dstdmat,
					    re->re_dst_map);
					re->re_dst_map = NULL;
					safestats.st_noload++;
					err = ENOMEM;
					goto errout;
				}
				uniform = safe_dmamap_uniform(&re->re_dst);
				if (uniform != 1) {
					/*
					 * There's no way to handle the DMA
					 * requirements with this uio.  We
					 * could create a separate DMA area for
					 * the result and then copy it back,
					 * but for now we just bail and return
					 * an error.  Note that uio requests
					 * > SAFE_MAX_DSIZE are handled because
					 * the DMA map and segment list for the
					 * destination will result in a
					 * destination particle list that does
					 * the necessary scatter DMA.
					 */
					safestats.st_iovnotuniform++;
					err = EINVAL;
					goto errout;
				}
			} else
				re->re_dst = re->re_src;
		} else if (crp->crp_flags & CRYPTO_F_IMBUF) {
			if (nicealign && uniform == 1) {
				/*
				 * Source layout is suitable for direct
				 * sharing of the DMA map and segment list.
				 */
				re->re_dst = re->re_src;
			} else if (nicealign && uniform == 2) {
				/*
				 * The source is properly aligned but requires a
				 * different particle list to handle DMA of the
				 * result.  Create a new map and do the load to
				 * create the segment list.  The particle
				 * descriptor setup code below will handle the
				 * rest.
				 */
				if (bus_dmamap_create(sc->sc_dstdmat,
				    BUS_DMA_NOWAIT, &re->re_dst_map)) {
					safestats.st_nomap++;
					err = ENOMEM;
					goto errout;
				}
				if (bus_dmamap_load_mbuf(sc->sc_dstdmat,
				    re->re_dst_map, re->re_dst_m,
				    safe_op_cb, &re->re_dst,
				    BUS_DMA_NOWAIT) != 0) {
					bus_dmamap_destroy(sc->sc_dstdmat,
					    re->re_dst_map);
					re->re_dst_map = NULL;
					safestats.st_noload++;
					err = ENOMEM;
					goto errout;
				}
			} else {		/* !(aligned and/or uniform) */
				int totlen, len;
				struct mbuf *m, *top, **mp;

				/*
				 * DMA constraints require that we allocate a
				 * new mbuf chain for the destination.  We
				 * allocate an entire new set of mbufs of
				 * optimal/required size and then tell the
				 * hardware to copy any bits that are not
				 * created as a byproduct of the operation.
				 */
				if (!nicealign)
					safestats.st_unaligned++;
				if (uniform != 2)
					safestats.st_notuniform++;
				totlen = re->re_src_mapsize;
				if (re->re_src_m->m_flags & M_PKTHDR) {
					len = MHLEN;
					MGETHDR(m, M_NOWAIT, MT_DATA);
					if (m && !m_dup_pkthdr(m, re->re_src_m,
					    M_NOWAIT)) {
						m_free(m);
						m = NULL;
					}
				} else {
					len = MLEN;
					MGET(m, M_NOWAIT, MT_DATA);
				}
				if (m == NULL) {
					safestats.st_nombuf++;
					err = sc->sc_nqchip ? ERESTART : ENOMEM;
					goto errout;
				}
				if (totlen >= MINCLSIZE) {
					MCLGET(m, M_NOWAIT);
					if ((m->m_flags & M_EXT) == 0) {
						m_free(m);
						safestats.st_nomcl++;
						err = sc->sc_nqchip ?
						    ERESTART : ENOMEM;
						goto errout;
					}
					len = MCLBYTES;
				}
				m->m_len = len;
				top = NULL;
				mp = &top;

				while (totlen > 0) {
					if (top) {
						MGET(m, M_NOWAIT, MT_DATA);
						if (m == NULL) {
							m_freem(top);
							safestats.st_nombuf++;
							err = sc->sc_nqchip ?
							    ERESTART : ENOMEM;
							goto errout;
						}
						len = MLEN;
					}
					if (top && totlen >= MINCLSIZE) {
						MCLGET(m, M_NOWAIT);
						if ((m->m_flags & M_EXT) == 0) {
							*mp = m;
							m_freem(top);
							safestats.st_nomcl++;
							err = sc->sc_nqchip ?
							    ERESTART : ENOMEM;
							goto errout;
						}
						len = MCLBYTES;
					}
					m->m_len = len = min(totlen, len);
					totlen -= len;
					*mp = m;
					mp = &m->m_next;
				}
				re->re_dst_m = top;
				if (bus_dmamap_create(sc->sc_dstdmat,
				    BUS_DMA_NOWAIT, &re->re_dst_map) != 0) {
					safestats.st_nomap++;
					err = ENOMEM;
					goto errout;
				}
				if (bus_dmamap_load_mbuf(sc->sc_dstdmat,
				    re->re_dst_map, re->re_dst_m,
				    safe_op_cb, &re->re_dst,
				    BUS_DMA_NOWAIT) != 0) {
					bus_dmamap_destroy(sc->sc_dstdmat,
					    re->re_dst_map);
					re->re_dst_map = NULL;
					safestats.st_noload++;
					err = ENOMEM;
					goto errout;
				}
				if (re->re_src.mapsize > oplen) {
					/*
					 * There's data following what the
					 * hardware will copy for us.  If this
					 * isn't just the ICV (that's going to
					 * be written on completion), copy it
					 * to the new mbufs
					 */
					if (!(maccrd &&
					    (re->re_src.mapsize-oplen) == 12 &&
					    maccrd->crd_inject == oplen))
						safe_mcopy(re->re_src_m,
						    re->re_dst_m,
						    oplen);
					else
						safestats.st_noicvcopy++;
				}
			}
		} else {
			safestats.st_badflags++;
			err = EINVAL;
			goto errout;
		}
	}
	if (re->re_dst.nsegs > 1) {
		re->re_desc.d_dst = sc->sc_dpalloc.dma_paddr +
		    ((caddr_t) sc->sc_dpfree - (caddr_t) sc->sc_dpring);
		for (i = 0; i < re->re_dst_nsegs; i++) {
			pd = sc->sc_dpfree;
			KASSERT((pd->pd_flags&3) == 0 ||
			    (pd->pd_flags&3) == SAFE_PD_DONE,
			    ("bogus dest particle descriptor; flags %x",
			    pd->pd_flags));
			if (++(sc->sc_dpfree) == sc->sc_dpringtop)
				sc->sc_dpfree = sc->sc_dpring;
			pd->pd_addr = re->re_dst_segs[i].ds_addr;
			pd->pd_flags = SAFE_PD_READY;
		}
		cmd0 |= SAFE_SA_CMD0_OSCATTER;
	} else {
		/*
		 * No need for scatter, reference the operand directly.
		 */
		re->re_desc.d_dst = re->re_dst_segs[0].ds_addr;
	}

	/*
	 * All done with setup; fill in the SA command words
	 * and the packet engine descriptor.  The operation
	 * is now ready for submission to the hardware.
	 */
	sa->sa_cmd0 = cmd0 | SAFE_SA_CMD0_IPCI | SAFE_SA_CMD0_OPCI;
	sa->sa_cmd1 = cmd1
	    | (coffset << SAFE_SA_CMD1_OFFSET_S)
	    | SAFE_SA_CMD1_SAREV1	/* Rev 1 SA data structure */
	    | SAFE_SA_CMD1_SRPCI
	    ;
	/*
	 * NB: the order of writes is important here.  In case the
	 * chip is scanning the ring because of an outstanding request
	 * it might nab this one too.  In that case we need to make
	 * sure the setup is complete before we write the length
	 * field of the descriptor as it signals the descriptor is
	 * ready for processing.
	 */
	re->re_desc.d_csr = SAFE_PE_CSR_READY | SAFE_PE_CSR_SAPCI;
	if (maccrd)
		re->re_desc.d_csr |= SAFE_PE_CSR_LOADSA | SAFE_PE_CSR_HASHFINAL;
	re->re_desc.d_len = oplen
	    | SAFE_PE_LEN_READY
	    | (bypass << SAFE_PE_LEN_BYPASS_S)
	    ;

	safestats.st_ipackets++;
	safestats.st_ibytes += oplen;

	if (++(sc->sc_front) == sc->sc_ringtop)
		sc->sc_front = sc->sc_ring;

	/* XXX honor batching */
	safe_feed(sc, re);
	lockmgr(&sc->sc_ringlock, LK_RELEASE);
	return (0);

errout:
	if ((re->re_dst_m != NULL) && (re->re_src_m != re->re_dst_m))
		m_freem(re->re_dst_m);

	if (re->re_dst_map != NULL && re->re_dst_map != re->re_src_map) {
		bus_dmamap_unload(sc->sc_dstdmat, re->re_dst_map);
		bus_dmamap_destroy(sc->sc_dstdmat, re->re_dst_map);
	}
	if (re->re_src_map != NULL) {
		bus_dmamap_unload(sc->sc_srcdmat, re->re_src_map);
		bus_dmamap_destroy(sc->sc_srcdmat, re->re_src_map);
	}
	lockmgr(&sc->sc_ringlock, LK_RELEASE);
	if (err != ERESTART) {
		crp->crp_etype = err;
		crypto_done(crp);
	} else {
		sc->sc_needwakeup |= CRYPTO_SYMQ;
	}
	return (err);
}
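/*
 * NB: ERESTART (ring full, or transient mbuf shortage while ops are
 * queued) tells the opencrypto layer to hold the request and stop
 * feeding us.  We latch CRYPTO_SYMQ in sc_needwakeup so the
 * interrupt handler can call crypto_unblock() once completed
 * descriptors free up ring space.
 */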
static void
safe_callback(struct safe_softc *sc, struct safe_ringentry *re)
{
	struct cryptop *crp = (struct cryptop *)re->re_crp;
	struct cryptodesc *crd;

	safestats.st_opackets++;
	safestats.st_obytes += re->re_dst.mapsize;

	safe_dma_sync(&sc->sc_ringalloc,
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	if (re->re_desc.d_csr & SAFE_PE_CSR_STATUS) {
		device_printf(sc->sc_dev, "csr 0x%x cmd0 0x%x cmd1 0x%x\n",
		    re->re_desc.d_csr,
		    re->re_sa.sa_cmd0, re->re_sa.sa_cmd1);
		safestats.st_peoperr++;
		crp->crp_etype = EIO;		/* something more meaningful? */
	}
	if (re->re_dst_map != NULL && re->re_dst_map != re->re_src_map) {
		bus_dmamap_sync(sc->sc_dstdmat, re->re_dst_map,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dstdmat, re->re_dst_map);
		bus_dmamap_destroy(sc->sc_dstdmat, re->re_dst_map);
	}
	bus_dmamap_sync(sc->sc_srcdmat, re->re_src_map, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_srcdmat, re->re_src_map);
	bus_dmamap_destroy(sc->sc_srcdmat, re->re_src_map);

	/*
	 * If result was written to a different mbuf chain, swap
	 * it in as the return value and reclaim the original.
	 */
	if ((crp->crp_flags & CRYPTO_F_IMBUF) && re->re_src_m != re->re_dst_m) {
		m_freem(re->re_src_m);
		crp->crp_buf = (caddr_t)re->re_dst_m;
	}

	if (re->re_flags & SAFE_QFLAGS_COPYOUTIV) {
		/* copy out IV for future use */
		for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
			int ivsize;

			if (crd->crd_alg == CRYPTO_DES_CBC ||
			    crd->crd_alg == CRYPTO_3DES_CBC) {
				ivsize = 2*sizeof(u_int32_t);
			} else if (crd->crd_alg == CRYPTO_AES_CBC) {
				ivsize = 4*sizeof(u_int32_t);
			} else
				continue;
			crypto_copydata(crp->crp_flags, crp->crp_buf,
			    crd->crd_skip + crd->crd_len - ivsize, ivsize,
			    (caddr_t)sc->sc_sessions[re->re_sesn].ses_iv);
			break;
		}
	}

	if (re->re_flags & SAFE_QFLAGS_COPYOUTICV) {
		/* copy out ICV result */
		for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
			if (!(crd->crd_alg == CRYPTO_MD5_HMAC ||
			    crd->crd_alg == CRYPTO_SHA1_HMAC ||
			    crd->crd_alg == CRYPTO_NULL_HMAC))
				continue;
			if (crd->crd_alg == CRYPTO_SHA1_HMAC) {
				/*
				 * SHA-1 ICV's are byte-swapped; fix 'em up
				 * before copying them to their destination.
				 */
				re->re_sastate.sa_saved_indigest[0] =
				    bswap32(re->re_sastate.sa_saved_indigest[0]);
				re->re_sastate.sa_saved_indigest[1] =
				    bswap32(re->re_sastate.sa_saved_indigest[1]);
				re->re_sastate.sa_saved_indigest[2] =
				    bswap32(re->re_sastate.sa_saved_indigest[2]);
			}
			crypto_copyback(crp->crp_flags, crp->crp_buf,
			    crd->crd_inject,
			    sc->sc_sessions[re->re_sesn].ses_mlen,
			    (caddr_t)re->re_sastate.sa_saved_indigest);
			break;
		}
	}
	crypto_done(crp);
}
/*
 * Copy all data past offset from srcm to dstm.
 */
static void
safe_mcopy(struct mbuf *srcm, struct mbuf *dstm, u_int offset)
{
	u_int j, dlen, slen;
	caddr_t dptr, sptr;

	/*
	 * Advance src and dst to offset.
	 */
	j = offset;
	for (;;) {
		if (srcm->m_len > j)
			break;
		j -= srcm->m_len;
		srcm = srcm->m_next;
		if (srcm == NULL)
			return;
	}
	sptr = mtod(srcm, caddr_t) + j;
	slen = srcm->m_len - j;

	j = offset;
	for (;;) {
		if (dstm->m_len > j)
			break;
		j -= dstm->m_len;
		dstm = dstm->m_next;
		if (dstm == NULL)
			return;
	}
	dptr = mtod(dstm, caddr_t) + j;
	dlen = dstm->m_len - j;

	/*
	 * Copy everything that remains.
	 */
	for (;;) {
		j = min(slen, dlen);
		bcopy(sptr, dptr, j);
		if (slen == j) {
			srcm = srcm->m_next;
			if (srcm == NULL)
				return;
			sptr = srcm->m_data;
			slen = srcm->m_len;
		} else
			sptr += j, slen -= j;
		if (dlen == j) {
			dstm = dstm->m_next;
			if (dstm == NULL)
				return;
			dptr = dstm->m_data;
			dlen = dstm->m_len;
		} else
			dptr += j, dlen -= j;
	}
}
#ifndef SAFE_NO_RNG
#define	SAFE_RNG_MAXWAIT	1000

static void
safe_rng_init(struct safe_softc *sc)
{
	u_int32_t w, v;
	int i;

	WRITE_REG(sc, SAFE_RNG_CTRL, 0);
	/* use default value according to the manual */
	WRITE_REG(sc, SAFE_RNG_CNFG, 0x834);	/* magic from SafeNet */
	WRITE_REG(sc, SAFE_RNG_ALM_CNT, 0);

	/*
	 * There is a bug in rev 1.0 of the 1140 that when the RNG
	 * is brought out of reset the ready status flag does not
	 * work until the RNG has finished its internal initialization.
	 *
	 * So in order to determine the device is through its
	 * initialization we must read the data register, using the
	 * status reg in the read in case it is initialized.  Then read
	 * the data register until it changes from the first read.
	 * Once it changes read the data register until it changes
	 * again.  At this time the RNG is considered initialized.
	 * This could take between 750ms - 1000ms in time.
	 */
	i = 0;
	w = READ_REG(sc, SAFE_RNG_OUT);
	do {
		v = READ_REG(sc, SAFE_RNG_OUT);
		if (v != w) {
			w = v;
			break;
		}
		DELAY(10);
	} while (++i < SAFE_RNG_MAXWAIT);

	/* Wait until data changes again */
	i = 0;
	do {
		v = READ_REG(sc, SAFE_RNG_OUT);
		if (v != w)
			break;
		DELAY(10);
	} while (++i < SAFE_RNG_MAXWAIT);
}
static __inline void
safe_rng_disable_short_cycle(struct safe_softc *sc)
{
	WRITE_REG(sc, SAFE_RNG_CTRL,
	    READ_REG(sc, SAFE_RNG_CTRL) &~ SAFE_RNG_CTRL_SHORTEN);
}

static __inline void
safe_rng_enable_short_cycle(struct safe_softc *sc)
{
	WRITE_REG(sc, SAFE_RNG_CTRL,
	    READ_REG(sc, SAFE_RNG_CTRL) | SAFE_RNG_CTRL_SHORTEN);
}

static __inline u_int32_t
safe_rng_read(struct safe_softc *sc)
{
	int i;

	i = 0;
	while (READ_REG(sc, SAFE_RNG_STAT) != 0 && ++i < SAFE_RNG_MAXWAIT)
		;
	return READ_REG(sc, SAFE_RNG_OUT);
}
static void
safe_rng(void *arg)
{
	struct safe_softc *sc = arg;
	u_int32_t buf[SAFE_RNG_MAXBUFSIZ];	/* NB: maybe move to softc */
	u_int maxwords;
	int i;

	safestats.st_rng++;
	/*
	 * Fetch the next block of data.
	 */
	maxwords = safe_rngbufsize;
	if (maxwords > SAFE_RNG_MAXBUFSIZ)
		maxwords = SAFE_RNG_MAXBUFSIZ;
retry:
	for (i = 0; i < maxwords; i++)
		buf[i] = safe_rng_read(sc);
	/*
	 * Check the comparator alarm count and reset the h/w if
	 * it exceeds our threshold.  This guards against the
	 * hardware oscillators resonating with external signals.
	 */
	if (READ_REG(sc, SAFE_RNG_ALM_CNT) > safe_rngmaxalarm) {
		u_int32_t freq_inc, w;

		DPRINTF(("%s: alarm count %u exceeds threshold %u\n", __func__,
		    READ_REG(sc, SAFE_RNG_ALM_CNT), safe_rngmaxalarm));
		safestats.st_rngalarm++;
		safe_rng_enable_short_cycle(sc);
		freq_inc = 18;
		for (i = 0; i < 64; i++) {
			w = READ_REG(sc, SAFE_RNG_CNFG);
			freq_inc = ((w + freq_inc) & 0x3fL);
			w = ((w & ~0x3fL) | freq_inc);
			WRITE_REG(sc, SAFE_RNG_CNFG, w);

			WRITE_REG(sc, SAFE_RNG_ALM_CNT, 0);

			(void) safe_rng_read(sc);
			DELAY(25);

			if (READ_REG(sc, SAFE_RNG_ALM_CNT) == 0) {
				safe_rng_disable_short_cycle(sc);
				goto retry;
			}
			freq_inc = 1;
		}
		safe_rng_disable_short_cycle(sc);
	} else
		WRITE_REG(sc, SAFE_RNG_ALM_CNT, 0);

	(*sc->sc_harvest)(sc->sc_rndtest, buf, maxwords*sizeof (u_int32_t));
	callout_reset(&sc->sc_rngto,
	    hz * (safe_rnginterval ? safe_rnginterval : 1), safe_rng, sc);
}
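/*
 * Harvested words flow through sc_harvest: either rndtest_harvest()
 * (when the rndtest module attached) or default_harvest(), which
 * feeds add_buffer_randomness_src() directly.  The retuning loop
 * above appears to follow SafeNet's guidance: shorten the
 * oscillator cycle, step the low 6 trim bits of SAFE_RNG_CNFG until
 * the alarm counter stays clear, then resume normal cycling.
 */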
#endif /* SAFE_NO_RNG */
static void
safe_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *paddr = (bus_addr_t*) arg;
	*paddr = segs->ds_addr;
}
static int
safe_dma_malloc(
	struct safe_softc *sc,
	bus_size_t size,
	struct safe_dma_alloc *dma,
	int mapflags
)
{
	int r;

	r = bus_dma_tag_create(NULL,		/* parent */
	    sizeof(u_int32_t), 0,		/* alignment, bounds */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    size,				/* maxsize */
	    1,					/* nsegments */
	    size,				/* maxsegsize */
	    BUS_DMA_ALLOCNOW,			/* flags */
	    &dma->dma_tag);
	if (r != 0) {
		device_printf(sc->sc_dev, "safe_dma_malloc: "
		    "bus_dma_tag_create failed; error %u\n", r);
		goto fail_0;
	}

	r = bus_dmamap_create(dma->dma_tag, BUS_DMA_NOWAIT, &dma->dma_map);
	if (r != 0) {
		device_printf(sc->sc_dev, "safe_dma_malloc: "
		    "bus_dmamap_create failed; error %u\n", r);
		goto fail_1;
	}

	r = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr,
	    BUS_DMA_NOWAIT, &dma->dma_map);
	if (r != 0) {
		device_printf(sc->sc_dev, "safe_dma_malloc: "
		    "bus_dmamem_alloc failed; size %zu, error %u\n",
		    size, r);
		goto fail_2;
	}

	r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
	    size,
	    safe_dmamap_cb,
	    &dma->dma_paddr,
	    mapflags | BUS_DMA_NOWAIT);
	if (r != 0) {
		device_printf(sc->sc_dev, "safe_dma_malloc: "
		    "bus_dmamap_load failed; error %u\n", r);
		goto fail_3;
	}

	dma->dma_size = size;
	return (0);

fail_3:
	bus_dmamap_unload(dma->dma_tag, dma->dma_map);
fail_2:
	bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
fail_1:
	bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
	bus_dma_tag_destroy(dma->dma_tag);
fail_0:
	dma->dma_map = NULL;
	dma->dma_tag = NULL;
	return (r);
}
static void
safe_dma_free(struct safe_softc *sc, struct safe_dma_alloc *dma)
{
	bus_dmamap_unload(dma->dma_tag, dma->dma_map);
	bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
	bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
	bus_dma_tag_destroy(dma->dma_tag);
}
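/*
 * NB: safe_dma_free() and the fail_* labels in safe_dma_malloc()
 * tear down in strict reverse order of construction: unload the map
 * (drop the bus address), free the DMA memory, destroy the map,
 * then destroy the tag.
 */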
/*
 * Resets the board.  Values in the registers are left as is
 * from the reset (i.e. initial values are assigned elsewhere).
 */
static void
safe_reset_board(struct safe_softc *sc)
{
	u_int32_t v;
	/*
	 * Reset the device.  The manual says no delay
	 * is needed between marking and clearing reset.
	 */
	v = READ_REG(sc, SAFE_PE_DMACFG) &~
	    (SAFE_PE_DMACFG_PERESET | SAFE_PE_DMACFG_PDRRESET |
	     SAFE_PE_DMACFG_SGRESET);
	WRITE_REG(sc, SAFE_PE_DMACFG, v
	    | SAFE_PE_DMACFG_PERESET
	    | SAFE_PE_DMACFG_PDRRESET
	    | SAFE_PE_DMACFG_SGRESET);
	WRITE_REG(sc, SAFE_PE_DMACFG, v);
}
/*
 * Initialize registers we need to touch only once.
 */
static void
safe_init_board(struct safe_softc *sc)
{
	u_int32_t v, dwords;

	v = READ_REG(sc, SAFE_PE_DMACFG);
	v &=~ SAFE_PE_DMACFG_PEMODE;
	v |= SAFE_PE_DMACFG_FSENA		/* failsafe enable */
	  |  SAFE_PE_DMACFG_GPRPCI		/* gather ring on PCI */
	  |  SAFE_PE_DMACFG_SPRPCI		/* scatter ring on PCI */
	  |  SAFE_PE_DMACFG_ESDESC		/* endian-swap descriptors */
	  |  SAFE_PE_DMACFG_ESSA		/* endian-swap SA's */
	  |  SAFE_PE_DMACFG_ESPDESC		/* endian-swap part. desc's */
	  ;
	WRITE_REG(sc, SAFE_PE_DMACFG, v);
#if 0
	/* XXX select byte swap based on host byte order */
	WRITE_REG(sc, SAFE_ENDIAN, 0x1b);
#endif
	if (sc->sc_chiprev == SAFE_REV(1,0)) {
		/*
		 * Avoid large PCI DMA transfers.  Rev 1.0 has a bug where
		 * "target mode transfers" done while the chip is DMA'ing
		 * >1020 bytes cause the hardware to lockup.  To avoid this
		 * we reduce the max PCI transfer size and use small source
		 * particle descriptors (<= 256 bytes).
		 */
		WRITE_REG(sc, SAFE_DMA_CFG, 256);
		device_printf(sc->sc_dev,
		    "Reduce max DMA size to %u words for rev %u.%u WAR\n",
		    (READ_REG(sc, SAFE_DMA_CFG)>>2) & 0xff,
		    SAFE_REV_MAJ(sc->sc_chiprev),
		    SAFE_REV_MIN(sc->sc_chiprev));
	}

	/* NB: operands+results are overlaid */
	WRITE_REG(sc, SAFE_PE_PDRBASE, sc->sc_ringalloc.dma_paddr);
	WRITE_REG(sc, SAFE_PE_RDRBASE, sc->sc_ringalloc.dma_paddr);
	/*
	 * Configure ring entry size and number of items in the ring.
	 */
	KASSERT((sizeof(struct safe_ringentry) % sizeof(u_int32_t)) == 0,
	    ("PE ring entry not 32-bit aligned!"));
	dwords = sizeof(struct safe_ringentry) / sizeof(u_int32_t);
	WRITE_REG(sc, SAFE_PE_RINGCFG,
	    (dwords << SAFE_PE_RINGCFG_OFFSET_S) | SAFE_MAX_NQUEUE);
	WRITE_REG(sc, SAFE_PE_RINGPOLL, 0);	/* disable polling */

	WRITE_REG(sc, SAFE_PE_GRNGBASE, sc->sc_spalloc.dma_paddr);
	WRITE_REG(sc, SAFE_PE_SRNGBASE, sc->sc_dpalloc.dma_paddr);
	WRITE_REG(sc, SAFE_PE_PARTSIZE,
	    (SAFE_TOTAL_DPART<<16) | SAFE_TOTAL_SPART);
	/*
	 * NB: destination particles are fixed size.  We use
	 *     an mbuf cluster and require all results go to
	 *     clusters or smaller.
	 */
	WRITE_REG(sc, SAFE_PE_PARTCFG, SAFE_MAX_DSIZE);

	/* it's now safe to enable PE mode, do it */
	WRITE_REG(sc, SAFE_PE_DMACFG, v | SAFE_PE_DMACFG_PEMODE);

	/*
	 * Configure hardware to use level-triggered interrupts and
	 * to interrupt after each descriptor is processed.
	 */
	WRITE_REG(sc, SAFE_HI_CFG, SAFE_HI_CFG_LEVEL);
	WRITE_REG(sc, SAFE_HI_DESC_CNT, 1);
	WRITE_REG(sc, SAFE_HI_MASK, SAFE_INT_PE_DDONE | SAFE_INT_PE_ERROR);
}
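/*
 * NB: with SAFE_HI_DESC_CNT set to 1 the chip interrupts after
 * every completed descriptor.  In principle a larger count would
 * batch completions (fewer interrupts at the cost of latency); the
 * driver opts for low per-operation latency here.
 */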
/*
 * Init PCI registers
 */
static void
safe_init_pciregs(device_t dev)
{
}

/*
 * Clean up after a chip crash.
 * It is assumed that the caller is in splimp()
 */
static void
safe_cleanchip(struct safe_softc *sc)
{
	if (sc->sc_nqchip != 0) {
		struct safe_ringentry *re = sc->sc_back;

		while (re != sc->sc_front) {
			if (re->re_desc.d_csr != 0)
				safe_free_entry(sc, re);
			if (++re == sc->sc_ringtop)
				re = sc->sc_ring;
		}
		sc->sc_back = re;
		sc->sc_nqchip = 0;
	}
}
/*
 * free a safe_q
 * It is assumed that the caller is within splimp().
 */
static int
safe_free_entry(struct safe_softc *sc, struct safe_ringentry *re)
{
	struct cryptop *crp;

	/*
	 * Free header MCR
	 */
	if ((re->re_dst_m != NULL) && (re->re_src_m != re->re_dst_m))
		m_freem(re->re_dst_m);

	crp = (struct cryptop *)re->re_crp;

	re->re_desc.d_csr = 0;

	crp->crp_etype = EFAULT;
	crypto_done(crp);
	return (0);
}
/*
 * Routine to reset the chip and clean up.
 * It is assumed that the caller is in splimp()
 */
static void
safe_totalreset(struct safe_softc *sc)
{
	safe_reset_board(sc);
	safe_init_board(sc);
	safe_cleanchip(sc);
}
/*
 * Is the operand suitably aligned for direct DMA?  Each
 * segment must be aligned on a 32-bit boundary and all
 * but the last segment must be a multiple of 4 bytes.
 */
static int
safe_dmamap_aligned(const struct safe_operand *op)
{
	int i;

	for (i = 0; i < op->nsegs; i++) {
		if (op->segs[i].ds_addr & 3)
			return (0);
		if (i != (op->nsegs - 1) && (op->segs[i].ds_len & 3))
			return (0);
	}
	return (1);
}
/*
 * Is the operand suitable for direct DMA as the destination
 * of an operation.  The hardware requires that each ``particle''
 * but the last in an operation result have the same size.  We
 * fix that size at SAFE_MAX_DSIZE bytes.  This routine returns
 * 0 if some segment is not a multiple of this size, 1 if all
 * segments are exactly this size, or 2 if segments are at worst
 * a multiple of this size.
 */
static int
safe_dmamap_uniform(const struct safe_operand *op)
{
	int result = 1;

	if (op->nsegs > 0) {
		int i;

		for (i = 0; i < op->nsegs-1; i++) {
			if (op->segs[i].ds_len % SAFE_MAX_DSIZE)
				return (0);
			if (op->segs[i].ds_len != SAFE_MAX_DSIZE)
				result = 2;
		}
	}
	return (result);
}
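/*
 * Example, assuming SAFE_MAX_DSIZE is the cluster size (2048):
 *	segments {2048, 2048, 500} -> 1 (all but last exactly sized)
 *	segments {4096, 100}       -> 2 (a multiple, but not exact)
 *	segments {1000, 100}       -> 0 (not a multiple; must copy)
 */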
#ifdef SAFE_DEBUG
static void
safe_dump_dmastatus(struct safe_softc *sc, const char *tag)
{
	kprintf("%s: ENDIAN 0x%x SRC 0x%x DST 0x%x STAT 0x%x\n"
	    , tag
	    , READ_REG(sc, SAFE_DMA_ENDIAN)
	    , READ_REG(sc, SAFE_DMA_SRCADDR)
	    , READ_REG(sc, SAFE_DMA_DSTADDR)
	    , READ_REG(sc, SAFE_DMA_STAT)
	);
}

static void
safe_dump_intrstate(struct safe_softc *sc, const char *tag)
{
	kprintf("%s: HI_CFG 0x%x HI_MASK 0x%x HI_DESC_CNT 0x%x HU_STAT 0x%x HM_STAT 0x%x\n"
	    , tag
	    , READ_REG(sc, SAFE_HI_CFG)
	    , READ_REG(sc, SAFE_HI_MASK)
	    , READ_REG(sc, SAFE_HI_DESC_CNT)
	    , READ_REG(sc, SAFE_HU_STAT)
	    , READ_REG(sc, SAFE_HM_STAT)
	);
}

static void
safe_dump_ringstate(struct safe_softc *sc, const char *tag)
{
	u_int32_t estat = READ_REG(sc, SAFE_PE_ERNGSTAT);

	/* NB: assume caller has lock on ring */
	kprintf("%s: ERNGSTAT %x (next %u) back %lu front %lu\n",
	    tag,
	    estat, (estat >> SAFE_PE_ERNGSTAT_NEXT_S),
	    (unsigned long)(sc->sc_back - sc->sc_ring),
	    (unsigned long)(sc->sc_front - sc->sc_ring));
}
static void
safe_dump_request(struct safe_softc *sc, const char* tag, struct safe_ringentry *re)
{
	int ix, nsegs;

	ix = re - sc->sc_ring;
	kprintf("%s: %p (%u): csr %x src %x dst %x sa %x len %x\n"
	    , tag
	    , re, ix
	    , re->re_desc.d_csr
	    , re->re_desc.d_src
	    , re->re_desc.d_dst
	    , re->re_desc.d_sa
	    , re->re_desc.d_len
	);
	if (re->re_src.nsegs > 1) {
		ix = (re->re_desc.d_src - sc->sc_spalloc.dma_paddr) /
		    sizeof(struct safe_pdesc);
		for (nsegs = re->re_src.nsegs; nsegs; nsegs--) {
			kprintf(" spd[%u] %p: %p size %u flags %x"
			    , ix, &sc->sc_spring[ix]
			    , (caddr_t)(uintptr_t) sc->sc_spring[ix].pd_addr
			    , sc->sc_spring[ix].pd_size
			    , sc->sc_spring[ix].pd_flags
			);
			if (sc->sc_spring[ix].pd_size == 0)
				kprintf(" (zero!)");
			kprintf("\n");
			if (++ix == SAFE_TOTAL_SPART)
				ix = 0;
		}
	}
	if (re->re_dst.nsegs > 1) {
		ix = (re->re_desc.d_dst - sc->sc_dpalloc.dma_paddr) /
		    sizeof(struct safe_pdesc);
		for (nsegs = re->re_dst.nsegs; nsegs; nsegs--) {
			kprintf(" dpd[%u] %p: %p flags %x\n"
			    , ix, &sc->sc_dpring[ix]
			    , (caddr_t)(uintptr_t) sc->sc_dpring[ix].pd_addr
			    , sc->sc_dpring[ix].pd_flags
			);
			if (++ix == SAFE_TOTAL_DPART)
				ix = 0;
		}
	}
	kprintf("sa: cmd0 %08x cmd1 %08x staterec %x\n",
	    re->re_sa.sa_cmd0, re->re_sa.sa_cmd1, re->re_sa.sa_staterec);
	kprintf("sa: key %x %x %x %x %x %x %x %x\n"
	    , re->re_sa.sa_key[0]
	    , re->re_sa.sa_key[1]
	    , re->re_sa.sa_key[2]
	    , re->re_sa.sa_key[3]
	    , re->re_sa.sa_key[4]
	    , re->re_sa.sa_key[5]
	    , re->re_sa.sa_key[6]
	    , re->re_sa.sa_key[7]
	);
	kprintf("sa: indigest %x %x %x %x %x\n"
	    , re->re_sa.sa_indigest[0]
	    , re->re_sa.sa_indigest[1]
	    , re->re_sa.sa_indigest[2]
	    , re->re_sa.sa_indigest[3]
	    , re->re_sa.sa_indigest[4]
	);
	kprintf("sa: outdigest %x %x %x %x %x\n"
	    , re->re_sa.sa_outdigest[0]
	    , re->re_sa.sa_outdigest[1]
	    , re->re_sa.sa_outdigest[2]
	    , re->re_sa.sa_outdigest[3]
	    , re->re_sa.sa_outdigest[4]
	);
	kprintf("sr: iv %x %x %x %x\n"
	    , re->re_sastate.sa_saved_iv[0]
	    , re->re_sastate.sa_saved_iv[1]
	    , re->re_sastate.sa_saved_iv[2]
	    , re->re_sastate.sa_saved_iv[3]
	);
	kprintf("sr: hashbc %u indigest %x %x %x %x %x\n"
	    , re->re_sastate.sa_saved_hashbc
	    , re->re_sastate.sa_saved_indigest[0]
	    , re->re_sastate.sa_saved_indigest[1]
	    , re->re_sastate.sa_saved_indigest[2]
	    , re->re_sastate.sa_saved_indigest[3]
	    , re->re_sastate.sa_saved_indigest[4]
	);
}
static void
safe_dump_ring(struct safe_softc *sc, const char *tag)
{
	lockmgr(&sc->sc_ringlock, LK_EXCLUSIVE);
	kprintf("\nSafeNet Ring State:\n");
	safe_dump_intrstate(sc, tag);
	safe_dump_dmastatus(sc, tag);
	safe_dump_ringstate(sc, tag);
	if (sc->sc_nqchip) {
		struct safe_ringentry *re = sc->sc_back;
		do {
			safe_dump_request(sc, tag, re);
			if (++re == sc->sc_ringtop)
				re = sc->sc_ring;
		} while (re != sc->sc_front);
	}
	lockmgr(&sc->sc_ringlock, LK_RELEASE);
}
static int
sysctl_hw_safe_dump(SYSCTL_HANDLER_ARGS)
{
	char dmode[64];
	int error;

	strncpy(dmode, "", sizeof(dmode) - 1);
	dmode[sizeof(dmode) - 1] = '\0';
	error = sysctl_handle_string(oidp, &dmode[0], sizeof(dmode), req);

	if (error == 0 && req->newptr != NULL) {
		struct safe_softc *sc = safec;

		if (!sc)
			return (EINVAL);
		if (strncmp(dmode, "dma", 3) == 0)
			safe_dump_dmastatus(sc, "safe0");
		else if (strncmp(dmode, "int", 3) == 0)
			safe_dump_intrstate(sc, "safe0");
		else if (strncmp(dmode, "ring", 4) == 0)
			safe_dump_ring(sc, "safe0");
		else
			return (EINVAL);
	}
	return (error);
}
SYSCTL_PROC(_hw_safe, OID_AUTO, dump, CTLTYPE_STRING | CTLFLAG_RW,
	0, 0, sysctl_hw_safe_dump, "A", "Dump driver state");
#endif /* SAFE_DEBUG */
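/*
 * Usage sketch (kernel built with SAFE_DEBUG):
 *
 *	sysctl hw.safe.dump=dma		# DMA engine registers
 *	sysctl hw.safe.dump=int		# interrupt state
 *	sysctl hw.safe.dump=ring	# ring state + pending requests
 *
 * Output goes to the console via kprintf(); hw.safe.debug=1 turns
 * on the DPRINTF tracing used throughout the driver.
 */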