1 /* $FreeBSD: src/sys/dev/hifn/hifn7751.c,v 1.5.2.5 2003/06/04 17:56:59 sam Exp $ */
2 /* $OpenBSD: hifn7751.c,v 1.120 2002/05/17 00:33:34 deraadt Exp $ */
5 * Invertex AEON / Hifn 7751 driver
6 * Copyright (c) 1999 Invertex Inc. All rights reserved.
7 * Copyright (c) 1999 Theo de Raadt
8 * Copyright (c) 2000-2001 Network Security Technologies, Inc.
9 * http://www.netsec.net
10 * Copyright (c) 2003 Hifn Inc.
12 * This driver is based on a previous driver by Invertex, for which they
13 * requested: Please send any comments, feedback, bug-fixes, or feature
14 * requests to software@invertex.com.
16 * Redistribution and use in source and binary forms, with or without
17 * modification, are permitted provided that the following conditions
20 * 1. Redistributions of source code must retain the above copyright
21 * notice, this list of conditions and the following disclaimer.
22 * 2. Redistributions in binary form must reproduce the above copyright
23 * notice, this list of conditions and the following disclaimer in the
24 * documentation and/or other materials provided with the distribution.
25 * 3. The name of the author may not be used to endorse or promote products
26 * derived from this software without specific prior written permission.
28 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
29 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
30 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
31 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
32 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
33 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
34 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
35 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
36 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
37 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 * Effort sponsored in part by the Defense Advanced Research Projects
40 * Agency (DARPA) and Air Force Research Laboratory, Air Force
41 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
46 * Driver for various Hifn encryption processors.
50 #include <sys/param.h>
51 #include <sys/systm.h>
53 #include <sys/errno.h>
54 #include <sys/malloc.h>
55 #include <sys/kernel.h>
57 #include <sys/sysctl.h>
60 #include <sys/random.h>
61 #include <sys/thread2.h>
67 #include <machine/clock.h>
68 #include <opencrypto/cryptodev.h>
70 #include "cryptodev_if.h"
72 #include <bus/pci/pcivar.h>
73 #include <bus/pci/pcireg.h>
76 #include "../rndtest/rndtest.h"
78 #include "hifn7751reg.h"
79 #include "hifn7751var.h"
82 * Prototypes and count for the pci_device structure
84 static int hifn_probe(device_t);
85 static int hifn_attach(device_t);
86 static int hifn_detach(device_t);
87 static int hifn_suspend(device_t);
88 static int hifn_resume(device_t);
89 static void hifn_shutdown(device_t);
91 static void hifn_reset_board(struct hifn_softc *, int);
92 static void hifn_reset_puc(struct hifn_softc *);
93 static void hifn_puc_wait(struct hifn_softc *);
94 static int hifn_enable_crypto(struct hifn_softc *);
95 static void hifn_set_retry(struct hifn_softc *sc);
96 static void hifn_init_dma(struct hifn_softc *);
97 static void hifn_init_pci_registers(struct hifn_softc *);
98 static int hifn_sramsize(struct hifn_softc *);
99 static int hifn_dramsize(struct hifn_softc *);
100 static int hifn_ramtype(struct hifn_softc *);
101 static void hifn_sessions(struct hifn_softc *);
102 static void hifn_intr(void *);
103 static u_int hifn_write_command(struct hifn_command *, u_int8_t *);
104 static u_int32_t hifn_next_signature(u_int32_t a, u_int cnt);
105 static int hifn_newsession(device_t, u_int32_t *, struct cryptoini *);
106 static int hifn_freesession(device_t, u_int64_t);
107 static int hifn_process(device_t, struct cryptop *, int);
108 static void hifn_callback(struct hifn_softc *, struct hifn_command *, u_int8_t *);
109 static int hifn_crypto(struct hifn_softc *, struct hifn_command *, struct cryptop *, int);
110 static int hifn_readramaddr(struct hifn_softc *, int, u_int8_t *);
111 static int hifn_writeramaddr(struct hifn_softc *, int, u_int8_t *);
112 static int hifn_dmamap_load_src(struct hifn_softc *, struct hifn_command *);
113 static int hifn_dmamap_load_dst(struct hifn_softc *, struct hifn_command *);
114 static int hifn_init_pubrng(struct hifn_softc *);
116 static void hifn_rng(void *);
118 static void hifn_tick(void *);
119 static void hifn_abort(struct hifn_softc *);
120 static void hifn_alloc_slot(struct hifn_softc *, int *, int *, int *, int *);
122 static void hifn_write_reg_0(struct hifn_softc *, bus_size_t, u_int32_t);
123 static void hifn_write_reg_1(struct hifn_softc *, bus_size_t, u_int32_t);
/*
 * Newbus glue: device method table, driver/devclass declarations and
 * module dependencies (crypto framework, optional rndtest).
 */
126 static device_method_t hifn_methods[] = {
127 /* Device interface */
128 DEVMETHOD(device_probe, hifn_probe),
129 DEVMETHOD(device_attach, hifn_attach),
130 DEVMETHOD(device_detach, hifn_detach),
131 DEVMETHOD(device_suspend, hifn_suspend),
132 DEVMETHOD(device_resume, hifn_resume),
133 DEVMETHOD(device_shutdown, hifn_shutdown),
136 DEVMETHOD(bus_print_child, bus_generic_print_child),
137 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
139 /* crypto device methods */
140 DEVMETHOD(cryptodev_newsession, hifn_newsession),
141 DEVMETHOD(cryptodev_freesession,hifn_freesession),
142 DEVMETHOD(cryptodev_process, hifn_process),
146 static driver_t hifn_driver = {
149 sizeof (struct hifn_softc)
151 static devclass_t hifn_devclass;
153 DECLARE_DUMMY_MODULE(hifn);
154 DRIVER_MODULE(hifn, pci, hifn_driver, hifn_devclass, NULL, NULL);
155 MODULE_DEPEND(hifn, crypto, 1, 1, 1);
157 MODULE_DEPEND(hifn, rndtest, 1, 1, 1);
/*
 * Read a 32-bit register in BAR0 space.  The read also invalidates
 * sc_bar0_lastreg; presumably hifn_write_reg_0() uses it to elide
 * duplicate back-to-back writes -- NOTE(review): confirm against
 * hifn_write_reg_0(), which is not visible here.
 */
160 static __inline__ u_int32_t
161 READ_REG_0(struct hifn_softc *sc, bus_size_t reg)
163 u_int32_t v = bus_space_read_4(sc->sc_st0, sc->sc_sh0, reg);
164 sc->sc_bar0_lastreg = (bus_size_t) -1;
/* All BAR0 writes funnel through hifn_write_reg_0(). */
167 #define WRITE_REG_0(sc, reg, val) hifn_write_reg_0(sc, reg, val)
/*
 * Read a 32-bit register in BAR1 space; mirrors READ_REG_0 but for
 * the second register mapping (sc_st1/sc_sh1, sc_bar1_lastreg).
 */
169 static __inline__ u_int32_t
170 READ_REG_1(struct hifn_softc *sc, bus_size_t reg)
172 u_int32_t v = bus_space_read_4(sc->sc_st1, sc->sc_sh1, reg);
173 sc->sc_bar1_lastreg = (bus_size_t) -1;
/* All BAR1 writes funnel through hifn_write_reg_1(). */
176 #define WRITE_REG_1(sc, reg, val) hifn_write_reg_1(sc, reg, val)
/*
 * hw.hifn sysctl tree: a debug knob, exported driver statistics, and
 * the maximum number of operations batched before forcing an interrupt.
 */
178 SYSCTL_NODE(_hw, OID_AUTO, hifn, CTLFLAG_RD, 0, "Hifn driver parameters");
181 static int hifn_debug = 0;
182 SYSCTL_INT(_hw_hifn, OID_AUTO, debug, CTLFLAG_RW, &hifn_debug,
183 0, "control debugging msgs");
186 static struct hifn_stats hifnstats;
187 SYSCTL_STRUCT(_hw_hifn, OID_AUTO, stats, CTLFLAG_RD, &hifnstats,
188 hifn_stats, "driver statistics");
189 static int hifn_maxbatch = 1;
190 SYSCTL_INT(_hw_hifn, OID_AUTO, maxbatch, CTLFLAG_RW, &hifn_maxbatch,
191 0, "max ops to batch w/o interrupt");
194 * Probe for a supported device. The PCI vendor and device
195 * IDs are used to detect devices we know how to handle.
/*
 * Match supported PCI IDs: Invertex AEON, Hifn 7751/7951/7955/7956/7811
 * and the NetSec 7751.  Unknown devices from the Hifn vendor get their
 * device id logged to aid future support.
 */
198 hifn_probe(device_t dev)
200 if (pci_get_vendor(dev) == PCI_VENDOR_INVERTEX &&
201 pci_get_device(dev) == PCI_PRODUCT_INVERTEX_AEON)
203 if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
204 (pci_get_device(dev) == PCI_PRODUCT_HIFN_7751 ||
205 pci_get_device(dev) == PCI_PRODUCT_HIFN_7951 ||
206 pci_get_device(dev) == PCI_PRODUCT_HIFN_7955 ||
207 pci_get_device(dev) == PCI_PRODUCT_HIFN_7956 ||
208 pci_get_device(dev) == PCI_PRODUCT_HIFN_7811))
210 if (pci_get_vendor(dev) == PCI_VENDOR_NETSEC &&
211 pci_get_device(dev) == PCI_PRODUCT_NETSEC_7751)
213 if (pci_get_vendor(dev) == PCI_VENDOR_HIFN) {
214 device_printf(dev,"device id = 0x%x\n", pci_get_device(dev) );
/*
 * bus_dmamap_load() callback: stash the physical address of the first
 * (and only) DMA segment into the caller-supplied bus_addr_t.
 */
221 hifn_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
223 bus_addr_t *paddr = (bus_addr_t*) arg;
224 *paddr = segs->ds_addr;
/*
 * Return a human-readable part name for the attached chip, keyed on
 * PCI vendor/device ids; used only for the attach-time banner.
 */
228 hifn_partname(struct hifn_softc *sc)
230 /* XXX sprintf numbers when not decoded */
231 switch (pci_get_vendor(sc->sc_dev)) {
232 case PCI_VENDOR_HIFN:
233 switch (pci_get_device(sc->sc_dev)) {
234 case PCI_PRODUCT_HIFN_6500: return "Hifn 6500";
235 case PCI_PRODUCT_HIFN_7751: return "Hifn 7751";
236 case PCI_PRODUCT_HIFN_7811: return "Hifn 7811";
237 case PCI_PRODUCT_HIFN_7951: return "Hifn 7951";
238 case PCI_PRODUCT_HIFN_7955: return "Hifn 7955";
239 case PCI_PRODUCT_HIFN_7956: return "Hifn 7956";
241 return "Hifn unknown-part";
242 case PCI_VENDOR_INVERTEX:
243 switch (pci_get_device(sc->sc_dev)) {
244 case PCI_PRODUCT_INVERTEX_AEON: return "Invertex AEON";
246 return "Invertex unknown-part";
247 case PCI_VENDOR_NETSEC:
248 switch (pci_get_device(sc->sc_dev)) {
249 case PCI_PRODUCT_NETSEC_7751: return "NetSec 7751";
251 return "NetSec unknown-part";
253 return "Unknown-vendor unknown-part";
/*
 * Fallback entropy harvester used when rndtest is not attached: feed
 * each 32-bit word of the buffer straight into the kernel entropy pool.
 */
257 default_harvest(struct rndtest_state *rsp, void *buf, u_int count)
259 u_int32_t *p = (u_int32_t *)buf;
260 for (count /= sizeof (u_int32_t); count; count--)
261 add_true_randomness(*p++);
/*
 * Clamp a user-supplied value into [min, max], warning on the console
 * when it had to be adjusted; used to sanity-check PLL parameters.
 */
265 checkmaxmin(device_t dev, const char *what, u_int v, u_int min, u_int max)
268 device_printf(dev, "Warning, %s %u out of range, "
269 "using max %u\n", what, v, max);
271 } else if (v < min) {
272 device_printf(dev, "Warning, %s %u out of range, "
273 "using min %u\n", what, v, min);
280 * Select PLL configuration for 795x parts. This is complicated in
281 * that we cannot determine the optimal parameters without user input.
282 * The reference clock is derived from an external clock through a
283 * multiplier. The external clock is either the host bus (i.e. PCI)
284 * or an external clock generator. When using the PCI bus we assume
285 * the clock is either 33 or 66 MHz; for an external source we cannot
288 * PLL configuration is done with a string: "pci" for PCI bus, or "ext"
289 * for an external source, followed by the frequency. We calculate
290 * the appropriate multiplier and PLL register contents accordingly.
291 * When no configuration is given we default to "pci66" since that
292 * always will allow the card to work. If a card is using the PCI
293 * bus clock and in a 33MHz slot then it will be operating at half
294 * speed until the correct information is provided.
296 * We use a default setting of "ext66" because according to Mike Ham
297 * of HiFn, almost every board in existence has an external crystal
298 * populated at 66Mhz. Using PCI can be a problem on modern motherboards,
299 * because PCI33 can have clocks from 0 to 33Mhz, and some have
300 * non-PCI-compliant spread-spectrum clocks, which can confuse the pll.
/*
 * Parse the "pllconfig" hint ("pci<freq>" or "ext<freq>") for 795x
 * parts and compute the PLL register contents: reference-clock select,
 * multiplier (target Fck 266 MHz, even values only) and, for
 * multipliers > 8, the charge pump current bit.
 */
303 hifn_getpllconfig(device_t dev, u_int *pll)
306 u_int freq, mul, fl, fh;
/* No hint configured: caller's default (ext66 per the comment above) applies. */
310 if (resource_string_value("hifn", device_get_unit(dev),
311 "pllconfig", &pllspec))
315 if (strncmp(pllspec, "ext", 3) == 0) {
317 pllconfig |= HIFN_PLL_REF_SEL;
318 switch (pci_get_device(dev)) {
319 case PCI_PRODUCT_HIFN_7955:
320 case PCI_PRODUCT_HIFN_7956:
324 case PCI_PRODUCT_HIFN_7954:
329 } else if (strncmp(pllspec, "pci", 3) == 0)
/* Frequency in MHz follows the "ext"/"pci" prefix. */
331 freq = strtoul(pllspec, &nxt, 10);
335 freq = checkmaxmin(dev, "frequency", freq, fl, fh);
337 * Calculate multiplier. We target a Fck of 266 MHz,
338 * allowing only even values, possibly rounded down.
339 * Multipliers > 8 must set the charge pump current.
341 mul = checkmaxmin(dev, "PLL divisor", (266 / freq) &~ 1, 2, 12);
342 pllconfig |= (mul / 2 - 1) << HIFN_PLL_ND_SHIFT;
344 pllconfig |= HIFN_PLL_IS;
349 * Attach an interface that successfully probed.
/*
 * Attach: initialize the softc and lock, classify the chip (RNG,
 * public key, AES, 7811 quirks), enable PCI memory/busmaster access,
 * map both register BARs, allocate and load the shared DMA descriptor
 * area, unlock the crypto engine, size the RAM, hook the interrupt,
 * register with the opencrypto framework and start the tick callout.
 * On failure, resources are unwound in reverse order of acquisition.
 */
352 hifn_attach(device_t dev)
354 struct hifn_softc *sc = device_get_softc(dev);
361 KASSERT(sc != NULL, ("hifn_attach: null software carrier!"));
362 bzero(sc, sizeof (*sc));
365 lockinit(&sc->sc_lock, __DECONST(char *, device_get_nameunit(dev)),
368 /* XXX handle power management */
371 * The 7951 and 795x have a random number generator and
372 * public key support; note this.
374 if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
375 (pci_get_device(dev) == PCI_PRODUCT_HIFN_7951 ||
376 pci_get_device(dev) == PCI_PRODUCT_HIFN_7955 ||
377 pci_get_device(dev) == PCI_PRODUCT_HIFN_7956))
378 sc->sc_flags = HIFN_HAS_RNG | HIFN_HAS_PUBLIC;
380 * The 7811 has a random number generator and
381 * we also note its identity 'cuz of some quirks.
383 if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
384 pci_get_device(dev) == PCI_PRODUCT_HIFN_7811)
385 sc->sc_flags |= HIFN_IS_7811 | HIFN_HAS_RNG;
388 * The 795x parts support AES.
390 if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
391 (pci_get_device(dev) == PCI_PRODUCT_HIFN_7955 ||
392 pci_get_device(dev) == PCI_PRODUCT_HIFN_7956)) {
393 sc->sc_flags |= HIFN_IS_7956 | HIFN_HAS_AES;
395 * Select PLL configuration. This depends on the
396 * bus and board design and must be manually configured
397 * if the default setting is unacceptable.
399 hifn_getpllconfig(dev, &sc->sc_pllconfig);
403 * Configure support for memory-mapped access to
404 * registers and for DMA operations.
406 #define PCIM_ENA (PCIM_CMD_MEMEN|PCIM_CMD_BUSMASTEREN)
407 cmd = pci_read_config(dev, PCIR_COMMAND, 4);
409 pci_write_config(dev, PCIR_COMMAND, cmd, 4);
/* Re-read to verify the bits actually stuck. */
410 cmd = pci_read_config(dev, PCIR_COMMAND, 4);
411 if ((cmd & PCIM_ENA) != PCIM_ENA) {
412 device_printf(dev, "failed to enable %s\n",
413 (cmd & PCIM_ENA) == 0 ?
414 "memory mapping & bus mastering" :
415 (cmd & PCIM_CMD_MEMEN) == 0 ?
416 "memory mapping" : "bus mastering");
422 * Setup PCI resources. Note that we record the bus
423 * tag and handle for each register mapping, this is
424 * used by the READ_REG_0, WRITE_REG_0, READ_REG_1,
425 * and WRITE_REG_1 macros throughout the driver.
428 sc->sc_bar0res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid,
429 0, ~0, 1, RF_ACTIVE);
430 if (sc->sc_bar0res == NULL) {
431 device_printf(dev, "cannot map bar%d register space\n", 0);
434 sc->sc_st0 = rman_get_bustag(sc->sc_bar0res);
435 sc->sc_sh0 = rman_get_bushandle(sc->sc_bar0res);
436 sc->sc_bar0_lastreg = (bus_size_t) -1;
439 sc->sc_bar1res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid,
440 0, ~0, 1, RF_ACTIVE);
441 if (sc->sc_bar1res == NULL) {
442 device_printf(dev, "cannot map bar%d register space\n", 1);
445 sc->sc_st1 = rman_get_bustag(sc->sc_bar1res);
446 sc->sc_sh1 = rman_get_bushandle(sc->sc_bar1res);
447 sc->sc_bar1_lastreg = (bus_size_t) -1;
452 * Setup the area where the Hifn DMA's descriptors
453 * and associated data structures.
455 if (bus_dma_tag_create(NULL, /* parent */
456 1, 0, /* alignment,boundary */
457 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
458 BUS_SPACE_MAXADDR, /* highaddr */
459 NULL, NULL, /* filter, filterarg */
460 HIFN_MAX_DMALEN, /* maxsize */
461 MAX_SCATTER, /* nsegments */
462 HIFN_MAX_SEGLEN, /* maxsegsize */
463 BUS_DMA_ALLOCNOW, /* flags */
465 device_printf(dev, "cannot allocate DMA tag\n");
468 if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, &sc->sc_dmamap)) {
469 device_printf(dev, "cannot create dma map\n");
470 bus_dma_tag_destroy(sc->sc_dmat);
473 if (bus_dmamem_alloc(sc->sc_dmat, (void**) &kva, BUS_DMA_NOWAIT, &sc->sc_dmamap)) {
474 device_printf(dev, "cannot alloc dma buffer\n");
475 bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap);
476 bus_dma_tag_destroy(sc->sc_dmat);
479 if (bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap, kva,
480 sizeof (*sc->sc_dma),
481 hifn_dmamap_cb, &sc->sc_dma_physaddr,
483 device_printf(dev, "cannot load dma map\n");
484 bus_dmamem_free(sc->sc_dmat, kva, sc->sc_dmamap);
485 bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap);
486 bus_dma_tag_destroy(sc->sc_dmat);
489 sc->sc_dma = (struct hifn_dma *)kva;
490 bzero(sc->sc_dma, sizeof(*sc->sc_dma));
492 KASSERT(sc->sc_st0 != 0, ("hifn_attach: null bar0 tag!"));
493 KASSERT(sc->sc_sh0 != 0, ("hifn_attach: null bar0 handle!"));
494 KASSERT(sc->sc_st1 != 0, ("hifn_attach: null bar1 tag!"));
495 KASSERT(sc->sc_sh1 != 0, ("hifn_attach: null bar1 handle!"));
498 * Reset the board and do the ``secret handshake''
499 * to enable the crypto support. Then complete the
500 * initialization procedure by setting up the interrupt
501 * and hooking in to the system crypto support so we'll
502 * get used for system services like the crypto device,
503 * IPsec, RNG device, etc.
505 hifn_reset_board(sc, 0);
507 if (hifn_enable_crypto(sc) != 0) {
508 device_printf(dev, "crypto enabling failed\n");
514 hifn_init_pci_registers(sc);
516 /* XXX can't dynamically determine ram type for 795x; force dram */
517 if (sc->sc_flags & HIFN_IS_7956)
518 sc->sc_drammodel = 1;
519 else if (hifn_ramtype(sc))
522 if (sc->sc_drammodel == 0)
528 * Workaround for NetSec 7751 rev A: half ram size because two
529 * of the address lines were left floating
531 if (pci_get_vendor(dev) == PCI_VENDOR_NETSEC &&
532 pci_get_device(dev) == PCI_PRODUCT_NETSEC_7751 &&
533 pci_get_revid(dev) == 0x61) /*XXX???*/
534 sc->sc_ramsize >>= 1;
537 * Arrange the interrupt line.
540 sc->sc_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid,
541 0, ~0, 1, RF_SHAREABLE|RF_ACTIVE);
542 if (sc->sc_irq == NULL) {
543 device_printf(dev, "could not map interrupt\n");
547 * NB: Network code assumes we are blocked with splimp()
548 * so make sure the IRQ is marked appropriately.
550 if (bus_setup_intr(dev, sc->sc_irq, INTR_MPSAFE,
552 &sc->sc_intrhand, NULL)) {
553 device_printf(dev, "could not setup interrupt\n");
560 * NB: Keep only the low 16 bits; this masks the chip id
563 rev = READ_REG_1(sc, HIFN_1_REVID) & 0xffff;
/* Attach banner: part name, revision, ram size/type, session count. */
565 rseg = sc->sc_ramsize / 1024;
567 if (sc->sc_ramsize >= (1024 * 1024)) {
571 device_printf(sc->sc_dev, "%s, rev %u, %d%cB %cram, %u sessions\n",
572 hifn_partname(sc), rev,
573 rseg, rbase, sc->sc_drammodel ? 'd' : 's',
576 if (sc->sc_flags & HIFN_IS_7956)
577 kprintf(", pll=0x%x<%s clk, %ux mult>",
579 sc->sc_pllconfig & HIFN_PLL_REF_SEL ? "ext" : "pci",
580 2 + 2*((sc->sc_pllconfig & HIFN_PLL_ND) >> 11));
/* Register with the opencrypto framework as a hardware driver. */
583 sc->sc_cid = crypto_get_driverid(dev, CRYPTOCAP_F_HARDWARE);
584 if (sc->sc_cid < 0) {
585 device_printf(dev, "could not get crypto driver id\n");
589 WRITE_REG_0(sc, HIFN_0_PUCNFG,
590 READ_REG_0(sc, HIFN_0_PUCNFG) | HIFN_PUCNFG_CHIPID);
591 ena = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;
/*
 * Advertise algorithms by enable level; ENA_2 (strong crypto)
 * presumably falls through into the ENA_1 set -- the intervening
 * lines are not visible here, so confirm against the full source.
 */
594 case HIFN_PUSTAT_ENA_2:
595 crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0);
596 crypto_register(sc->sc_cid, CRYPTO_ARC4, 0, 0);
597 if (sc->sc_flags & HIFN_HAS_AES)
598 crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0);
600 case HIFN_PUSTAT_ENA_1:
601 crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0);
602 crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0);
603 crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0);
604 crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0);
605 crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0);
609 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
610 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
612 if (sc->sc_flags & (HIFN_HAS_PUBLIC | HIFN_HAS_RNG))
613 hifn_init_pubrng(sc);
615 /* NB: 1 means the callout runs w/o Giant locked */
616 callout_init_mp(&sc->sc_tickto);
617 callout_reset(&sc->sc_tickto, hz, hifn_tick, sc);
/* Error unwind: release resources in reverse order of acquisition. */
622 bus_teardown_intr(dev, sc->sc_irq, sc->sc_intrhand);
624 /* XXX don't store rid */
625 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq);
627 bus_dmamap_unload(sc->sc_dmat, sc->sc_dmamap);
628 bus_dmamem_free(sc->sc_dmat, sc->sc_dma, sc->sc_dmamap);
629 bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap);
630 bus_dma_tag_destroy(sc->sc_dmat);
632 /* Turn off DMA polling */
633 WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
634 HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
636 bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR1, sc->sc_bar1res);
638 bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR0, sc->sc_bar0res);
640 lockuninit(&sc->sc_lock);
645 * Detach an interface that successfully probed.
/*
 * Detach: quiesce the hardware (mask interrupts, stop callouts, stop
 * DMA polling), unregister from the crypto framework, then release
 * interrupt, DMA and register-space resources and the softc lock.
 */
648 hifn_detach(device_t dev)
650 struct hifn_softc *sc = device_get_softc(dev);
652 KASSERT(sc != NULL, ("hifn_detach: null software carrier!"));
654 /* disable interrupts */
655 WRITE_REG_1(sc, HIFN_1_DMA_IER, 0);
657 /*XXX other resources */
658 callout_stop(&sc->sc_tickto);
659 callout_stop(&sc->sc_rngto);
662 rndtest_detach(sc->sc_rndtest);
665 /* Turn off DMA polling */
666 WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
667 HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
669 crypto_unregister_all(sc->sc_cid);
671 bus_generic_detach(dev); /*XXX should be no children, right? */
673 bus_teardown_intr(dev, sc->sc_irq, sc->sc_intrhand);
674 /* XXX don't store rid */
675 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq);
677 bus_dmamap_unload(sc->sc_dmat, sc->sc_dmamap);
678 bus_dmamem_free(sc->sc_dmat, sc->sc_dma, sc->sc_dmamap);
679 bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap);
680 bus_dma_tag_destroy(sc->sc_dmat);
682 bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR1, sc->sc_bar1res);
683 bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR0, sc->sc_bar0res);
685 lockuninit(&sc->sc_lock);
691 * Stop all chip I/O so that the kernel's probe routines don't
692 * get confused by errant DMAs when rebooting.
/* Shutdown hook: stop all chip I/O so errant DMA can't corrupt a reboot. */
695 hifn_shutdown(device_t dev)
698 hifn_stop(device_get_softc(dev));
703 * Device suspend routine. Stop the interface and save some PCI
704 * settings in case the BIOS doesn't restore them properly on
/*
 * Suspend: save the BARs, BIOS address, interrupt line, cache line
 * size and latency timer so resume can restore them if the BIOS
 * fails to, then mark the device suspended.
 */
708 hifn_suspend(device_t dev)
710 struct hifn_softc *sc = device_get_softc(dev);
715 for (i = 0; i < 5; i++)
716 sc->saved_maps[i] = pci_read_config(dev, PCIR_MAPS + i * 4, 4);
717 sc->saved_biosaddr = pci_read_config(dev, PCIR_BIOS, 4);
718 sc->saved_intline = pci_read_config(dev, PCIR_INTLINE, 1);
719 sc->saved_cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1);
720 sc->saved_lattimer = pci_read_config(dev, PCIR_LATTIMER, 1);
722 sc->sc_suspended = 1;
728 * Device resume routine. Restore some PCI settings in case the BIOS
729 * doesn't, re-enable busmastering, and restart the interface if
/*
 * Resume: restore the PCI config state saved by hifn_suspend(),
 * re-enable bus mastering and I/O, and restart the interface.
 * NOTE(review): the ifp/IFF_UP check looks like leftover network-
 * driver boilerplate -- confirm ifp is actually initialized here.
 */
733 hifn_resume(device_t dev)
735 struct hifn_softc *sc = device_get_softc(dev);
739 /* better way to do this? */
740 for (i = 0; i < 5; i++)
741 pci_write_config(dev, PCIR_MAPS + i * 4, sc->saved_maps[i], 4);
742 pci_write_config(dev, PCIR_BIOS, sc->saved_biosaddr, 4);
743 pci_write_config(dev, PCIR_INTLINE, sc->saved_intline, 1);
744 pci_write_config(dev, PCIR_CACHELNSZ, sc->saved_cachelnsz, 1);
745 pci_write_config(dev, PCIR_LATTIMER, sc->saved_lattimer, 1);
747 /* reenable busmastering */
748 pci_enable_busmaster(dev);
749 pci_enable_io(dev, HIFN_RES);
751 /* reinitialize interface if necessary */
752 if (ifp->if_flags & IFF_UP)
755 sc->sc_suspended = 0;
/*
 * Initialize the public key unit and RNG: pick an entropy harvester
 * (rndtest when attached, else default_harvest), reset the 7951-style
 * public key/rng engine on non-7811 parts, enable the RNG (the 7811
 * needs a disable/reconfigure/re-enable dance) and start the periodic
 * harvest callout, then unmask the public-done interrupt.
 */
761 hifn_init_pubrng(struct hifn_softc *sc)
767 sc->sc_rndtest = rndtest_attach(sc->sc_dev);
769 sc->sc_harvest = rndtest_harvest;
771 sc->sc_harvest = default_harvest;
773 sc->sc_harvest = default_harvest;
775 if ((sc->sc_flags & HIFN_IS_7811) == 0) {
776 /* Reset 7951 public key/rng engine */
777 WRITE_REG_1(sc, HIFN_1_PUB_RESET,
778 READ_REG_1(sc, HIFN_1_PUB_RESET) | HIFN_PUBRST_RESET);
/* Poll for the self-clearing reset bit, bounded at 100 iterations. */
780 for (i = 0; i < 100; i++) {
782 if ((READ_REG_1(sc, HIFN_1_PUB_RESET) &
783 HIFN_PUBRST_RESET) == 0)
788 device_printf(sc->sc_dev, "public key init failed\n");
794 /* Enable the rng, if available */
795 if (sc->sc_flags & HIFN_HAS_RNG) {
796 if (sc->sc_flags & HIFN_IS_7811) {
/* 7811: if already enabled, disable first, then reconfigure. */
797 r = READ_REG_1(sc, HIFN_1_7811_RNGENA);
798 if (r & HIFN_7811_RNGENA_ENA) {
799 r &= ~HIFN_7811_RNGENA_ENA;
800 WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
802 WRITE_REG_1(sc, HIFN_1_7811_RNGCFG,
803 HIFN_7811_RNGCFG_DEFL);
804 r |= HIFN_7811_RNGENA_ENA;
805 WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
807 WRITE_REG_1(sc, HIFN_1_RNG_CONFIG,
808 READ_REG_1(sc, HIFN_1_RNG_CONFIG) |
/* Harvest at ~100 Hz. */
813 sc->sc_rnghz = hz / 100;
816 /* NB: 1 means the callout runs w/o Giant locked */
817 callout_init_mp(&sc->sc_rngto);
818 callout_reset(&sc->sc_rngto, sc->sc_rnghz, hifn_rng, sc);
822 /* Enable public key engine, if available */
823 if (sc->sc_flags & HIFN_HAS_PUBLIC) {
824 WRITE_REG_1(sc, HIFN_1_PUB_IEN, HIFN_PUBIEN_DONE);
825 sc->sc_dmaier |= HIFN_DMAIER_PUBDONE;
826 WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
/*
 * Periodic RNG harvest callout.  On the 7811 it drains word pairs from
 * the RNG FIFO (checking for underflow, which disables the RNG); other
 * parts read HIFN_1_RNG_DATA directly.  The first read after enable is
 * discarded per the data sheet, then words are passed to sc_harvest.
 * Reschedules itself every sc_rnghz ticks.
 */
836 #define RANDOM_BITS(n) (n)*sizeof (u_int32_t), (n)*sizeof (u_int32_t)*NBBY, 0
837 struct hifn_softc *sc = vsc;
838 u_int32_t sts, num[2];
841 if (sc->sc_flags & HIFN_IS_7811) {
842 for (i = 0; i < 5; i++) {
843 sts = READ_REG_1(sc, HIFN_1_7811_RNGSTS);
844 if (sts & HIFN_7811_RNGSTS_UFL) {
845 device_printf(sc->sc_dev,
846 "RNG underflow: disabling\n");
849 if ((sts & HIFN_7811_RNGSTS_RDY) == 0)
853 * There are at least two words in the RNG FIFO
856 num[0] = READ_REG_1(sc, HIFN_1_7811_RNGDAT);
857 num[1] = READ_REG_1(sc, HIFN_1_7811_RNGDAT);
858 /* NB: discard first data read */
862 (*sc->sc_harvest)(sc->sc_rndtest,
866 num[0] = READ_REG_1(sc, HIFN_1_RNG_DATA);
868 /* NB: discard first data read */
872 (*sc->sc_harvest)(sc->sc_rndtest,
873 num, sizeof (num[0]));
876 callout_reset(&sc->sc_rngto, sc->sc_rnghz, hifn_rng, sc);
/*
 * Spin (bounded, 5000 iterations) until the processing unit's RESET
 * bit clears; the 7956 uses the PUCTRL2 register instead of PUCTRL.
 * Logs a warning if the unit never comes out of reset.
 */
882 hifn_puc_wait(struct hifn_softc *sc)
885 int reg = HIFN_0_PUCTRL;
887 if (sc->sc_flags & HIFN_IS_7956) {
888 reg = HIFN_0_PUCTRL2;
891 for (i = 5000; i > 0; i--) {
893 if (!(READ_REG_0(sc, reg) & HIFN_PUCTRL_RESET))
897 device_printf(sc->sc_dev, "proc unit did not reset\n");
901 * Reset the processing unit.
/*
 * Reset the processing unit by writing DMAENA to the (part-dependent)
 * control register; hifn_puc_wait() is used to await completion.
 */
904 hifn_reset_puc(struct hifn_softc *sc)
906 int reg = HIFN_0_PUCTRL;
908 if (sc->sc_flags & HIFN_IS_7956) {
909 reg = HIFN_0_PUCTRL2;
912 /* Reset processing unit */
913 WRITE_REG_0(sc, reg, HIFN_PUCTRL_DMAENA);
918 * Set the Retry and TRDY registers; note that we set them to
919 * zero because the 7811 locks up when forced to retry (section
920 * 3.6 of "Specification Update SU-0014-04". Not clear if we
921 * should do this for all Hifn parts, but it doesn't seem to hurt.
/*
 * Zero the PCI RETRY and TRDY timeout registers; the 7811 locks up
 * when forced to retry (errata SU-0014-04), and zeroing appears to be
 * harmless on the other parts.
 */
924 hifn_set_retry(struct hifn_softc *sc)
926 /* NB: RETRY only responds to 8-bit reads/writes */
927 pci_write_config(sc->sc_dev, HIFN_RETRY_TIMEOUT, 0, 1);
928 pci_write_config(sc->sc_dev, HIFN_TRDY_TIMEOUT, 0, 4);
932 * Resets the board. Values in the registers are left as is
933 * from the reset (i.e. initial values are assigned elsewhere).
/*
 * Reset the board: disable DMA polling, reset and then release the DMA
 * unit, clear the shared descriptor area, wait for 7811 CRAM init, and
 * program DMA config register #2 burst sizes.  Register values are
 * left for hifn_init_pci_registers() to assign.
 */
936 hifn_reset_board(struct hifn_softc *sc, int full)
941 * Set polling in the DMA configuration register to zero. 0x7 avoids
942 * resetting the board and zeros out the other fields.
944 WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
945 HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
948 * Now that polling has been disabled, we have to wait 1 ms
949 * before resetting the board.
953 /* Reset the DMA unit */
955 WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MODE);
958 WRITE_REG_1(sc, HIFN_1_DMA_CNFG,
959 HIFN_DMACNFG_MODE | HIFN_DMACNFG_MSTRESET);
963 KASSERT(sc->sc_dma != NULL, ("hifn_reset_board: null DMA tag!"));
964 bzero(sc->sc_dma, sizeof(*sc->sc_dma));
966 /* Bring dma unit out of reset */
967 WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
968 HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
/* 7811 only: poll (bounded) for context RAM initialization. */
973 if (sc->sc_flags & HIFN_IS_7811) {
974 for (reg = 0; reg < 1000; reg++) {
975 if (READ_REG_1(sc, HIFN_1_7811_MIPSRST) &
976 HIFN_MIPSRST_CRAMINIT)
981 kprintf(": cram init timeout\n");
983 /* set up DMA configuration register #2 */
984 /* turn off all PK and BAR0 swaps */
985 WRITE_REG_1(sc, HIFN_1_DMA_CNFG2,
986 (3 << HIFN_DMACNFG2_INIT_WRITE_BURST_SHIFT)|
987 (3 << HIFN_DMACNFG2_INIT_READ_BURST_SHIFT)|
988 (2 << HIFN_DMACNFG2_TGT_WRITE_BURST_SHIFT)|
989 (2 << HIFN_DMACNFG2_TGT_READ_BURST_SHIFT));
/*
 * Advance the unlock-handshake signature `a' by `cnt' steps of an
 * LFSR-style update (shift left one, feed back a parity bit); used by
 * hifn_enable_crypto() to derive the values written to SECRET2.
 */
994 hifn_next_signature(u_int32_t a, u_int cnt)
999 for (i = 0; i < cnt; i++) {
1001 /* get the parity */
1009 a = (v & 1) ^ (a << 1);
/*
 * Per-device 13-byte "card id" tables consumed by hifn_enable_crypto()
 * during the unlock handshake.  All currently-known parts use an
 * all-zero id; the table still gates the handshake to known devices.
 */
1020 static struct pci2id pci2id[] = {
1023 PCI_PRODUCT_HIFN_7951,
1024 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1025 0x00, 0x00, 0x00, 0x00, 0x00 }
1028 PCI_PRODUCT_HIFN_7955,
1029 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1030 0x00, 0x00, 0x00, 0x00, 0x00 }
1033 PCI_PRODUCT_HIFN_7956,
1034 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1035 0x00, 0x00, 0x00, 0x00, 0x00 }
1038 PCI_PRODUCT_NETSEC_7751,
1039 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1040 0x00, 0x00, 0x00, 0x00, 0x00 }
1042 PCI_VENDOR_INVERTEX,
1043 PCI_PRODUCT_INVERTEX_AEON,
1044 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1045 0x00, 0x00, 0x00, 0x00, 0x00 }
1048 PCI_PRODUCT_HIFN_7811,
1049 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1050 0x00, 0x00, 0x00, 0x00, 0x00 }
1053 * Other vendors share this PCI ID as well, such as
1054 * http://www.powercrypt.com, and obviously they also
1058 PCI_PRODUCT_HIFN_7751,
1059 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1060 0x00, 0x00, 0x00, 0x00, 0x00 }
1065 * Checks to see if crypto is already enabled. If crypto isn't enabled,
1066 * "hifn_enable_crypto" is called to enable it. The check is important,
1067 * as enabling crypto twice will lock the board.
/*
 * Perform the "secret handshake" that unlocks the crypto engine.
 * Looks up the device's card id in pci2id[], bails if already unlocked
 * (a second unlock bricks the chip until reset), then writes the
 * signature sequence derived by hifn_next_signature() to SECRET2 and
 * re-reads the enable level to report strong/weak/disabled.
 */
1070 hifn_enable_crypto(struct hifn_softc *sc)
1072 u_int32_t dmacfg, ramcfg, encl, addr, i;
1073 char *offtbl = NULL;
1075 for (i = 0; i < NELEM(pci2id); i++) {
1076 if (pci2id[i].pci_vendor == pci_get_vendor(sc->sc_dev) &&
1077 pci2id[i].pci_prod == pci_get_device(sc->sc_dev)) {
1078 offtbl = pci2id[i].card_id;
1082 if (offtbl == NULL) {
1083 device_printf(sc->sc_dev, "Unknown card!\n");
/* Save register state so it can be restored after the handshake. */
1087 ramcfg = READ_REG_0(sc, HIFN_0_PUCNFG);
1088 dmacfg = READ_REG_1(sc, HIFN_1_DMA_CNFG);
1091 * The RAM config register's encrypt level bit needs to be set before
1092 * every read performed on the encryption level register.
1094 WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);
1096 encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;
1099 * Make sure we don't re-unlock. Two unlocks kills chip until the
1102 if (encl == HIFN_PUSTAT_ENA_1 || encl == HIFN_PUSTAT_ENA_2) {
1105 device_printf(sc->sc_dev,
1106 "Strong crypto already enabled!\n");
1111 if (encl != 0 && encl != HIFN_PUSTAT_ENA_0) {
1114 device_printf(sc->sc_dev,
1115 "Unknown encryption level 0x%x\n", encl);
1120 WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_UNLOCK |
1121 HIFN_DMACNFG_MSTRESET | HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
1123 addr = READ_REG_1(sc, HIFN_UNLOCK_SECRET1);
1125 WRITE_REG_1(sc, HIFN_UNLOCK_SECRET2, 0);
/* 13 card-id bytes drive 13 signature steps written to SECRET2. */
1128 for (i = 0; i <= 12; i++) {
1129 addr = hifn_next_signature(addr, offtbl[i] + 0x101);
1130 WRITE_REG_1(sc, HIFN_UNLOCK_SECRET2, addr);
1135 WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);
1136 encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;
1140 if (encl != HIFN_PUSTAT_ENA_1 && encl != HIFN_PUSTAT_ENA_2)
1141 device_printf(sc->sc_dev, "Engine is permanently "
1142 "locked until next system reset!\n");
1144 device_printf(sc->sc_dev, "Engine enabled "
/* Restore the saved register state. */
1150 WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg);
1151 WRITE_REG_1(sc, HIFN_1_DMA_CNFG, dmacfg);
1154 case HIFN_PUSTAT_ENA_1:
1155 case HIFN_PUSTAT_ENA_2:
1157 case HIFN_PUSTAT_ENA_0:
1159 device_printf(sc->sc_dev, "disabled");
1167 * Give initial values to the registers listed in the "Register Space"
1168 * section of the HIFN Software Development reference manual.
/*
 * Program the chip's register space to its initial operating state:
 * fixed init values, the four DMA ring base addresses, the DMA status
 * and interrupt-enable masks, the PU configuration (with a PLL bring-up
 * sequence on 7956 parts), and finally the DMA config/poll parameters.
 */
1171 hifn_init_pci_registers(struct hifn_softc *sc)
1173 /* write fixed values needed by the Initialization registers */
1174 WRITE_REG_0(sc, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA);
1175 WRITE_REG_0(sc, HIFN_0_FIFOCNFG, HIFN_FIFOCNFG_THRESHOLD);
1176 WRITE_REG_0(sc, HIFN_0_PUIER, HIFN_PUIER_DSTOVER);
1178 /* write all 4 ring address registers */
1179 WRITE_REG_1(sc, HIFN_1_DMA_CRAR, sc->sc_dma_physaddr +
1180 offsetof(struct hifn_dma, cmdr[0]));
1181 WRITE_REG_1(sc, HIFN_1_DMA_SRAR, sc->sc_dma_physaddr +
1182 offsetof(struct hifn_dma, srcr[0]));
1183 WRITE_REG_1(sc, HIFN_1_DMA_DRAR, sc->sc_dma_physaddr +
1184 offsetof(struct hifn_dma, dstr[0]));
1185 WRITE_REG_1(sc, HIFN_1_DMA_RRAR, sc->sc_dma_physaddr +
1186 offsetof(struct hifn_dma, resr[0]));
1190 /* write status register */
1191 WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1192 HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS |
1193 HIFN_DMACSR_S_CTRL_DIS | HIFN_DMACSR_C_CTRL_DIS |
1194 HIFN_DMACSR_D_ABORT | HIFN_DMACSR_D_DONE | HIFN_DMACSR_D_LAST |
1195 HIFN_DMACSR_D_WAIT | HIFN_DMACSR_D_OVER |
1196 HIFN_DMACSR_R_ABORT | HIFN_DMACSR_R_DONE | HIFN_DMACSR_R_LAST |
1197 HIFN_DMACSR_R_WAIT | HIFN_DMACSR_R_OVER |
1198 HIFN_DMACSR_S_ABORT | HIFN_DMACSR_S_DONE | HIFN_DMACSR_S_LAST |
1199 HIFN_DMACSR_S_WAIT |
1200 HIFN_DMACSR_C_ABORT | HIFN_DMACSR_C_DONE | HIFN_DMACSR_C_LAST |
1201 HIFN_DMACSR_C_WAIT |
1202 HIFN_DMACSR_ENGINE |
1203 ((sc->sc_flags & HIFN_HAS_PUBLIC) ?
1204 HIFN_DMACSR_PUBDONE : 0) |
1205 ((sc->sc_flags & HIFN_IS_7811) ?
1206 HIFN_DMACSR_ILLW | HIFN_DMACSR_ILLR : 0));
/* Build the interrupt-enable mask; 7811 adds illegal read/write. */
1208 sc->sc_d_busy = sc->sc_r_busy = sc->sc_s_busy = sc->sc_c_busy = 0;
1209 sc->sc_dmaier |= HIFN_DMAIER_R_DONE | HIFN_DMAIER_C_ABORT |
1210 HIFN_DMAIER_D_OVER | HIFN_DMAIER_R_OVER |
1211 HIFN_DMAIER_S_ABORT | HIFN_DMAIER_D_ABORT | HIFN_DMAIER_R_ABORT |
1212 ((sc->sc_flags & HIFN_IS_7811) ?
1213 HIFN_DMAIER_ILLW | HIFN_DMAIER_ILLR : 0);
1214 sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
1215 WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
1217 if (sc->sc_flags & HIFN_IS_7956) {
1220 WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
1221 HIFN_PUCNFG_TCALLPHASES |
1222 HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32);
1224 /* turn off the clocks and insure bypass is set */
1225 pll = READ_REG_1(sc, HIFN_1_PLL);
1226 pll = (pll &~ (HIFN_PLL_PK_CLK_SEL | HIFN_PLL_PE_CLK_SEL))
1227 | HIFN_PLL_BP | HIFN_PLL_MBSET;
1228 WRITE_REG_1(sc, HIFN_1_PLL, pll);
1229 DELAY(10*1000); /* 10ms */
1230 /* change configuration */
1231 pll = (pll &~ HIFN_PLL_CONFIG) | sc->sc_pllconfig;
1232 WRITE_REG_1(sc, HIFN_1_PLL, pll);
1233 DELAY(10*1000); /* 10ms */
1234 /* disable bypass */
1235 pll &= ~HIFN_PLL_BP;
1236 WRITE_REG_1(sc, HIFN_1_PLL, pll);
1237 /* enable clocks with new configuration */
1238 pll |= HIFN_PLL_PK_CLK_SEL | HIFN_PLL_PE_CLK_SEL;
1239 WRITE_REG_1(sc, HIFN_1_PLL, pll);
/* Non-7956 parts: select DRAM or SRAM model as detected earlier. */
1241 WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
1242 HIFN_PUCNFG_DRFR_128 | HIFN_PUCNFG_TCALLPHASES |
1243 HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32 |
1244 (sc->sc_drammodel ? HIFN_PUCNFG_DRAM : HIFN_PUCNFG_SRAM));
1247 WRITE_REG_0(sc, HIFN_0_PUISR, HIFN_PUISR_DSTOVER);
1248 WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
1249 HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE | HIFN_DMACNFG_LAST |
1250 ((HIFN_POLL_FREQUENCY << 16 ) & HIFN_DMACNFG_POLLFREQ) |
1251 ((HIFN_POLL_SCALAR << 8) & HIFN_DMACNFG_POLLINVAL));
1255 * The maximum number of sessions supported by the card
1256 * is dependent on the amount of context ram, which
1257 * encryption algorithms are enabled, and how compression
1258 * is configured. This should be configured before this
1259 * routine is called.
1262 hifn_sessions(struct hifn_softc *sc)
1267 pucnfg = READ_REG_0(sc, HIFN_0_PUCNFG);
1269 if (pucnfg & HIFN_PUCNFG_COMPSING) {
1270 if (pucnfg & HIFN_PUCNFG_ENCCNFG)
/* NOTE(review): ctxsize is assigned on lines elided from this listing;
 * per-session context size depends on the ENCCNFG setting above. */
1275 * 7955/7956 has internal context memory of 32K
1277 if (sc->sc_flags & HIFN_IS_7956)
1278 sc->sc_maxses = 32768 / ctxsize;
1281 ((sc->sc_ramsize - 32768) / ctxsize);
1283 sc->sc_maxses = sc->sc_ramsize / 16384;
/* hard cap on sessions regardless of available context ram */
1285 if (sc->sc_maxses > 2048)
1286 sc->sc_maxses = 2048;
1290 * Determine ram type (sram or dram). Board should be just out of a reset
1291 * state when this is called.
1294 hifn_ramtype(struct hifn_softc *sc)
1296 u_int8_t data[8], dataexpect[8];
/* Write an 0x55 pattern to ram address 0 and read it back; any mismatch
 * means the board is DRAM (sc_drammodel = 1) rather than SRAM. */
1299 for (i = 0; i < sizeof(data); i++)
1300 data[i] = dataexpect[i] = 0x55;
1301 if (hifn_writeramaddr(sc, 0, data))
1303 if (hifn_readramaddr(sc, 0, data))
1305 if (bcmp(data, dataexpect, sizeof(data)) != 0) {
1306 sc->sc_drammodel = 1;
/* repeat with the complementary 0xaa pattern to catch stuck-at bits */
1310 for (i = 0; i < sizeof(data); i++)
1311 data[i] = dataexpect[i] = 0xaa;
1312 if (hifn_writeramaddr(sc, 0, data))
1314 if (hifn_readramaddr(sc, 0, data))
1316 if (bcmp(data, dataexpect, sizeof(data)) != 0) {
1317 sc->sc_drammodel = 1;
/* probe SRAM in 16KB steps up to a 32MB ceiling */
1324 #define HIFN_SRAM_MAX (32 << 20)
1325 #define HIFN_SRAM_STEP_SIZE 16384
1326 #define HIFN_SRAM_GRANULARITY (HIFN_SRAM_MAX / HIFN_SRAM_STEP_SIZE)
/*
 * Size the SRAM: stamp each 16KB step with its index (written high-to-low
 * so aliased addresses get overwritten), then read back low-to-high; the
 * last step whose stamp survives marks the end of real memory.
 */
1329 hifn_sramsize(struct hifn_softc *sc)
1333 u_int8_t dataexpect[sizeof(data)];
1336 for (i = 0; i < sizeof(data); i++)
1337 data[i] = dataexpect[i] = i ^ 0x5a;
1339 for (i = HIFN_SRAM_GRANULARITY - 1; i >= 0; i--) {
1340 a = i * HIFN_SRAM_STEP_SIZE;
1341 bcopy(&i, data, sizeof(i));
1342 hifn_writeramaddr(sc, a, data);
1345 for (i = 0; i < HIFN_SRAM_GRANULARITY; i++) {
1346 a = i * HIFN_SRAM_STEP_SIZE;
1347 bcopy(&i, dataexpect, sizeof(i));
1348 if (hifn_readramaddr(sc, a, data) < 0)
1350 if (bcmp(data, dataexpect, sizeof(data)) != 0)
1352 sc->sc_ramsize = a + HIFN_SRAM_STEP_SIZE;
1359 * XXX For dram boards, one should really try all of the
1360 * HIFN_PUCNFG_DSZ_*'s. This just assumes that PUCNFG
1361 * is already set up correctly.
1364 hifn_dramsize(struct hifn_softc *sc)
1368 if (sc->sc_flags & HIFN_IS_7956) {
1370 * 7955/7956 have a fixed internal ram of only 32K.
1372 sc->sc_ramsize = 32768;
1374 cnfg = READ_REG_0(sc, HIFN_0_PUCNFG) &
1375 HIFN_PUCNFG_DRAMMASK;
/* decode the PUCNFG DRAM-size field: 2^(18 + (cnfg >> 13)) bytes */
1376 sc->sc_ramsize = 1 << ((cnfg >> 13) + 18);
/*
 * Hand out one descriptor slot on each of the four rings (cmd/src/dst/res).
 * When an index has reached the ring size, the extra descriptor at that
 * index is armed as a JUMP back to the ring start before the index wraps
 * (the wrap itself happens on lines elided from this listing).
 */
1382 hifn_alloc_slot(struct hifn_softc *sc, int *cmdp, int *srcp, int *dstp, int *resp)
1384 struct hifn_dma *dma = sc->sc_dma;
1386 if (dma->cmdi == HIFN_D_CMD_RSIZE) {
1388 dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID |
1389 HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
1390 HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
1391 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1393 *cmdp = dma->cmdi++;
1394 dma->cmdk = dma->cmdi;
1396 if (dma->srci == HIFN_D_SRC_RSIZE) {
1398 dma->srcr[HIFN_D_SRC_RSIZE].l = htole32(HIFN_D_VALID |
1399 HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
1400 HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE,
1401 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1403 *srcp = dma->srci++;
1404 dma->srck = dma->srci;
1406 if (dma->dsti == HIFN_D_DST_RSIZE) {
1408 dma->dstr[HIFN_D_DST_RSIZE].l = htole32(HIFN_D_VALID |
1409 HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
1410 HIFN_DSTR_SYNC(sc, HIFN_D_DST_RSIZE,
1411 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1413 *dstp = dma->dsti++;
1414 dma->dstk = dma->dsti;
1416 if (dma->resi == HIFN_D_RES_RSIZE) {
1418 dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID |
1419 HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
1420 HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
1421 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1423 *resp = dma->resi++;
1424 dma->resk = dma->resi;
/*
 * Write 8 bytes of 'data' into the card's context ram at 'addr' by issuing
 * a write command (opcode 3 in the top bits of masks) through the normal
 * descriptor rings, then busy-wait for the result descriptor's VALID bit
 * to clear. addr is split as session_num = addr >> 14, dest count =
 * addr & 0x3fff. Returns nonzero on timeout (return paths elided here).
 */
1428 hifn_writeramaddr(struct hifn_softc *sc, int addr, u_int8_t *data)
1430 struct hifn_dma *dma = sc->sc_dma;
1431 hifn_base_command_t wc;
1432 const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ;
1433 int r, cmdi, resi, srci, dsti;
1435 wc.masks = htole16(3 << 13);
1436 wc.session_num = htole16(addr >> 14);
1437 wc.total_source_count = htole16(8);
1438 wc.total_dest_count = htole16(addr & 0x3fff);
1440 hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi);
1442 WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1443 HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
1444 HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA);
1446 /* build write command */
1447 bzero(dma->command_bufs[cmdi], HIFN_MAX_COMMAND);
1448 *(hifn_base_command_t *)dma->command_bufs[cmdi] = wc;
1449 bcopy(data, &dma->test_src, sizeof(dma->test_src));
1451 dma->srcr[srci].p = htole32(sc->sc_dma_physaddr
1452 + offsetof(struct hifn_dma, test_src));
1453 dma->dstr[dsti].p = htole32(sc->sc_dma_physaddr
1454 + offsetof(struct hifn_dma, test_dst));
1456 dma->cmdr[cmdi].l = htole32(16 | masks);
1457 dma->srcr[srci].l = htole32(8 | masks);
1458 dma->dstr[dsti].l = htole32(4 | masks);
1459 dma->resr[resi].l = htole32(4 | masks);
1461 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1462 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
/* poll up to 10000 iterations for the hardware to clear VALID */
1464 for (r = 10000; r >= 0; r--) {
1466 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1467 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1468 if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0)
1470 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1471 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1474 device_printf(sc->sc_dev, "writeramaddr -- "
1475 "result[%d](addr %d) still valid\n", resi, addr);
1481 WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1482 HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS |
1483 HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS);
/*
 * Counterpart to hifn_writeramaddr: read 8 bytes from context ram at
 * 'addr' into 'data' using a read command (opcode 2 in the top bits of
 * masks); here source count carries the ram address and dest count is 8.
 * Polls the result descriptor the same way and returns nonzero on timeout
 * (return paths elided from this listing).
 */
1489 hifn_readramaddr(struct hifn_softc *sc, int addr, u_int8_t *data)
1491 struct hifn_dma *dma = sc->sc_dma;
1492 hifn_base_command_t rc;
1493 const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ;
1494 int r, cmdi, srci, dsti, resi;
1496 rc.masks = htole16(2 << 13);
1497 rc.session_num = htole16(addr >> 14);
1498 rc.total_source_count = htole16(addr & 0x3fff);
1499 rc.total_dest_count = htole16(8);
1501 hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi);
1503 WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1504 HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
1505 HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA);
1507 bzero(dma->command_bufs[cmdi], HIFN_MAX_COMMAND);
1508 *(hifn_base_command_t *)dma->command_bufs[cmdi] = rc;
1510 dma->srcr[srci].p = htole32(sc->sc_dma_physaddr +
1511 offsetof(struct hifn_dma, test_src));
1513 dma->dstr[dsti].p = htole32(sc->sc_dma_physaddr +
1514 offsetof(struct hifn_dma, test_dst));
1516 dma->cmdr[cmdi].l = htole32(8 | masks);
1517 dma->srcr[srci].l = htole32(8 | masks);
1518 dma->dstr[dsti].l = htole32(8 | masks);
1519 dma->resr[resi].l = htole32(HIFN_MAX_RESULT | masks);
1521 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1522 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1524 for (r = 10000; r >= 0; r--) {
1526 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1527 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1528 if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0)
1530 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1531 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1534 device_printf(sc->sc_dev, "readramaddr -- "
1535 "result[%d](addr %d) still valid\n", resi, addr);
/* copy the DMA'd bytes out to the caller's buffer */
1539 bcopy(&dma->test_dst, data, sizeof(dma->test_dst));
1542 WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1543 HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS |
1544 HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS);
1550 * Initialize the descriptor rings.
1553 hifn_init_dma(struct hifn_softc *sc)
1555 struct hifn_dma *dma = sc->sc_dma;
1560 /* initialize static pointer values */
1561 for (i = 0; i < HIFN_D_CMD_RSIZE; i++)
1562 dma->cmdr[i].p = htole32(sc->sc_dma_physaddr +
1563 offsetof(struct hifn_dma, command_bufs[i][0]));
1564 for (i = 0; i < HIFN_D_RES_RSIZE; i++)
1565 dma->resr[i].p = htole32(sc->sc_dma_physaddr +
1566 offsetof(struct hifn_dma, result_bufs[i][0]));
/* the extra descriptor at index RSIZE on each ring points back to the
 * ring start; it is used as the JUMP target when the ring wraps */
1568 dma->cmdr[HIFN_D_CMD_RSIZE].p =
1569 htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, cmdr[0]));
1570 dma->srcr[HIFN_D_SRC_RSIZE].p =
1571 htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, srcr[0]));
1572 dma->dstr[HIFN_D_DST_RSIZE].p =
1573 htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, dstr[0]));
1574 dma->resr[HIFN_D_RES_RSIZE].p =
1575 htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, resr[0]));
/* reset in-use counts (u), producer indices (i) and consumer indices (k) */
1577 dma->cmdu = dma->srcu = dma->dstu = dma->resu = 0;
1578 dma->cmdi = dma->srci = dma->dsti = dma->resi = 0;
1579 dma->cmdk = dma->srck = dma->dstk = dma->resk = 0;
1583 * Writes out the raw command buffer space. Returns the
1584 * command buffer size.
/*
 * Serialize a hifn_command into the raw command buffer 'buf': base command,
 * then optional MAC and crypt sub-commands, then any new MAC key, cipher
 * key and IV. Returns the number of bytes written.
 */
1587 hifn_write_command(struct hifn_command *cmd, u_int8_t *buf)
1590 hifn_base_command_t *base_cmd;
1591 hifn_mac_command_t *mac_cmd;
1592 hifn_crypt_command_t *cry_cmd;
1593 int using_mac, using_crypt, len, ivlen;
1594 u_int32_t dlen, slen;
1597 using_mac = cmd->base_masks & HIFN_BASE_CMD_MAC;
1598 using_crypt = cmd->base_masks & HIFN_BASE_CMD_CRYPT;
1600 base_cmd = (hifn_base_command_t *)buf_pos;
1601 base_cmd->masks = htole16(cmd->base_masks);
1602 slen = cmd->src_mapsize;
/* when slop bytes exist, the dest length counts the 32-bit slop word
 * instead of the trailing partial bytes */
1604 dlen = cmd->dst_mapsize - cmd->sloplen + sizeof(u_int32_t);
1606 dlen = cmd->dst_mapsize;
1607 base_cmd->total_source_count = htole16(slen & HIFN_BASE_CMD_LENMASK_LO);
1608 base_cmd->total_dest_count = htole16(dlen & HIFN_BASE_CMD_LENMASK_LO);
/* high bits of the 16+ bit lengths ride in the session_num field */
1612 base_cmd->session_num = htole16(
1613 ((slen << HIFN_BASE_CMD_SRCLEN_S) & HIFN_BASE_CMD_SRCLEN_M) |
1614 ((dlen << HIFN_BASE_CMD_DSTLEN_S) & HIFN_BASE_CMD_DSTLEN_M));
1615 buf_pos += sizeof(hifn_base_command_t);
1618 mac_cmd = (hifn_mac_command_t *)buf_pos;
1619 dlen = cmd->maccrd->crd_len;
1620 mac_cmd->source_count = htole16(dlen & 0xffff);
1622 mac_cmd->masks = htole16(cmd->mac_masks |
1623 ((dlen << HIFN_MAC_CMD_SRCLEN_S) & HIFN_MAC_CMD_SRCLEN_M));
1624 mac_cmd->header_skip = htole16(cmd->maccrd->crd_skip);
1625 mac_cmd->reserved = 0;
1626 buf_pos += sizeof(hifn_mac_command_t);
1630 cry_cmd = (hifn_crypt_command_t *)buf_pos;
1631 dlen = cmd->enccrd->crd_len;
1632 cry_cmd->source_count = htole16(dlen & 0xffff);
1634 cry_cmd->masks = htole16(cmd->cry_masks |
1635 ((dlen << HIFN_CRYPT_CMD_SRCLEN_S) & HIFN_CRYPT_CMD_SRCLEN_M));
1636 cry_cmd->header_skip = htole16(cmd->enccrd->crd_skip);
1637 cry_cmd->reserved = 0;
1638 buf_pos += sizeof(hifn_crypt_command_t);
1641 if (using_mac && cmd->mac_masks & HIFN_MAC_CMD_NEW_KEY) {
1642 bcopy(cmd->mac, buf_pos, HIFN_MAC_KEY_LENGTH);
1643 buf_pos += HIFN_MAC_KEY_LENGTH;
/* key material length depends on the selected cipher */
1646 if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_KEY) {
1647 switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) {
1648 case HIFN_CRYPT_CMD_ALG_3DES:
1649 bcopy(cmd->ck, buf_pos, HIFN_3DES_KEY_LENGTH);
1650 buf_pos += HIFN_3DES_KEY_LENGTH;
1652 case HIFN_CRYPT_CMD_ALG_DES:
1653 bcopy(cmd->ck, buf_pos, HIFN_DES_KEY_LENGTH);
1654 buf_pos += HIFN_DES_KEY_LENGTH;
1656 case HIFN_CRYPT_CMD_ALG_RC4:
1661 clen = MIN(cmd->cklen, len);
1662 bcopy(cmd->ck, buf_pos, clen);
1669 case HIFN_CRYPT_CMD_ALG_AES:
1671 * AES keys are variable 128, 192 and
1672 * 256 bits (16, 24 and 32 bytes).
1674 bcopy(cmd->ck, buf_pos, cmd->cklen);
1675 buf_pos += cmd->cklen;
1680 if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_IV) {
1681 switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) {
1682 case HIFN_CRYPT_CMD_ALG_AES:
1683 ivlen = HIFN_AES_IV_LENGTH;
1686 ivlen = HIFN_IV_LENGTH;
1689 bcopy(cmd->iv, buf_pos, ivlen);
1693 if ((cmd->base_masks & (HIFN_BASE_CMD_MAC|HIFN_BASE_CMD_CRYPT)) == 0) {
1698 return (buf_pos - buf);
/*
 * Check whether every DMA segment of an operand is 32-bit aligned: all
 * segment addresses must be 4-byte aligned, and every segment length except
 * the last must be a multiple of 4. (The return statements are on lines
 * elided from this listing.)
 */
1703 hifn_dmamap_aligned(struct hifn_operand *op)
1707 for (i = 0; i < op->nsegs; i++) {
1708 if (op->segs[i].ds_addr & 3)
1710 if ((i != (op->nsegs - 1)) && (op->segs[i].ds_len & 3))
/*
 * Advance a dst-ring index by one; when it reaches the ring size, arm the
 * JUMP descriptor at that slot so the hardware returns to the ring start
 * (the reset of idx to 0 and the return are elided from this listing).
 */
1717 hifn_dmamap_dstwrap(struct hifn_softc *sc, int idx)
1719 struct hifn_dma *dma = sc->sc_dma;
1721 if (++idx == HIFN_D_DST_RSIZE) {
1722 dma->dstr[idx].l = htole32(HIFN_D_VALID | HIFN_D_JUMP |
1723 HIFN_D_MASKDONEIRQ);
1724 HIFN_DSTR_SYNC(sc, idx,
1725 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
/*
 * Fill the dst ring with the command's destination segments. All but the
 * last segment are queued whole; the last is split so that any trailing
 * partial word (sloplen) is redirected into the per-command slop area,
 * and that final descriptor carries HIFN_D_LAST.
 */
1732 hifn_dmamap_load_dst(struct hifn_softc *sc, struct hifn_command *cmd)
1734 struct hifn_dma *dma = sc->sc_dma;
1735 struct hifn_operand *dst = &cmd->dst;
1737 int idx, used = 0, i;
1740 for (i = 0; i < dst->nsegs - 1; i++) {
1741 dma->dstr[idx].p = htole32(dst->segs[i].ds_addr);
1742 dma->dstr[idx].l = htole32(HIFN_D_VALID |
1743 HIFN_D_MASKDONEIRQ | dst->segs[i].ds_len);
1744 HIFN_DSTR_SYNC(sc, idx,
1745 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1748 idx = hifn_dmamap_dstwrap(sc, idx);
1751 if (cmd->sloplen == 0) {
1752 p = dst->segs[i].ds_addr;
1753 l = HIFN_D_VALID | HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
1754 dst->segs[i].ds_len;
/* unaligned tail: point the last descriptor at the slop word instead */
1756 p = sc->sc_dma_physaddr +
1757 offsetof(struct hifn_dma, slop[cmd->slopidx]);
1758 l = HIFN_D_VALID | HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
1761 if ((dst->segs[i].ds_len - cmd->sloplen) != 0) {
1762 dma->dstr[idx].p = htole32(dst->segs[i].ds_addr);
1763 dma->dstr[idx].l = htole32(HIFN_D_VALID |
1764 HIFN_D_MASKDONEIRQ |
1765 (dst->segs[i].ds_len - cmd->sloplen));
1766 HIFN_DSTR_SYNC(sc, idx,
1767 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1770 idx = hifn_dmamap_dstwrap(sc, idx);
1773 dma->dstr[idx].p = htole32(p);
1774 dma->dstr[idx].l = htole32(l);
1775 HIFN_DSTR_SYNC(sc, idx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1778 idx = hifn_dmamap_dstwrap(sc, idx);
/*
 * Advance a src-ring index by one, arming the JUMP descriptor at the ring
 * end when the index reaches the ring size (reset to 0 and return are on
 * lines elided from this listing).
 */
1786 hifn_dmamap_srcwrap(struct hifn_softc *sc, int idx)
1788 struct hifn_dma *dma = sc->sc_dma;
1790 if (++idx == HIFN_D_SRC_RSIZE) {
1791 dma->srcr[idx].l = htole32(HIFN_D_VALID |
1792 HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
1793 HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE,
1794 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
/*
 * Queue every source segment of the command onto the src ring; the final
 * segment is tagged (via 'last', set on an elided line) with HIFN_D_LAST.
 * Updates the ring's in-use count by the number of segments queued.
 */
1801 hifn_dmamap_load_src(struct hifn_softc *sc, struct hifn_command *cmd)
1803 struct hifn_dma *dma = sc->sc_dma;
1804 struct hifn_operand *src = &cmd->src;
1809 for (i = 0; i < src->nsegs; i++) {
1810 if (i == src->nsegs - 1)
1813 dma->srcr[idx].p = htole32(src->segs[i].ds_addr);
1814 dma->srcr[idx].l = htole32(src->segs[i].ds_len |
1815 HIFN_D_VALID | HIFN_D_MASKDONEIRQ | last);
1816 HIFN_SRCR_SYNC(sc, idx,
1817 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1819 idx = hifn_dmamap_srcwrap(sc, idx);
1822 dma->srcu += src->nsegs;
/*
 * bus_dma load callback: record the total mapped size and copy the segment
 * list into the hifn_operand passed as 'arg'. Asserts the segment count
 * fits the operand's fixed MAX_SCATTER array.
 */
1827 hifn_op_cb(void* arg, bus_dma_segment_t *seg, int nsegs, bus_size_t mapsize, int error)
1829 struct hifn_operand *op = arg;
1831 KASSERT(nsegs <= MAX_SCATTER,
1832 ("hifn_op_cb: too many DMA segments (%u > %u) "
1833 "returned when mapping operand", nsegs, MAX_SCATTER));
1834 op->mapsize = mapsize;
1836 bcopy(seg, op->segs, nsegs * sizeof (seg[0]));
/*
 * NOTE(review): the function name line ("hifn_crypto(") is elided from this
 * listing; this is the main dispatch path that maps a cryptop's buffers for
 * DMA, queues cmd/src/dst/res descriptors, and kicks the engine. On
 * resource shortage it returns ERESTART (requeue) when work is already
 * posted, otherwise ENOMEM; error paths at the bottom unwind the maps.
 */
1841 struct hifn_softc *sc,
1842 struct hifn_command *cmd,
1843 struct cryptop *crp,
1846 struct hifn_dma *dma = sc->sc_dma;
1847 u_int32_t cmdlen, csr;
1848 int cmdi, resi, err = 0;
1851 * need 1 cmd, and 1 res
1853 * NB: check this first since it's easy.
1856 if ((dma->cmdu + 1) > HIFN_D_CMD_RSIZE ||
1857 (dma->resu + 1) > HIFN_D_RES_RSIZE) {
1860 device_printf(sc->sc_dev,
1861 "cmd/result exhaustion, cmdu %u resu %u\n",
1862 dma->cmdu, dma->resu);
1865 hifnstats.hst_nomem_cr++;
1870 if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, &cmd->src_map)) {
1871 hifnstats.hst_nomem_map++;
/* map the source buffer: mbuf chain or uio, depending on crp flags */
1876 if (crp->crp_flags & CRYPTO_F_IMBUF) {
1877 if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->src_map,
1878 cmd->src_m, hifn_op_cb, &cmd->src, BUS_DMA_NOWAIT)) {
1879 hifnstats.hst_nomem_load++;
1883 } else if (crp->crp_flags & CRYPTO_F_IOV) {
1885 cmd->src_io->uio_segflg = UIO_USERSPACE;
1887 if (bus_dmamap_load_uio(sc->sc_dmat, cmd->src_map,
1888 cmd->src_io, hifn_op_cb, &cmd->src, BUS_DMA_NOWAIT)) {
1889 hifnstats.hst_nomem_load++;
/* aligned source can be used in place as the destination too */
1898 if (hifn_dmamap_aligned(&cmd->src)) {
1899 cmd->sloplen = cmd->src_mapsize & 3;
1900 cmd->dst = cmd->src;
1902 if (crp->crp_flags & CRYPTO_F_IOV) {
1905 } else if (crp->crp_flags & CRYPTO_F_IMBUF) {
1907 struct mbuf *m, *m0, *mlast;
1909 KASSERT(cmd->dst_m == cmd->src_m,
1910 ("hifn_crypto: dst_m initialized improperly"));
1911 hifnstats.hst_unaligned++;
1913 * Source is not aligned on a longword boundary.
1914 * Copy the data to insure alignment. If we fail
1915 * to allocate mbufs or clusters while doing this
1916 * we return ERESTART so the operation is requeued
1917 * at the crypto later, but only if there are
1918 * ops already posted to the hardware; otherwise we
1919 * have no guarantee that we'll be re-entered.
1921 totlen = cmd->src_mapsize;
1922 if (cmd->src_m->m_flags & M_PKTHDR) {
1924 MGETHDR(m0, MB_DONTWAIT, MT_DATA);
1925 if (m0 && !m_dup_pkthdr(m0, cmd->src_m, MB_DONTWAIT)) {
1931 MGET(m0, MB_DONTWAIT, MT_DATA);
1934 hifnstats.hst_nomem_mbuf++;
1935 err = dma->cmdu ? ERESTART : ENOMEM;
1938 if (totlen >= MINCLSIZE) {
1939 MCLGET(m0, MB_DONTWAIT);
1940 if ((m0->m_flags & M_EXT) == 0) {
1941 hifnstats.hst_nomem_mcl++;
1942 err = dma->cmdu ? ERESTART : ENOMEM;
1949 m0->m_pkthdr.len = m0->m_len = len;
/* keep appending mbufs/clusters until the whole payload fits */
1952 while (totlen > 0) {
1953 MGET(m, MB_DONTWAIT, MT_DATA);
1955 hifnstats.hst_nomem_mbuf++;
1956 err = dma->cmdu ? ERESTART : ENOMEM;
1961 if (totlen >= MINCLSIZE) {
1962 MCLGET(m, MB_DONTWAIT);
1963 if ((m->m_flags & M_EXT) == 0) {
1964 hifnstats.hst_nomem_mcl++;
1965 err = dma->cmdu ? ERESTART : ENOMEM;
1974 m0->m_pkthdr.len += len;
1984 if (cmd->dst_map == NULL) {
1985 if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, &cmd->dst_map)) {
1986 hifnstats.hst_nomem_map++;
1990 if (crp->crp_flags & CRYPTO_F_IMBUF) {
1991 if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->dst_map,
1992 cmd->dst_m, hifn_op_cb, &cmd->dst, BUS_DMA_NOWAIT)) {
1993 hifnstats.hst_nomem_map++;
1997 } else if (crp->crp_flags & CRYPTO_F_IOV) {
1999 cmd->dst_io->uio_segflg |= UIO_USERSPACE;
2001 if (bus_dmamap_load_uio(sc->sc_dmat, cmd->dst_map,
2002 cmd->dst_io, hifn_op_cb, &cmd->dst, BUS_DMA_NOWAIT)) {
2003 hifnstats.hst_nomem_load++;
2012 device_printf(sc->sc_dev,
2013 "Entering cmd: stat %8x ien %8x u %d/%d/%d/%d n %d/%d\n",
2014 READ_REG_1(sc, HIFN_1_DMA_CSR),
2015 READ_REG_1(sc, HIFN_1_DMA_IER),
2016 dma->cmdu, dma->srcu, dma->dstu, dma->resu,
2017 cmd->src_nsegs, cmd->dst_nsegs);
2021 if (cmd->src_map == cmd->dst_map) {
2022 bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2023 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
2025 bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2026 BUS_DMASYNC_PREWRITE);
2027 bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
2028 BUS_DMASYNC_PREREAD);
2032 * need N src, and N dst
2034 if ((dma->srcu + cmd->src_nsegs) > HIFN_D_SRC_RSIZE ||
2035 (dma->dstu + cmd->dst_nsegs + 1) > HIFN_D_DST_RSIZE) {
2038 device_printf(sc->sc_dev,
2039 "src/dst exhaustion, srcu %u+%u dstu %u+%u\n",
2040 dma->srcu, cmd->src_nsegs,
2041 dma->dstu, cmd->dst_nsegs);
2044 hifnstats.hst_nomem_sd++;
2049 if (dma->cmdi == HIFN_D_CMD_RSIZE) {
2051 dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID |
2052 HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
2053 HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
2054 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2057 cmdlen = hifn_write_command(cmd, dma->command_bufs[cmdi]);
2058 HIFN_CMD_SYNC(sc, cmdi, BUS_DMASYNC_PREWRITE);
2060 /* .p for command/result already set */
2061 dma->cmdr[cmdi].l = htole32(cmdlen | HIFN_D_VALID | HIFN_D_LAST |
2062 HIFN_D_MASKDONEIRQ);
2063 HIFN_CMDR_SYNC(sc, cmdi,
2064 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2068 * We don't worry about missing an interrupt (which a "command wait"
2069 * interrupt salvages us from), unless there is more than one command
2072 if (dma->cmdu > 1) {
2073 sc->sc_dmaier |= HIFN_DMAIER_C_WAIT;
2074 WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
2077 hifnstats.hst_ipackets++;
2078 hifnstats.hst_ibytes += cmd->src_mapsize;
2080 hifn_dmamap_load_src(sc, cmd);
2083 * Unlike other descriptors, we don't mask done interrupt from
2084 * result descriptor.
2088 kprintf("load res\n");
2090 if (dma->resi == HIFN_D_RES_RSIZE) {
2092 dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID |
2093 HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
2094 HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
2095 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2098 KASSERT(dma->hifn_commands[resi] == NULL,
2099 ("hifn_crypto: command slot %u busy", resi));
2100 dma->hifn_commands[resi] = cmd;
2101 HIFN_RES_SYNC(sc, resi, BUS_DMASYNC_PREREAD);
/* batching: while more ops are hinted and under hifn_maxbatch, mask the
 * done IRQ on this result so completions coalesce into one interrupt */
2102 if ((hint & CRYPTO_HINT_MORE) && sc->sc_curbatch < hifn_maxbatch) {
2103 dma->resr[resi].l = htole32(HIFN_MAX_RESULT |
2104 HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ);
2106 if (sc->sc_curbatch > hifnstats.hst_maxbatch)
2107 hifnstats.hst_maxbatch = sc->sc_curbatch;
2108 hifnstats.hst_totbatch++;
2110 dma->resr[resi].l = htole32(HIFN_MAX_RESULT |
2111 HIFN_D_VALID | HIFN_D_LAST);
2112 sc->sc_curbatch = 0;
2114 HIFN_RESR_SYNC(sc, resi,
2115 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2119 cmd->slopidx = resi;
2121 hifn_dmamap_load_dst(sc, cmd);
/* enable only the ring engines that are not already running */
2124 if (sc->sc_c_busy == 0) {
2125 csr |= HIFN_DMACSR_C_CTRL_ENA;
2128 if (sc->sc_s_busy == 0) {
2129 csr |= HIFN_DMACSR_S_CTRL_ENA;
2132 if (sc->sc_r_busy == 0) {
2133 csr |= HIFN_DMACSR_R_CTRL_ENA;
2136 if (sc->sc_d_busy == 0) {
2137 csr |= HIFN_DMACSR_D_CTRL_ENA;
2141 WRITE_REG_1(sc, HIFN_1_DMA_CSR, csr);
2145 device_printf(sc->sc_dev, "command: stat %8x ier %8x\n",
2146 READ_REG_1(sc, HIFN_1_DMA_CSR),
2147 READ_REG_1(sc, HIFN_1_DMA_IER));
2153 KASSERT(err == 0, ("hifn_crypto: success with error %u", err));
2154 return (err); /* success */
/* error unwind: drop dst map/mbuf first (only if distinct), then src */
2157 if (cmd->src_map != cmd->dst_map)
2158 bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
2160 if (cmd->src_map != cmd->dst_map)
2161 bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
2163 if (crp->crp_flags & CRYPTO_F_IMBUF) {
2164 if (cmd->src_m != cmd->dst_m)
2165 m_freem(cmd->dst_m);
2167 bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
2169 bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
/*
 * Once-per-second callout: if the device has been idle (sc_active reached
 * 0), disable each DMA ring engine whose in-use count is zero, then
 * re-arm the callout.
 */
2175 hifn_tick(void* vsc)
2177 struct hifn_softc *sc = vsc;
2180 if (sc->sc_active == 0) {
2181 struct hifn_dma *dma = sc->sc_dma;
2184 if (dma->cmdu == 0 && sc->sc_c_busy) {
2186 r |= HIFN_DMACSR_C_CTRL_DIS;
2188 if (dma->srcu == 0 && sc->sc_s_busy) {
2190 r |= HIFN_DMACSR_S_CTRL_DIS;
2192 if (dma->dstu == 0 && sc->sc_d_busy) {
2194 r |= HIFN_DMACSR_D_CTRL_DIS;
2196 if (dma->resu == 0 && sc->sc_r_busy) {
2198 r |= HIFN_DMACSR_R_CTRL_DIS;
2201 WRITE_REG_1(sc, HIFN_1_DMA_CSR, r);
2205 callout_reset(&sc->sc_tickto, hz, hifn_tick, sc);
/*
 * Interrupt handler: acknowledge DMA status bits, report overruns/aborts,
 * reap completed result/src/cmd descriptors (advancing each ring's
 * consumer index 'k' and in-use count 'u'), and unblock the crypto layer
 * if it was waiting for ring space.
 */
2209 hifn_intr(void *arg)
2211 struct hifn_softc *sc = arg;
2212 struct hifn_dma *dma;
2213 u_int32_t dmacsr, restart;
2216 dmacsr = READ_REG_1(sc, HIFN_1_DMA_CSR);
2218 /* Nothing in the DMA unit interrupted */
2219 if ((dmacsr & sc->sc_dmaier) == 0) {
2220 hifnstats.hst_noirq++;
2230 device_printf(sc->sc_dev,
/* NOTE(review): "damier" below is a typo for "dmaier", but it is a runtime
 * format string and is left untouched here. */
2231 "irq: stat %08x ien %08x damier %08x i %d/%d/%d/%d k %d/%d/%d/%d u %d/%d/%d/%d\n",
2232 dmacsr, READ_REG_1(sc, HIFN_1_DMA_IER), sc->sc_dmaier,
2233 dma->cmdi, dma->srci, dma->dsti, dma->resi,
2234 dma->cmdk, dma->srck, dma->dstk, dma->resk,
2235 dma->cmdu, dma->srcu, dma->dstu, dma->resu);
2239 WRITE_REG_1(sc, HIFN_1_DMA_CSR, dmacsr & sc->sc_dmaier);
2241 if ((sc->sc_flags & HIFN_HAS_PUBLIC) &&
2242 (dmacsr & HIFN_DMACSR_PUBDONE))
2243 WRITE_REG_1(sc, HIFN_1_PUB_STATUS,
2244 READ_REG_1(sc, HIFN_1_PUB_STATUS) | HIFN_PUBSTS_DONE);
2246 restart = dmacsr & (HIFN_DMACSR_D_OVER | HIFN_DMACSR_R_OVER);
2248 device_printf(sc->sc_dev, "overrun %x\n", dmacsr);
2250 if (sc->sc_flags & HIFN_IS_7811) {
2251 if (dmacsr & HIFN_DMACSR_ILLR)
2252 device_printf(sc->sc_dev, "illegal read\n");
2253 if (dmacsr & HIFN_DMACSR_ILLW)
2254 device_printf(sc->sc_dev, "illegal write\n");
2257 restart = dmacsr & (HIFN_DMACSR_C_ABORT | HIFN_DMACSR_S_ABORT |
2258 HIFN_DMACSR_D_ABORT | HIFN_DMACSR_R_ABORT);
2260 device_printf(sc->sc_dev, "abort, resetting.\n");
2261 hifnstats.hst_abort++;
2267 if ((dmacsr & HIFN_DMACSR_C_WAIT) && (dma->cmdu == 0)) {
2269 * If no slots to process and we receive a "waiting on
2270 * command" interrupt, we disable the "waiting on command"
2273 sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
2274 WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
2277 /* clear the rings */
2278 i = dma->resk; u = dma->resu;
2280 HIFN_RESR_SYNC(sc, i,
2281 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2282 if (dma->resr[i].l & htole32(HIFN_D_VALID)) {
2283 HIFN_RESR_SYNC(sc, i,
2284 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
/* index RSIZE is the jump descriptor, not a real result slot */
2288 if (i != HIFN_D_RES_RSIZE) {
2289 struct hifn_command *cmd;
2290 u_int8_t *macbuf = NULL;
2292 HIFN_RES_SYNC(sc, i, BUS_DMASYNC_POSTREAD);
2293 cmd = dma->hifn_commands[i];
2294 KASSERT(cmd != NULL,
2295 ("hifn_intr: null command slot %u", i));
2296 dma->hifn_commands[i] = NULL;
2298 if (cmd->base_masks & HIFN_BASE_CMD_MAC) {
2299 macbuf = dma->result_bufs[i];
2303 hifn_callback(sc, cmd, macbuf);
2304 hifnstats.hst_opackets++;
2308 if (++i == (HIFN_D_RES_RSIZE + 1))
2311 dma->resk = i; dma->resu = u;
2313 i = dma->srck; u = dma->srcu;
2315 if (i == HIFN_D_SRC_RSIZE)
2317 HIFN_SRCR_SYNC(sc, i,
2318 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2319 if (dma->srcr[i].l & htole32(HIFN_D_VALID)) {
2320 HIFN_SRCR_SYNC(sc, i,
2321 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2326 dma->srck = i; dma->srcu = u;
2328 i = dma->cmdk; u = dma->cmdu;
2330 HIFN_CMDR_SYNC(sc, i,
2331 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2332 if (dma->cmdr[i].l & htole32(HIFN_D_VALID)) {
2333 HIFN_CMDR_SYNC(sc, i,
2334 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2337 if (i != HIFN_D_CMD_RSIZE) {
2339 HIFN_CMD_SYNC(sc, i, BUS_DMASYNC_POSTWRITE);
2341 if (++i == (HIFN_D_CMD_RSIZE + 1))
2344 dma->cmdk = i; dma->cmdu = u;
2348 if (sc->sc_needwakeup) { /* XXX check high watermark */
2349 int wakeup = sc->sc_needwakeup & (CRYPTO_SYMQ|CRYPTO_ASYMQ);
2352 device_printf(sc->sc_dev,
2353 "wakeup crypto (%x) u %d/%d/%d/%d\n",
2355 dma->cmdu, dma->srcu, dma->dstu, dma->resu);
2357 sc->sc_needwakeup &= ~wakeup;
2358 crypto_unblock(sc->sc_cid, wakeup);
2363 * Allocate a new 'session' and return an encoded session id. 'sidp'
2364 * contains our registration id, and should contain an encoded session
2365 * id on successful allocation.
/*
 * Allocate a session: reuse a free slot in sc_sessions, or grow the array
 * by one (copying, zeroing and freeing the old array). Walks the cryptoini
 * chain to configure MAC/cipher state and, for CBC ciphers, seeds a random
 * IV. Writes the encoded session id through *sidp.
 */
2368 hifn_newsession(device_t dev, u_int32_t *sidp, struct cryptoini *cri)
2370 struct cryptoini *c;
2371 struct hifn_softc *sc = device_get_softc(dev);
2372 int mac = 0, cry = 0, sesn;
2373 struct hifn_session *ses = NULL;
2375 KASSERT(sc != NULL, ("hifn_newsession: null softc"));
2376 if (sidp == NULL || cri == NULL || sc == NULL)
2380 if (sc->sc_sessions == NULL) {
2381 ses = sc->sc_sessions = (struct hifn_session *)kmalloc(
2382 sizeof(*ses), M_DEVBUF, M_NOWAIT);
2388 sc->sc_nsessions = 1;
2390 for (sesn = 0; sesn < sc->sc_nsessions; sesn++) {
2391 if (!sc->sc_sessions[sesn].hs_used) {
2392 ses = &sc->sc_sessions[sesn];
/* no free slot: grow the array by one entry */
2398 sesn = sc->sc_nsessions;
2399 ses = (struct hifn_session *)kmalloc((sesn + 1) *
2400 sizeof(*ses), M_DEVBUF, M_NOWAIT);
/* old array is zeroed before freeing (it may hold key material) */
2405 bcopy(sc->sc_sessions, ses, sesn * sizeof(*ses));
2406 bzero(sc->sc_sessions, sesn * sizeof(*ses));
2407 kfree(sc->sc_sessions, M_DEVBUF);
2408 sc->sc_sessions = ses;
2409 ses = &sc->sc_sessions[sesn];
2415 bzero(ses, sizeof(*ses));
2418 for (c = cri; c != NULL; c = c->cri_next) {
2419 switch (c->cri_alg) {
2422 case CRYPTO_MD5_HMAC:
2423 case CRYPTO_SHA1_HMAC:
/* hs_mlen == 0 means "use the algorithm's default MAC length"
 * (defaults assigned on lines elided from this listing) */
2427 ses->hs_mlen = c->cri_mlen;
2428 if (ses->hs_mlen == 0) {
2429 switch (c->cri_alg) {
2431 case CRYPTO_MD5_HMAC:
2435 case CRYPTO_SHA1_HMAC:
2441 case CRYPTO_DES_CBC:
2442 case CRYPTO_3DES_CBC:
2443 case CRYPTO_AES_CBC:
2444 /* XXX this may read fewer, does it matter? */
2445 read_random(ses->hs_iv,
2446 c->cri_alg == CRYPTO_AES_CBC ?
2447 HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
2458 if (mac == 0 && cry == 0)
2461 *sidp = HIFN_SID(device_get_unit(sc->sc_dev), sesn);
2467 * Deallocate a session.
2468 * XXX this routine should run a zero'd mac/encrypt key into context ram.
2469 * XXX to blow away any keys already stored there.
/* extract the low 32 bits (the driver-local session id) from a 64-bit sid */
2471 #define CRYPTO_SESID2LID(_sid) (((u_int32_t) (_sid)) & 0xffffffff)
/*
 * Free a session by zeroing its slot (which also clears hs_used and any
 * cached key/IV state). See the XXX above: context ram is not scrubbed.
 */
2474 hifn_freesession(device_t dev, u_int64_t tid)
2476 struct hifn_softc *sc = device_get_softc(dev);
2478 u_int32_t sid = CRYPTO_SESID2LID(tid);
2480 KASSERT(sc != NULL, ("hifn_freesession: null softc"));
2485 session = HIFN_SESSION(sid);
2486 if (session < sc->sc_nsessions) {
2487 bzero(&sc->sc_sessions[session], sizeof(struct hifn_session));
2497 hifn_process(device_t dev, struct cryptop *crp, int hint)
2499 struct hifn_softc *sc = device_get_softc(dev);
2500 struct hifn_command *cmd = NULL;
2501 int session, err, ivlen;
2502 struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
2504 if (crp == NULL || crp->crp_callback == NULL) {
2505 hifnstats.hst_invalid++;
2508 session = HIFN_SESSION(crp->crp_sid);
2510 if (sc == NULL || session >= sc->sc_nsessions) {
2515 cmd = kmalloc(sizeof(struct hifn_command), M_DEVBUF, M_INTWAIT | M_ZERO);
2517 if (crp->crp_flags & CRYPTO_F_IMBUF) {
2518 cmd->src_m = (struct mbuf *)crp->crp_buf;
2519 cmd->dst_m = (struct mbuf *)crp->crp_buf;
2520 } else if (crp->crp_flags & CRYPTO_F_IOV) {
2521 cmd->src_io = (struct uio *)crp->crp_buf;
2522 cmd->dst_io = (struct uio *)crp->crp_buf;
2525 goto errout; /* XXX we don't handle contiguous buffers! */
2528 crd1 = crp->crp_desc;
2533 crd2 = crd1->crd_next;
2536 if (crd1->crd_alg == CRYPTO_MD5_HMAC ||
2537 crd1->crd_alg == CRYPTO_SHA1_HMAC ||
2538 crd1->crd_alg == CRYPTO_SHA1 ||
2539 crd1->crd_alg == CRYPTO_MD5) {
2542 } else if (crd1->crd_alg == CRYPTO_DES_CBC ||
2543 crd1->crd_alg == CRYPTO_3DES_CBC ||
2544 crd1->crd_alg == CRYPTO_AES_CBC ||
2545 crd1->crd_alg == CRYPTO_ARC4) {
2546 if ((crd1->crd_flags & CRD_F_ENCRYPT) == 0)
2547 cmd->base_masks |= HIFN_BASE_CMD_DECODE;
2555 if ((crd1->crd_alg == CRYPTO_MD5_HMAC ||
2556 crd1->crd_alg == CRYPTO_SHA1_HMAC ||
2557 crd1->crd_alg == CRYPTO_MD5 ||
2558 crd1->crd_alg == CRYPTO_SHA1) &&
2559 (crd2->crd_alg == CRYPTO_DES_CBC ||
2560 crd2->crd_alg == CRYPTO_3DES_CBC ||
2561 crd2->crd_alg == CRYPTO_AES_CBC ||
2562 crd2->crd_alg == CRYPTO_ARC4) &&
2563 ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) {
2564 cmd->base_masks = HIFN_BASE_CMD_DECODE;
2567 } else if ((crd1->crd_alg == CRYPTO_DES_CBC ||
2568 crd1->crd_alg == CRYPTO_ARC4 ||
2569 crd1->crd_alg == CRYPTO_3DES_CBC ||
2570 crd1->crd_alg == CRYPTO_AES_CBC) &&
2571 (crd2->crd_alg == CRYPTO_MD5_HMAC ||
2572 crd2->crd_alg == CRYPTO_SHA1_HMAC ||
2573 crd2->crd_alg == CRYPTO_MD5 ||
2574 crd2->crd_alg == CRYPTO_SHA1) &&
2575 (crd1->crd_flags & CRD_F_ENCRYPT)) {
2580 * We cannot order the 7751 as requested
2588 cmd->enccrd = enccrd;
2589 cmd->base_masks |= HIFN_BASE_CMD_CRYPT;
2590 switch (enccrd->crd_alg) {
2592 cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_RC4;
2594 case CRYPTO_DES_CBC:
2595 cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_DES |
2596 HIFN_CRYPT_CMD_MODE_CBC |
2597 HIFN_CRYPT_CMD_NEW_IV;
2599 case CRYPTO_3DES_CBC:
2600 cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_3DES |
2601 HIFN_CRYPT_CMD_MODE_CBC |
2602 HIFN_CRYPT_CMD_NEW_IV;
2604 case CRYPTO_AES_CBC:
2605 cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_AES |
2606 HIFN_CRYPT_CMD_MODE_CBC |
2607 HIFN_CRYPT_CMD_NEW_IV;
2613 if (enccrd->crd_alg != CRYPTO_ARC4) {
2614 ivlen = ((enccrd->crd_alg == CRYPTO_AES_CBC) ?
2615 HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
2616 if (enccrd->crd_flags & CRD_F_ENCRYPT) {
2617 if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
2618 bcopy(enccrd->crd_iv, cmd->iv, ivlen);
2620 bcopy(sc->sc_sessions[session].hs_iv,
2623 if ((enccrd->crd_flags & CRD_F_IV_PRESENT)
2625 crypto_copyback(crp->crp_flags,
2626 crp->crp_buf, enccrd->crd_inject,
2630 if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
2631 bcopy(enccrd->crd_iv, cmd->iv, ivlen);
2633 crypto_copydata(crp->crp_flags,
2634 crp->crp_buf, enccrd->crd_inject,
2640 if (enccrd->crd_flags & CRD_F_KEY_EXPLICIT)
2641 cmd->cry_masks |= HIFN_CRYPT_CMD_NEW_KEY;
2642 cmd->ck = enccrd->crd_key;
2643 cmd->cklen = enccrd->crd_klen >> 3;
2644 cmd->cry_masks |= HIFN_CRYPT_CMD_NEW_KEY;
2647 * Need to specify the size for the AES key in the masks.
2649 if ((cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) ==
2650 HIFN_CRYPT_CMD_ALG_AES) {
2651 switch (cmd->cklen) {
2653 cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_128;
2656 cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_192;
2659 cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_256;
2669 cmd->maccrd = maccrd;
2670 cmd->base_masks |= HIFN_BASE_CMD_MAC;
2672 switch (maccrd->crd_alg) {
2674 cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 |
2675 HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH |
2676 HIFN_MAC_CMD_POS_IPSEC;
2678 case CRYPTO_MD5_HMAC:
2679 cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 |
2680 HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC |
2681 HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC;
2684 cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 |
2685 HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH |
2686 HIFN_MAC_CMD_POS_IPSEC;
2688 case CRYPTO_SHA1_HMAC:
2689 cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 |
2690 HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC |
2691 HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC;
2695 if (maccrd->crd_alg == CRYPTO_SHA1_HMAC ||
2696 maccrd->crd_alg == CRYPTO_MD5_HMAC) {
2697 cmd->mac_masks |= HIFN_MAC_CMD_NEW_KEY;
2698 bcopy(maccrd->crd_key, cmd->mac, maccrd->crd_klen >> 3);
2699 bzero(cmd->mac + (maccrd->crd_klen >> 3),
2700 HIFN_MAC_KEY_LENGTH - (maccrd->crd_klen >> 3));
2705 cmd->session_num = session;
2708 err = hifn_crypto(sc, cmd, crp, hint);
2711 } else if (err == ERESTART) {
2713 * There weren't enough resources to dispatch the request
2714 * to the part. Notify the caller so they'll requeue this
2715 * request and resubmit it again soon.
2719 device_printf(sc->sc_dev, "requeue request\n");
2721 kfree(cmd, M_DEVBUF);
2722 sc->sc_needwakeup |= CRYPTO_SYMQ;
2728 kfree(cmd, M_DEVBUF);
2730 hifnstats.hst_invalid++;
2732 hifnstats.hst_nomem++;
2733 crp->crp_etype = err;
/*
 * hifn_abort() -- abort everything outstanding on the result ring after a
 * DMA error/timeout.  Entries whose HIFN_D_VALID bit has already cleared
 * completed in hardware and are salvaged through hifn_callback(); the rest
 * are torn down and their cryptop is completed with an error, after which
 * the board is reset and reinitialized.
 *
 * NOTE(review): this chunk is an elided listing -- interior lines are
 * missing between the numbered fragments below (loop header, else arms,
 * closing braces), so comments describe only what is visible.
 */
2739 hifn_abort(struct hifn_softc *sc)
2741 struct hifn_dma *dma = sc->sc_dma;
2742 struct hifn_command *cmd;
2743 struct cryptop *crp;
/* i: oldest outstanding result slot; u: count of in-flight result entries. */
2746 i = dma->resk; u = dma->resu;
/* (loop over the u outstanding slots -- header elided from this view) */
2748 cmd = dma->hifn_commands[i];
2749 KASSERT(cmd != NULL, ("hifn_abort: null command slot %u", i));
/* Claim the slot so the normal completion path cannot process it twice. */
2750 dma->hifn_commands[i] = NULL;
/* VALID cleared => the part finished this op; salvage it as a success. */
2753 if ((dma->resr[i].l & htole32(HIFN_D_VALID)) == 0) {
2754 /* Salvage what we can. */
/* If a MAC was requested, hand the result buffer to the callback. */
2757 if (cmd->base_masks & HIFN_BASE_CMD_MAC) {
2758 macbuf = dma->result_bufs[i];
2762 hifnstats.hst_opackets++;
2763 hifn_callback(sc, cmd, macbuf);
/* Otherwise the op never completed: sync the DMA maps before teardown.
 * Shared src/dst map gets both directions in one sync. */
2765 if (cmd->src_map == cmd->dst_map) {
2766 bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2767 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
/* (else arm -- separate maps are synced individually) */
2769 bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2770 BUS_DMASYNC_POSTWRITE);
2771 bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
2772 BUS_DMASYNC_POSTREAD);
/* mbuf case: when the driver allocated a separate dst chain, free the
 * caller's src chain and hand the dst chain back in crp_buf. */
2775 if (cmd->src_m != cmd->dst_m) {
2776 m_freem(cmd->src_m);
2777 crp->crp_buf = (caddr_t)cmd->dst_m;
2780 /* non-shared buffers cannot be restarted */
2781 if (cmd->src_map != cmd->dst_map) {
2783 * XXX should be EAGAIN, delayed until
/* Fail the request; ENOMEM rather than EAGAIN (see XXX above). */
2786 crp->crp_etype = ENOMEM;
2787 bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
2788 bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
2790 crp->crp_etype = ENOMEM;
2792 bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
2793 bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
2795 kfree(cmd, M_DEVBUF);
/* EAGAIN requests are requeued by the framework, not completed here.
 * NOTE(review): the crypto_done()/completion call is in an elided line. */
2796 if (crp->crp_etype != EAGAIN)
/* Advance the ring index with wraparound. */
2800 if (++i == HIFN_D_RES_RSIZE)
/* Write the consumed positions back to the shared ring state. */
2804 dma->resk = i; dma->resu = u;
/* Full recovery: reset and reprogram the board. */
2806 hifn_reset_board(sc, 1);
2808 hifn_init_pci_registers(sc);
/*
 * hifn_callback() -- per-command completion processing.  Syncs the DMA
 * maps, fixes up the mbuf chain when src and dst differ, copies back any
 * "slop" tail bytes, retires consumed dst-ring descriptors, saves the CBC
 * IV for the session, copies the MAC result (if any) into the request
 * buffer, and releases the command's DMA resources.
 *
 * NOTE(review): elided listing -- interior lines (else keywords, closing
 * braces, the final crypto_done() call) are missing between the numbered
 * fragments below; comments cover only what is visible.
 */
2812 hifn_callback(struct hifn_softc *sc, struct hifn_command *cmd, u_int8_t *macbuf)
2814 struct hifn_dma *dma = sc->sc_dma;
2815 struct cryptop *crp = cmd->crp;
2816 struct cryptodesc *crd;
2818 int totlen, i, u, ivlen;
/* Post-DMA sync: one combined sync for a shared map, otherwise each
 * map gets its own direction. */
2820 if (cmd->src_map == cmd->dst_map) {
2821 bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2822 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
2824 bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2825 BUS_DMASYNC_POSTWRITE);
2826 bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
2827 BUS_DMASYNC_POSTREAD);
/* mbuf requests with a driver-allocated dst chain: trim the dst chain
 * to the source length, then replace the caller's buffer with it. */
2830 if (crp->crp_flags & CRYPTO_F_IMBUF) {
2831 if (cmd->src_m != cmd->dst_m) {
2832 crp->crp_buf = (caddr_t)cmd->dst_m;
2833 totlen = cmd->src_mapsize;
2834 for (m = cmd->dst_m; m != NULL; m = m->m_next) {
2835 if (totlen < m->m_len) {
/* Preserve the packet-header length, then free the original chain. */
2841 cmd->dst_m->m_pkthdr.len = cmd->src_m->m_pkthdr.len;
2842 m_freem(cmd->src_m);
/* Copy back the unaligned tail ("slop") that was staged in the shared
 * dma->slop area rather than DMA'd directly. */
2846 if (cmd->sloplen != 0) {
2847 crypto_copyback(crp->crp_flags, crp->crp_buf,
2848 cmd->src_mapsize - cmd->sloplen, cmd->sloplen,
2849 (caddr_t)&dma->slop[cmd->slopidx]);
/* Retire consumed destination-ring descriptors, wrapping at the ring
 * size; descriptors still marked VALID stop the scan. */
2852 i = dma->dstk; u = dma->dstu;
2854 if (i == HIFN_D_DST_RSIZE)
2856 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
2857 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2858 if (dma->dstr[i].l & htole32(HIFN_D_VALID)) {
2859 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
2860 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2865 dma->dstk = i; dma->dstu = u;
2867 hifnstats.hst_obytes += cmd->dst_mapsize;
/* Encrypt-only ops (CRYPT set, DECODE clear): save the last cipher
 * block of the output as the session IV for chaining the next request. */
2869 if ((cmd->base_masks & (HIFN_BASE_CMD_CRYPT | HIFN_BASE_CMD_DECODE)) ==
2870 HIFN_BASE_CMD_CRYPT) {
2871 for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
2872 if (crd->crd_alg != CRYPTO_DES_CBC &&
2873 crd->crd_alg != CRYPTO_3DES_CBC &&
2874 crd->crd_alg != CRYPTO_AES_CBC)
2876 ivlen = ((crd->crd_alg == CRYPTO_AES_CBC) ?
2877 HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
2878 crypto_copydata(crp->crp_flags, crp->crp_buf,
2879 crd->crd_skip + crd->crd_len - ivlen, ivlen,
2880 cmd->softc->sc_sessions[cmd->session_num].hs_iv);
/* Copy the MAC/hash result into the request at crd_inject; hs_mlen is
 * the per-session truncated digest length. */
2885 if (macbuf != NULL) {
2886 for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
2889 if (crd->crd_alg != CRYPTO_MD5 &&
2890 crd->crd_alg != CRYPTO_SHA1 &&
2891 crd->crd_alg != CRYPTO_MD5_HMAC &&
2892 crd->crd_alg != CRYPTO_SHA1_HMAC) {
2895 len = cmd->softc->sc_sessions[cmd->session_num].hs_mlen;
2896 crypto_copyback(crp->crp_flags, crp->crp_buf,
2897 crd->crd_inject, len, macbuf);
/* Release DMA resources: dst map only when it is distinct from src. */
2902 if (cmd->src_map != cmd->dst_map) {
2903 bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
2904 bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
2906 bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
2907 bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
2908 kfree(cmd, M_DEVBUF);
2913 * 7811 PB3 rev/2 parts lock-up on burst writes to Group 0
2914 * and Group 1 registers; avoid conditions that could create
2915 * burst writes by doing a read in between the writes.
2917 * NB: The read we interpose is always to the same register;
2918 * we do this because reading from an arbitrary (e.g. last)
2919 * register may not always work.
/*
 * hifn_write_reg_0() -- write a BAR0 (Group 0) register.  On 7811 parts,
 * back-to-back writes to consecutive registers can cause a burst-write
 * lockup, so when the previous BAR0 write was to the immediately
 * preceding register a harmless read of HIFN_0_PUCNFG is interposed to
 * break the burst.  sc_bar0_lastreg tracks the last register written.
 * NOTE(review): elided listing -- the braces around the 7811 workaround
 * are in lines missing from this view.
 */
2922 hifn_write_reg_0(struct hifn_softc *sc, bus_size_t reg, u_int32_t val)
2924 if (sc->sc_flags & HIFN_IS_7811) {
2925 if (sc->sc_bar0_lastreg == reg - 4)
2926 bus_space_read_4(sc->sc_st0, sc->sc_sh0, HIFN_0_PUCNFG);
2927 sc->sc_bar0_lastreg = reg;
2929 bus_space_write_4(sc->sc_st0, sc->sc_sh0, reg, val);
2933 hifn_write_reg_1(struct hifn_softc *sc, bus_size_t reg, u_int32_t val)
2935 if (sc->sc_flags & HIFN_IS_7811) {
2936 if (sc->sc_bar1_lastreg == reg - 4)
2937 bus_space_read_4(sc->sc_st1, sc->sc_sh1, HIFN_1_REVID);
2938 sc->sc_bar1_lastreg = reg;
2940 bus_space_write_4(sc->sc_st1, sc->sc_sh1, reg, val);