1 /* $FreeBSD: src/sys/dev/hifn/hifn7751.c,v 1.5.2.5 2003/06/04 17:56:59 sam Exp $ */
2 /* $OpenBSD: hifn7751.c,v 1.120 2002/05/17 00:33:34 deraadt Exp $       */
3
4 /*
5  * Invertex AEON / Hifn 7751 driver
6  * Copyright (c) 1999 Invertex Inc. All rights reserved.
7  * Copyright (c) 1999 Theo de Raadt
8  * Copyright (c) 2000-2001 Network Security Technologies, Inc.
9  *                      http://www.netsec.net
10  * Copyright (c) 2003 Hifn Inc.
11  *
12  * This driver is based on a previous driver by Invertex, for which they
13  * requested:  Please send any comments, feedback, bug-fixes, or feature
14  * requests to software@invertex.com.
15  *
16  * Redistribution and use in source and binary forms, with or without
17  * modification, are permitted provided that the following conditions
18  * are met:
19  *
20  * 1. Redistributions of source code must retain the above copyright
21  *   notice, this list of conditions and the following disclaimer.
22  * 2. Redistributions in binary form must reproduce the above copyright
23  *   notice, this list of conditions and the following disclaimer in the
24  *   documentation and/or other materials provided with the distribution.
25  * 3. The name of the author may not be used to endorse or promote products
26  *   derived from this software without specific prior written permission.
27  *
28  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
29  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
30  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
31  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
32  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
33  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
34  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
35  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
36  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
37  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
38  *
39  * Effort sponsored in part by the Defense Advanced Research Projects
40  * Agency (DARPA) and Air Force Research Laboratory, Air Force
41  * Materiel Command, USAF, under agreement number F30602-01-2-0537.
42  *
43  */
44
45 /*
46  * Driver for various Hifn encryption processors.
47  */
48 #include "opt_hifn.h"
49
50 #include <sys/param.h>
51 #include <sys/systm.h>
52 #include <sys/proc.h>
53 #include <sys/errno.h>
54 #include <sys/malloc.h>
55 #include <sys/kernel.h>
56 #include <sys/mbuf.h>
57 #include <sys/sysctl.h>
58 #include <sys/bus.h>
59 #include <sys/rman.h>
60 #include <sys/random.h>
61 #include <sys/thread2.h>
62 #include <sys/uio.h>
63
64 #include <vm/vm.h>
65 #include <vm/pmap.h>
66
67 #include <machine/clock.h>
68 #include <opencrypto/cryptodev.h>
69
70 #include "cryptodev_if.h"
71
72 #include <bus/pci/pcivar.h>
73 #include <bus/pci/pcireg.h>
74
75 #ifdef HIFN_RNDTEST
76 #include "../rndtest/rndtest.h"
77 #endif
78 #include "hifn7751reg.h"
79 #include "hifn7751var.h"
80
81 /*
82  * Prototypes for the device methods and support routines
83  */
84 static  int hifn_probe(device_t);
85 static  int hifn_attach(device_t);
86 static  int hifn_detach(device_t);
87 static  int hifn_suspend(device_t);
88 static  int hifn_resume(device_t);
89 static  void hifn_shutdown(device_t);
90
91 static  void hifn_reset_board(struct hifn_softc *, int);
92 static  void hifn_reset_puc(struct hifn_softc *);
93 static  void hifn_puc_wait(struct hifn_softc *);
94 static  int hifn_enable_crypto(struct hifn_softc *);
95 static  void hifn_set_retry(struct hifn_softc *sc);
96 static  void hifn_init_dma(struct hifn_softc *);
97 static  void hifn_init_pci_registers(struct hifn_softc *);
98 static  int hifn_sramsize(struct hifn_softc *);
99 static  int hifn_dramsize(struct hifn_softc *);
100 static  int hifn_ramtype(struct hifn_softc *);
101 static  void hifn_sessions(struct hifn_softc *);
102 static  void hifn_intr(void *);
103 static  u_int hifn_write_command(struct hifn_command *, u_int8_t *);
104 static  u_int32_t hifn_next_signature(u_int32_t a, u_int cnt);
105 static  int hifn_newsession(device_t, u_int32_t *, struct cryptoini *);
106 static  int hifn_freesession(device_t, u_int64_t);
107 static  int hifn_process(device_t, struct cryptop *, int);
108 static  void hifn_callback(struct hifn_softc *, struct hifn_command *, u_int8_t *);
109 static  int hifn_crypto(struct hifn_softc *, struct hifn_command *, struct cryptop *, int);
110 static  int hifn_readramaddr(struct hifn_softc *, int, u_int8_t *);
111 static  int hifn_writeramaddr(struct hifn_softc *, int, u_int8_t *);
112 static  int hifn_dmamap_load_src(struct hifn_softc *, struct hifn_command *);
113 static  int hifn_dmamap_load_dst(struct hifn_softc *, struct hifn_command *);
114 static  int hifn_init_pubrng(struct hifn_softc *);
115 #ifndef HIFN_NO_RNG
116 static  void hifn_rng(void *);
117 #endif
118 static  void hifn_tick(void *);
119 static  void hifn_abort(struct hifn_softc *);
120 static  void hifn_alloc_slot(struct hifn_softc *, int *, int *, int *, int *);
121
122 static  void hifn_write_reg_0(struct hifn_softc *, bus_size_t, u_int32_t);
123 static  void hifn_write_reg_1(struct hifn_softc *, bus_size_t, u_int32_t);
124
125
126 static device_method_t hifn_methods[] = {
127         /* Device interface */
128         DEVMETHOD(device_probe,         hifn_probe),
129         DEVMETHOD(device_attach,        hifn_attach),
130         DEVMETHOD(device_detach,        hifn_detach),
131         DEVMETHOD(device_suspend,       hifn_suspend),
132         DEVMETHOD(device_resume,        hifn_resume),
133         DEVMETHOD(device_shutdown,      hifn_shutdown),
134
135         /* bus interface */
136         DEVMETHOD(bus_print_child,      bus_generic_print_child),
137         DEVMETHOD(bus_driver_added,     bus_generic_driver_added),
138
139         /* crypto device methods */
140         DEVMETHOD(cryptodev_newsession, hifn_newsession),
141         DEVMETHOD(cryptodev_freesession,hifn_freesession),
142         DEVMETHOD(cryptodev_process,    hifn_process),
143
144         DEVMETHOD_END
145 };
146 static driver_t hifn_driver = {
147         "hifn",
148         hifn_methods,
149         sizeof (struct hifn_softc)
150 };
151 static devclass_t hifn_devclass;
152
153 DECLARE_DUMMY_MODULE(hifn);
154 DRIVER_MODULE(hifn, pci, hifn_driver, hifn_devclass, NULL, NULL);
155 MODULE_DEPEND(hifn, crypto, 1, 1, 1);
156 #ifdef HIFN_RNDTEST
157 MODULE_DEPEND(hifn, rndtest, 1, 1, 1);
158 #endif
159
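/*
 * BAR0/BAR1 register read helpers.  A read also clears the cached
 * "last register written" value used by hifn_write_reg_0() and
 * hifn_write_reg_1() (defined later in this file).
 */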
160 static __inline__ u_int32_t
161 READ_REG_0(struct hifn_softc *sc, bus_size_t reg)
162 {
163     u_int32_t v = bus_space_read_4(sc->sc_st0, sc->sc_sh0, reg);
164     sc->sc_bar0_lastreg = (bus_size_t) -1;
165     return (v);
166 }
167 #define WRITE_REG_0(sc, reg, val)       hifn_write_reg_0(sc, reg, val)
168
169 static __inline__ u_int32_t
170 READ_REG_1(struct hifn_softc *sc, bus_size_t reg)
171 {
172     u_int32_t v = bus_space_read_4(sc->sc_st1, sc->sc_sh1, reg);
173     sc->sc_bar1_lastreg = (bus_size_t) -1;
174     return (v);
175 }
176 #define WRITE_REG_1(sc, reg, val)       hifn_write_reg_1(sc, reg, val)
177
178 SYSCTL_NODE(_hw, OID_AUTO, hifn, CTLFLAG_RD, 0, "Hifn driver parameters");
179
180 #ifdef HIFN_DEBUG
181 static  int hifn_debug = 0;
182 SYSCTL_INT(_hw_hifn, OID_AUTO, debug, CTLFLAG_RW, &hifn_debug,
183             0, "control debugging msgs");
184 #endif
185
186 static  struct hifn_stats hifnstats;
187 SYSCTL_STRUCT(_hw_hifn, OID_AUTO, stats, CTLFLAG_RD, &hifnstats,
188             hifn_stats, "driver statistics");
189 static  int hifn_maxbatch = 1;
190 SYSCTL_INT(_hw_hifn, OID_AUTO, maxbatch, CTLFLAG_RW, &hifn_maxbatch,
191             0, "max ops to batch w/o interrupt");
192
193 /*
194  * Probe for a supported device.  The PCI vendor and device
195  * IDs are used to detect devices we know how to handle.
196  */
197 static int
198 hifn_probe(device_t dev)
199 {
200         if (pci_get_vendor(dev) == PCI_VENDOR_INVERTEX &&
201             pci_get_device(dev) == PCI_PRODUCT_INVERTEX_AEON)
202                 return (0);
203         if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
204             (pci_get_device(dev) == PCI_PRODUCT_HIFN_7751 ||
205              pci_get_device(dev) == PCI_PRODUCT_HIFN_7951 ||
206              pci_get_device(dev) == PCI_PRODUCT_HIFN_7955 ||
207              pci_get_device(dev) == PCI_PRODUCT_HIFN_7956 ||
208              pci_get_device(dev) == PCI_PRODUCT_HIFN_7811))
209                 return (0);
210         if (pci_get_vendor(dev) == PCI_VENDOR_NETSEC &&
211             pci_get_device(dev) == PCI_PRODUCT_NETSEC_7751)
212                 return (0);
213         if (pci_get_vendor(dev) == PCI_VENDOR_HIFN) {
214                 device_printf(dev,"device id = 0x%x\n", pci_get_device(dev) );
215                 return (0);
216         }
217         return (ENXIO);
218 }
219
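/*
 * bus_dma callback: the DMA area is allocated as a single contiguous
 * segment, so just record its bus address for later use.
 */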
220 static void
221 hifn_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
222 {
223         bus_addr_t *paddr = (bus_addr_t*) arg;
224         *paddr = segs->ds_addr;
225 }
226
227 static const char*
228 hifn_partname(struct hifn_softc *sc)
229 {
230         /* XXX sprintf numbers when not decoded */
231         switch (pci_get_vendor(sc->sc_dev)) {
232         case PCI_VENDOR_HIFN:
233                 switch (pci_get_device(sc->sc_dev)) {
234                 case PCI_PRODUCT_HIFN_6500:     return "Hifn 6500";
235                 case PCI_PRODUCT_HIFN_7751:     return "Hifn 7751";
236                 case PCI_PRODUCT_HIFN_7811:     return "Hifn 7811";
237                 case PCI_PRODUCT_HIFN_7951:     return "Hifn 7951";
238                 case PCI_PRODUCT_HIFN_7955:     return "Hifn 7955";
239                 case PCI_PRODUCT_HIFN_7956:     return "Hifn 7956";
240                 }
241                 return "Hifn unknown-part";
242         case PCI_VENDOR_INVERTEX:
243                 switch (pci_get_device(sc->sc_dev)) {
244                 case PCI_PRODUCT_INVERTEX_AEON: return "Invertex AEON";
245                 }
246                 return "Invertex unknown-part";
247         case PCI_VENDOR_NETSEC:
248                 switch (pci_get_device(sc->sc_dev)) {
249                 case PCI_PRODUCT_NETSEC_7751:   return "NetSec 7751";
250                 }
251                 return "NetSec unknown-part";
252         }
253         return "Unknown-vendor unknown-part";
254 }
255
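/*
 * Fallback entropy harvester: hand each 32-bit word of RNG output to
 * the kernel entropy pool.  Used when rndtest is not attached.
 */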
256 static void
257 default_harvest(struct rndtest_state *rsp, void *buf, u_int count)
258 {
259         u_int32_t *p = (u_int32_t *)buf;
260         for (count /= sizeof (u_int32_t); count; count--)
261                 add_true_randomness(*p++);
262 }
263
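/*
 * Clamp a user-supplied value to the range [min, max], warning when
 * it had to be adjusted.
 */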
264 static u_int
265 checkmaxmin(device_t dev, const char *what, u_int v, u_int min, u_int max)
266 {
267         if (v > max) {
268                 device_printf(dev, "Warning, %s %u out of range, "
269                     "using max %u\n", what, v, max);
270                 v = max;
271         } else if (v < min) {
272                 device_printf(dev, "Warning, %s %u out of range, "
273                     "using min %u\n", what, v, min);
274                 v = min;
275         }
276         return v;
277 }
278
279 /*
280  * Select PLL configuration for 795x parts.  This is complicated in
281  * that we cannot determine the optimal parameters without user input.
282  * The reference clock is derived from an external clock through a
283  * multiplier.  The external clock is either the host bus (i.e. PCI)
284  * or an external clock generator.  When using the PCI bus we assume
285  * the clock is either 33 or 66 MHz; for an external source we cannot
286  * tell the speed.
287  *
288  * PLL configuration is done with a string: "pci" for PCI bus, or "ext"
289  * for an external source, followed by the frequency.  We calculate
290  * the appropriate multiplier and PLL register contents accordingly.
291  * When no configuration is given we default to "ext66" (see below).
292  * If a card is instead using the PCI bus clock in a 33MHz slot then
293  * it will be operating at half speed until the correct information
294  * is provided.
295  *
296  * We use a default of "ext66" because according to Mike Ham
297  * of HiFn, almost every board in existence has an external crystal
298  * populated at 66MHz. Using PCI can be a problem on modern motherboards,
299  * because PCI33 can have clocks from 0 to 33MHz, and some have
300  * non-PCI-compliant spread-spectrum clocks, which can confuse the PLL.
301  */
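/*
 * For example (assuming the usual hint naming picked up by
 * resource_string_value() below), the default could be overridden
 * for unit 0 with something like:
 *
 *	hint.hifn.0.pllconfig="pci33"
 *
 * to describe a card clocked from a 33MHz PCI bus.
 */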
302 static void
303 hifn_getpllconfig(device_t dev, u_int *pll)
304 {
305         char *pllspec;
306         u_int freq, mul, fl, fh;
307         u_int32_t pllconfig;
308         char *nxt;
309
310         if (resource_string_value("hifn", device_get_unit(dev),
311             "pllconfig", &pllspec))
312                 pllspec = "ext66";
313         fl = 33, fh = 66;
314         pllconfig = 0;
315         if (strncmp(pllspec, "ext", 3) == 0) {
316                 pllspec += 3;
317                 pllconfig |= HIFN_PLL_REF_SEL;
318                 switch (pci_get_device(dev)) {
319                 case PCI_PRODUCT_HIFN_7955:
320                 case PCI_PRODUCT_HIFN_7956:
321                         fl = 20, fh = 100;
322                         break;
323 #ifdef notyet
324                 case PCI_PRODUCT_HIFN_7954:
325                         fl = 20, fh = 66;
326                         break;
327 #endif
328                 }
329         } else if (strncmp(pllspec, "pci", 3) == 0)
330                 pllspec += 3;
331         freq = strtoul(pllspec, &nxt, 10);
332         if (nxt == pllspec)
333                 freq = 66;
334         else
335                 freq = checkmaxmin(dev, "frequency", freq, fl, fh);
336         /*
337          * Calculate multiplier.  We target a Fck of 266 MHz,
338          * allowing only even values, possibly rounded down.
339          * Multipliers > 8 must set the charge pump current.
340          */
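        /*
         * For example, the default "ext66" (freq = 66) yields
         * (266 / 66) & ~1 = 4, i.e. ND = 4/2 - 1 = 1 and no
         * charge pump boost since 4 <= 8.
         */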
341         mul = checkmaxmin(dev, "PLL divisor", (266 / freq) &~ 1, 2, 12);
342         pllconfig |= (mul / 2 - 1) << HIFN_PLL_ND_SHIFT;
343         if (mul > 8)
344                 pllconfig |= HIFN_PLL_IS;
345         *pll = pllconfig;
346 }
347
348 /*
349  * Attach an interface that successfully probed.
350  */
351 static int 
352 hifn_attach(device_t dev)
353 {
354         struct hifn_softc *sc = device_get_softc(dev);
355         u_int32_t cmd;
356         caddr_t kva;
357         int rseg, rid;
358         char rbase;
359         u_int16_t ena, rev;
360
361         KASSERT(sc != NULL, ("hifn_attach: null software carrier!"));
362         bzero(sc, sizeof (*sc));
363         sc->sc_dev = dev;
364
365         lockinit(&sc->sc_lock, __DECONST(char *, device_get_nameunit(dev)),
366             0, LK_CANRECURSE);
367
368         /* XXX handle power management */
369
370         /*
371          * The 7951 and 795x have a random number generator and
372          * public key support; note this.
373          */
374         if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
375             (pci_get_device(dev) == PCI_PRODUCT_HIFN_7951 ||
376              pci_get_device(dev) == PCI_PRODUCT_HIFN_7955 ||
377              pci_get_device(dev) == PCI_PRODUCT_HIFN_7956))
378                 sc->sc_flags = HIFN_HAS_RNG | HIFN_HAS_PUBLIC;
379         /*
380          * The 7811 has a random number generator and
381          * we also note its identity because of some quirks.
382          */
383         if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
384             pci_get_device(dev) == PCI_PRODUCT_HIFN_7811)
385                 sc->sc_flags |= HIFN_IS_7811 | HIFN_HAS_RNG;
386
387         /*
388          * The 795x parts support AES.
389          */
390         if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
391             (pci_get_device(dev) == PCI_PRODUCT_HIFN_7955 ||
392              pci_get_device(dev) == PCI_PRODUCT_HIFN_7956)) {
393                 sc->sc_flags |= HIFN_IS_7956 | HIFN_HAS_AES;
394                 /*
395                  * Select PLL configuration.  This depends on the
396                  * bus and board design and must be manually configured
397                  * if the default setting is unacceptable.
398                  */
399                 hifn_getpllconfig(dev, &sc->sc_pllconfig);
400         }
401
402         /*
403          * Configure support for memory-mapped access to
404          * registers and for DMA operations.
405          */
406 #define PCIM_ENA        (PCIM_CMD_MEMEN|PCIM_CMD_BUSMASTEREN)
407         cmd = pci_read_config(dev, PCIR_COMMAND, 4);
408         cmd |= PCIM_ENA;
409         pci_write_config(dev, PCIR_COMMAND, cmd, 4);
410         cmd = pci_read_config(dev, PCIR_COMMAND, 4);
411         if ((cmd & PCIM_ENA) != PCIM_ENA) {
412                 device_printf(dev, "failed to enable %s\n",
413                         (cmd & PCIM_ENA) == 0 ?
414                                 "memory mapping & bus mastering" :
415                         (cmd & PCIM_CMD_MEMEN) == 0 ?
416                                 "memory mapping" : "bus mastering");
417                 goto fail_pci;
418         }
419 #undef PCIM_ENA
420
421         /*
422          * Set up PCI resources.  Note that we record the bus
423          * tag and handle for each register mapping; these are
424          * used by the READ_REG_0, WRITE_REG_0, READ_REG_1,
425          * and WRITE_REG_1 macros throughout the driver.
426          */
427         rid = HIFN_BAR0;
428         sc->sc_bar0res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid,
429                                             0, ~0, 1, RF_ACTIVE);
430         if (sc->sc_bar0res == NULL) {
431                 device_printf(dev, "cannot map bar%d register space\n", 0);
432                 goto fail_pci;
433         }
434         sc->sc_st0 = rman_get_bustag(sc->sc_bar0res);
435         sc->sc_sh0 = rman_get_bushandle(sc->sc_bar0res);
436         sc->sc_bar0_lastreg = (bus_size_t) -1;
437
438         rid = HIFN_BAR1;
439         sc->sc_bar1res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid,
440                                             0, ~0, 1, RF_ACTIVE);
441         if (sc->sc_bar1res == NULL) {
442                 device_printf(dev, "cannot map bar%d register space\n", 1);
443                 goto fail_io0;
444         }
445         sc->sc_st1 = rman_get_bustag(sc->sc_bar1res);
446         sc->sc_sh1 = rman_get_bushandle(sc->sc_bar1res);
447         sc->sc_bar1_lastreg = (bus_size_t) -1;
448
449         hifn_set_retry(sc);
450
451         /*
452          * Set up the area into which the Hifn DMAs its descriptors
453          * and associated data structures.
454          */
455         if (bus_dma_tag_create(NULL,                    /* parent */
456                                1, 0,                    /* alignment,boundary */
457                                BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
458                                BUS_SPACE_MAXADDR,       /* highaddr */
459                                NULL, NULL,              /* filter, filterarg */
460                                HIFN_MAX_DMALEN,         /* maxsize */
461                                MAX_SCATTER,             /* nsegments */
462                                HIFN_MAX_SEGLEN,         /* maxsegsize */
463                                BUS_DMA_ALLOCNOW,        /* flags */
464                                &sc->sc_dmat)) {
465                 device_printf(dev, "cannot allocate DMA tag\n");
466                 goto fail_io1;
467         }
468         if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, &sc->sc_dmamap)) {
469                 device_printf(dev, "cannot create dma map\n");
470                 bus_dma_tag_destroy(sc->sc_dmat);
471                 goto fail_io1;
472         }
473         if (bus_dmamem_alloc(sc->sc_dmat, (void**) &kva, BUS_DMA_NOWAIT, &sc->sc_dmamap)) {
474                 device_printf(dev, "cannot alloc dma buffer\n");
475                 bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap);
476                 bus_dma_tag_destroy(sc->sc_dmat);
477                 goto fail_io1;
478         }
479         if (bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap, kva,
480                              sizeof (*sc->sc_dma),
481                              hifn_dmamap_cb, &sc->sc_dma_physaddr,
482                              BUS_DMA_NOWAIT)) {
483                 device_printf(dev, "cannot load dma map\n");
484                 bus_dmamem_free(sc->sc_dmat, kva, sc->sc_dmamap);
485                 bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap);
486                 bus_dma_tag_destroy(sc->sc_dmat);
487                 goto fail_io1;
488         }
489         sc->sc_dma = (struct hifn_dma *)kva;
490         bzero(sc->sc_dma, sizeof(*sc->sc_dma));
491
492         KASSERT(sc->sc_st0 != 0, ("hifn_attach: null bar0 tag!"));
493         KASSERT(sc->sc_sh0 != 0, ("hifn_attach: null bar0 handle!"));
494         KASSERT(sc->sc_st1 != 0, ("hifn_attach: null bar1 tag!"));
495         KASSERT(sc->sc_sh1 != 0, ("hifn_attach: null bar1 handle!"));
496
497         /*
498          * Reset the board and do the ``secret handshake''
499          * to enable the crypto support.  Then complete the
500          * initialization procedure by setting up the interrupt
501          * and hooking in to the system crypto support so we'll
502          * get used for system services like the crypto device,
503          * IPsec, RNG device, etc.
504          */
505         hifn_reset_board(sc, 0);
506
507         if (hifn_enable_crypto(sc) != 0) {
508                 device_printf(dev, "crypto enabling failed\n");
509                 goto fail_mem;
510         }
511         hifn_reset_puc(sc);
512
513         hifn_init_dma(sc);
514         hifn_init_pci_registers(sc);
515
516         /* XXX can't dynamically determine ram type for 795x; force dram */
517         if (sc->sc_flags & HIFN_IS_7956)
518                 sc->sc_drammodel = 1;
519         else if (hifn_ramtype(sc))
520                 goto fail_mem;
521
522         if (sc->sc_drammodel == 0)
523                 hifn_sramsize(sc);
524         else
525                 hifn_dramsize(sc);
526
527         /*
528          * Workaround for NetSec 7751 rev A: halve the ram size because
529          * two of the address lines were left floating.
530          */
531         if (pci_get_vendor(dev) == PCI_VENDOR_NETSEC &&
532             pci_get_device(dev) == PCI_PRODUCT_NETSEC_7751 &&
533             pci_get_revid(dev) == 0x61) /*XXX???*/
534                 sc->sc_ramsize >>= 1;
535
536         /*
537          * Arrange the interrupt line.
538          */
539         rid = 0;
540         sc->sc_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid,
541                                         0, ~0, 1, RF_SHAREABLE|RF_ACTIVE);
542         if (sc->sc_irq == NULL) {
543                 device_printf(dev, "could not map interrupt\n");
544                 goto fail_mem;
545         }
546         /*
547          * NB: Network code assumes we are blocked with splimp()
548          *     so make sure the IRQ is marked appropriately.
549          */
550         if (bus_setup_intr(dev, sc->sc_irq, INTR_MPSAFE,
551                            hifn_intr, sc,
552                            &sc->sc_intrhand, NULL)) {
553                 device_printf(dev, "could not setup interrupt\n");
554                 goto fail_intr2;
555         }
556
557         hifn_sessions(sc);
558
559         /*
560          * NB: Keep only the low 16 bits; this masks the chip id
561          *     from the 7951.
562          */
563         rev = READ_REG_1(sc, HIFN_1_REVID) & 0xffff;
564
565         rseg = sc->sc_ramsize / 1024;
566         rbase = 'K';
567         if (sc->sc_ramsize >= (1024 * 1024)) {
568                 rbase = 'M';
569                 rseg /= 1024;
570         }
571         device_printf(sc->sc_dev, "%s, rev %u, %d%cB %cram, %u sessions",
572                 hifn_partname(sc), rev,
573                 rseg, rbase, sc->sc_drammodel ? 'd' : 's',
574                 sc->sc_maxses);
575
576         if (sc->sc_flags & HIFN_IS_7956)
577                 kprintf(", pll=0x%x<%s clk, %ux mult>",
578                         sc->sc_pllconfig,
579                         sc->sc_pllconfig & HIFN_PLL_REF_SEL ? "ext" : "pci",
580                         2 + 2*((sc->sc_pllconfig & HIFN_PLL_ND) >> 11));
581         kprintf("\n");
582
583         sc->sc_cid = crypto_get_driverid(dev, CRYPTOCAP_F_HARDWARE);
584         if (sc->sc_cid < 0) {
585                 device_printf(dev, "could not get crypto driver id\n");
586                 goto fail_intr;
587         }
588
589         WRITE_REG_0(sc, HIFN_0_PUCNFG,
590             READ_REG_0(sc, HIFN_0_PUCNFG) | HIFN_PUCNFG_CHIPID);
591         ena = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;
592
593         switch (ena) {
594         case HIFN_PUSTAT_ENA_2:
595                 crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0);
596                 crypto_register(sc->sc_cid, CRYPTO_ARC4, 0, 0);
597                 if (sc->sc_flags & HIFN_HAS_AES)
598                         crypto_register(sc->sc_cid, CRYPTO_AES_CBC,  0, 0);
599                 /*FALLTHROUGH*/
600         case HIFN_PUSTAT_ENA_1:
601                 crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0);
602                 crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0);
603                 crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0);
604                 crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0);
605                 crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0);
606                 break;
607         }
608
609         bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
610             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
611
612         if (sc->sc_flags & (HIFN_HAS_PUBLIC | HIFN_HAS_RNG))
613                 hifn_init_pubrng(sc);
614
615         /* NB: 1 means the callout runs w/o Giant locked */
616         callout_init_mp(&sc->sc_tickto);
617         callout_reset(&sc->sc_tickto, hz, hifn_tick, sc);
618
619         return (0);
620
621 fail_intr:
622         bus_teardown_intr(dev, sc->sc_irq, sc->sc_intrhand);
623 fail_intr2:
624         /* XXX don't store rid */
625         bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq);
626 fail_mem:
627         bus_dmamap_unload(sc->sc_dmat, sc->sc_dmamap);
628         bus_dmamem_free(sc->sc_dmat, sc->sc_dma, sc->sc_dmamap);
629         bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap);
630         bus_dma_tag_destroy(sc->sc_dmat);
631
632         /* Turn off DMA polling */
633         WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
634             HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
635 fail_io1:
636         bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR1, sc->sc_bar1res);
637 fail_io0:
638         bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR0, sc->sc_bar0res);
639 fail_pci:
640         lockuninit(&sc->sc_lock);
641         return (ENXIO);
642 }
643
644 /*
645  * Detach an interface that successfully probed.
646  */
647 static int 
648 hifn_detach(device_t dev)
649 {
650         struct hifn_softc *sc = device_get_softc(dev);
651
652         KASSERT(sc != NULL, ("hifn_detach: null software carrier!"));
653
654         /* disable interrupts */
655         WRITE_REG_1(sc, HIFN_1_DMA_IER, 0);
656
657         /*XXX other resources */
658         callout_stop(&sc->sc_tickto);
659         callout_stop(&sc->sc_rngto);
660 #ifdef HIFN_RNDTEST
661         if (sc->sc_rndtest)
662                 rndtest_detach(sc->sc_rndtest);
663 #endif
664
665         /* Turn off DMA polling */
666         WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
667             HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
668
669         crypto_unregister_all(sc->sc_cid);
670
671         bus_generic_detach(dev);        /*XXX should be no children, right? */
672
673         bus_teardown_intr(dev, sc->sc_irq, sc->sc_intrhand);
674         /* XXX don't store rid */
675         bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq);
676
677         bus_dmamap_unload(sc->sc_dmat, sc->sc_dmamap);
678         bus_dmamem_free(sc->sc_dmat, sc->sc_dma, sc->sc_dmamap);
679         bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap);
680         bus_dma_tag_destroy(sc->sc_dmat);
681
682         bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR1, sc->sc_bar1res);
683         bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR0, sc->sc_bar0res);
684
685         lockuninit(&sc->sc_lock);
686
687         return (0);
688 }
689
690 /*
691  * Stop all chip I/O so that the kernel's probe routines don't
692  * get confused by errant DMAs when rebooting.
693  */
694 static void
695 hifn_shutdown(device_t dev)
696 {
697 #ifdef notyet
698         hifn_stop(device_get_softc(dev));
699 #endif
700 }
701
702 /*
703  * Device suspend routine.  Stop the interface and save some PCI
704  * settings in case the BIOS doesn't restore them properly on
705  * resume.
706  */
707 static int
708 hifn_suspend(device_t dev)
709 {
710         struct hifn_softc *sc = device_get_softc(dev);
711 #ifdef notyet
712         int i;
713
714         hifn_stop(sc);
715         for (i = 0; i < 5; i++)
716                 sc->saved_maps[i] = pci_read_config(dev, PCIR_MAPS + i * 4, 4);
717         sc->saved_biosaddr = pci_read_config(dev, PCIR_BIOS, 4);
718         sc->saved_intline = pci_read_config(dev, PCIR_INTLINE, 1);
719         sc->saved_cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1);
720         sc->saved_lattimer = pci_read_config(dev, PCIR_LATTIMER, 1);
721 #endif
722         sc->sc_suspended = 1;
723
724         return (0);
725 }
726
727 /*
728  * Device resume routine.  Restore some PCI settings in case the BIOS
729  * doesn't, re-enable busmastering, and restart the interface if
730  * appropriate.
731  */
732 static int
733 hifn_resume(device_t dev)
734 {
735         struct hifn_softc *sc = device_get_softc(dev);
736 #ifdef notyet
737         int i;
738
739         /* better way to do this? */
740         for (i = 0; i < 5; i++)
741                 pci_write_config(dev, PCIR_MAPS + i * 4, sc->saved_maps[i], 4);
742         pci_write_config(dev, PCIR_BIOS, sc->saved_biosaddr, 4);
743         pci_write_config(dev, PCIR_INTLINE, sc->saved_intline, 1);
744         pci_write_config(dev, PCIR_CACHELNSZ, sc->saved_cachelnsz, 1);
745         pci_write_config(dev, PCIR_LATTIMER, sc->saved_lattimer, 1);
746
747         /* reenable busmastering */
748         pci_enable_busmaster(dev);
749         pci_enable_io(dev, HIFN_RES);
750
751         /* reinitialize interface if necessary */
752         if (ifp->if_flags & IFF_UP)
753                 rl_init(sc);
754 #endif
755         sc->sc_suspended = 0;
756
757         return (0);
758 }
759
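/*
 * Initialize the public key unit and RNG on parts that have them:
 * hook up the entropy harvester, reset the PK engine, enable the RNG
 * and start its polling callout, and unmask the PK-done interrupt.
 */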
760 static int
761 hifn_init_pubrng(struct hifn_softc *sc)
762 {
763         u_int32_t r;
764         int i;
765
766 #ifdef HIFN_RNDTEST
767         sc->sc_rndtest = rndtest_attach(sc->sc_dev);
768         if (sc->sc_rndtest)
769                 sc->sc_harvest = rndtest_harvest;
770         else
771                 sc->sc_harvest = default_harvest;
772 #else
773         sc->sc_harvest = default_harvest;
774 #endif
775         if ((sc->sc_flags & HIFN_IS_7811) == 0) {
776                 /* Reset 7951 public key/rng engine */
777                 WRITE_REG_1(sc, HIFN_1_PUB_RESET,
778                     READ_REG_1(sc, HIFN_1_PUB_RESET) | HIFN_PUBRST_RESET);
779
780                 for (i = 0; i < 100; i++) {
781                         DELAY(1000);
782                         if ((READ_REG_1(sc, HIFN_1_PUB_RESET) &
783                             HIFN_PUBRST_RESET) == 0)
784                                 break;
785                 }
786
787                 if (i == 100) {
788                         device_printf(sc->sc_dev, "public key init failed\n");
789                         return (1);
790                 }
791         }
792
793 #ifndef HIFN_NO_RNG
794         /* Enable the rng, if available */
795         if (sc->sc_flags & HIFN_HAS_RNG) {
796                 if (sc->sc_flags & HIFN_IS_7811) {
797                         r = READ_REG_1(sc, HIFN_1_7811_RNGENA);
798                         if (r & HIFN_7811_RNGENA_ENA) {
799                                 r &= ~HIFN_7811_RNGENA_ENA;
800                                 WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
801                         }
802                         WRITE_REG_1(sc, HIFN_1_7811_RNGCFG,
803                             HIFN_7811_RNGCFG_DEFL);
804                         r |= HIFN_7811_RNGENA_ENA;
805                         WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
806                 } else
807                         WRITE_REG_1(sc, HIFN_1_RNG_CONFIG,
808                             READ_REG_1(sc, HIFN_1_RNG_CONFIG) |
809                             HIFN_RNGCFG_ENA);
810
811                 sc->sc_rngfirst = 1;
812                 if (hz >= 100)
813                         sc->sc_rnghz = hz / 100;
814                 else
815                         sc->sc_rnghz = 1;
816                 /* NB: 1 means the callout runs w/o Giant locked */
817                 callout_init_mp(&sc->sc_rngto);
818                 callout_reset(&sc->sc_rngto, sc->sc_rnghz, hifn_rng, sc);
819         }
820 #endif
821
822         /* Enable public key engine, if available */
823         if (sc->sc_flags & HIFN_HAS_PUBLIC) {
824                 WRITE_REG_1(sc, HIFN_1_PUB_IEN, HIFN_PUBIEN_DONE);
825                 sc->sc_dmaier |= HIFN_DMAIER_PUBDONE;
826                 WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
827         }
828
829         return (0);
830 }
831
832 #ifndef HIFN_NO_RNG
833 static void
834 hifn_rng(void *vsc)
835 {
836 #define RANDOM_BITS(n)  (n)*sizeof (u_int32_t), (n)*sizeof (u_int32_t)*NBBY, 0
837         struct hifn_softc *sc = vsc;
838         u_int32_t sts, num[2];
839         int i;
840
841         if (sc->sc_flags & HIFN_IS_7811) {
842                 for (i = 0; i < 5; i++) {
843                         sts = READ_REG_1(sc, HIFN_1_7811_RNGSTS);
844                         if (sts & HIFN_7811_RNGSTS_UFL) {
845                                 device_printf(sc->sc_dev,
846                                               "RNG underflow: disabling\n");
847                                 return;
848                         }
849                         if ((sts & HIFN_7811_RNGSTS_RDY) == 0)
850                                 break;
851
852                         /*
853                          * There are at least two words in the RNG FIFO
854                          * at this point.
855                          */
856                         num[0] = READ_REG_1(sc, HIFN_1_7811_RNGDAT);
857                         num[1] = READ_REG_1(sc, HIFN_1_7811_RNGDAT);
858                         /* NB: discard first data read */
859                         if (sc->sc_rngfirst)
860                                 sc->sc_rngfirst = 0;
861                         else
862                                 (*sc->sc_harvest)(sc->sc_rndtest,
863                                         num, sizeof (num));
864                 }
865         } else {
866                 num[0] = READ_REG_1(sc, HIFN_1_RNG_DATA);
867
868                 /* NB: discard first data read */
869                 if (sc->sc_rngfirst)
870                         sc->sc_rngfirst = 0;
871                 else
872                         (*sc->sc_harvest)(sc->sc_rndtest,
873                                 num, sizeof (num[0]));
874         }
875
876         callout_reset(&sc->sc_rngto, sc->sc_rnghz, hifn_rng, sc);
877 #undef RANDOM_BITS
878 }
879 #endif
880
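/*
 * Busy-wait (up to ~5ms) for the processing unit to come out of reset.
 */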
881 static void
882 hifn_puc_wait(struct hifn_softc *sc)
883 {
884         int i;
885         int reg = HIFN_0_PUCTRL;
886
887         if (sc->sc_flags & HIFN_IS_7956) {
888                 reg = HIFN_0_PUCTRL2;
889         }
890
891         for (i = 5000; i > 0; i--) {
892                 DELAY(1);
893                 if (!(READ_REG_0(sc, reg) & HIFN_PUCTRL_RESET))
894                         break;
895         }
896         if (!i)
897                 device_printf(sc->sc_dev, "proc unit did not reset\n");
898 }
899
900 /*
901  * Reset the processing unit.
902  */
903 static void
904 hifn_reset_puc(struct hifn_softc *sc)
905 {
906         int reg = HIFN_0_PUCTRL;
907
908         if (sc->sc_flags & HIFN_IS_7956) {
909                 reg = HIFN_0_PUCTRL2;
910         }
911
912         /* Reset processing unit */
913         WRITE_REG_0(sc, reg, HIFN_PUCTRL_DMAENA);
914         hifn_puc_wait(sc);
915 }
916
917 /*
918  * Set the Retry and TRDY registers; note that we set them to
919  * zero because the 7811 locks up when forced to retry (section
920  * 3.6 of "Specification Update SU-0014-04").  Not clear if we
921  * should do this for all Hifn parts, but it doesn't seem to hurt.
922  */
923 static void
924 hifn_set_retry(struct hifn_softc *sc)
925 {
926         /* NB: RETRY only responds to 8-bit reads/writes */
927         pci_write_config(sc->sc_dev, HIFN_RETRY_TIMEOUT, 0, 1);
928         pci_write_config(sc->sc_dev, HIFN_TRDY_TIMEOUT, 0, 4);
929 }
930
931 /*
932  * Resets the board.  Values in the registers are left as-is
933  * from the reset (i.e. initial values are assigned elsewhere).
934  */
935 static void
936 hifn_reset_board(struct hifn_softc *sc, int full)
937 {
938         u_int32_t reg;
939
940         /*
941          * Set polling in the DMA configuration register to zero.  0x7 avoids
942          * resetting the board and zeros out the other fields.
943          */
944         WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
945             HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
946
947         /*
948          * Now that polling has been disabled, we have to wait 1 ms
949          * before resetting the board.
950          */
951         DELAY(1000);
952
953         /* Reset the DMA unit */
954         if (full) {
955                 WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MODE);
956                 DELAY(1000);
957         } else {
958                 WRITE_REG_1(sc, HIFN_1_DMA_CNFG,
959                     HIFN_DMACNFG_MODE | HIFN_DMACNFG_MSTRESET);
960                 hifn_reset_puc(sc);
961         }
962
963         KASSERT(sc->sc_dma != NULL, ("hifn_reset_board: null DMA tag!"));
964         bzero(sc->sc_dma, sizeof(*sc->sc_dma));
965
966         /* Bring dma unit out of reset */
967         WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
968             HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
969
970         hifn_puc_wait(sc);
971         hifn_set_retry(sc);
972
973         if (sc->sc_flags & HIFN_IS_7811) {
974                 for (reg = 0; reg < 1000; reg++) {
975                         if (READ_REG_1(sc, HIFN_1_7811_MIPSRST) &
976                             HIFN_MIPSRST_CRAMINIT)
977                                 break;
978                         DELAY(1000);
979                 }
980                 if (reg == 1000)
981                         kprintf(": cram init timeout\n");
982         } else {
983           /* set up DMA configuration register #2 */
984           /* turn off all PK and BAR0 swaps */
985           WRITE_REG_1(sc, HIFN_1_DMA_CNFG2,
986                       (3 << HIFN_DMACNFG2_INIT_WRITE_BURST_SHIFT)|
987                       (3 << HIFN_DMACNFG2_INIT_READ_BURST_SHIFT)|
988                       (2 << HIFN_DMACNFG2_TGT_WRITE_BURST_SHIFT)|
989                       (2 << HIFN_DMACNFG2_TGT_READ_BURST_SHIFT));
990         }
991 }
992
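/*
 * Advance the unlock signature one bit at a time: each of the cnt
 * steps computes the parity of the bits selected by mask 0x80080125
 * and shifts it into the LFSR-style running value.
 */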
993 static u_int32_t
994 hifn_next_signature(u_int32_t a, u_int cnt)
995 {
996         int i;
997         u_int32_t v;
998
999         for (i = 0; i < cnt; i++) {
1000
1001                 /* get the parity */
1002                 v = a & 0x80080125;
1003                 v ^= v >> 16;
1004                 v ^= v >> 8;
1005                 v ^= v >> 4;
1006                 v ^= v >> 2;
1007                 v ^= v >> 1;
1008
1009                 a = (v & 1) ^ (a << 1);
1010         }
1011
1012         return a;
1013 }
1014
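/*
 * Table mapping PCI vendor/device IDs to the per-card unlock key
 * ("card_id") consumed by hifn_enable_crypto() below.
 */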
1015 struct pci2id {
1016         u_short         pci_vendor;
1017         u_short         pci_prod;
1018         char            card_id[13];
1019 };
1020 static struct pci2id pci2id[] = {
1021         {
1022                 PCI_VENDOR_HIFN,
1023                 PCI_PRODUCT_HIFN_7951,
1024                 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1025                   0x00, 0x00, 0x00, 0x00, 0x00 }
1026         }, {
1027                 PCI_VENDOR_HIFN,
1028                 PCI_PRODUCT_HIFN_7955,
1029                 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1030                   0x00, 0x00, 0x00, 0x00, 0x00 }
1031         }, {
1032                 PCI_VENDOR_HIFN,
1033                 PCI_PRODUCT_HIFN_7956,
1034                 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1035                   0x00, 0x00, 0x00, 0x00, 0x00 }
1036         }, {
1037                 PCI_VENDOR_NETSEC,
1038                 PCI_PRODUCT_NETSEC_7751,
1039                 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1040                   0x00, 0x00, 0x00, 0x00, 0x00 }
1041         }, {
1042                 PCI_VENDOR_INVERTEX,
1043                 PCI_PRODUCT_INVERTEX_AEON,
1044                 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1045                   0x00, 0x00, 0x00, 0x00, 0x00 }
1046         }, {
1047                 PCI_VENDOR_HIFN,
1048                 PCI_PRODUCT_HIFN_7811,
1049                 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1050                   0x00, 0x00, 0x00, 0x00, 0x00 }
1051         }, {
1052                 /*
1053                  * Other vendors share this PCI ID as well, such as
1054                  * http://www.powercrypt.com, and obviously they also
1055                  * use the same key.
1056                  */
1057                 PCI_VENDOR_HIFN,
1058                 PCI_PRODUCT_HIFN_7751,
1059                 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1060                   0x00, 0x00, 0x00, 0x00, 0x00 }
1061         },
1062 };
1063
1064 /*
1065  * Checks to see if crypto is already enabled.  If it isn't, the unlock
1066  * handshake below is performed to enable it.  The check is important,
1067  * as enabling crypto twice will lock the board.
1068  */
1069 static int 
1070 hifn_enable_crypto(struct hifn_softc *sc)
1071 {
1072         u_int32_t dmacfg, ramcfg, encl, addr, i;
1073         char *offtbl = NULL;
1074
1075         for (i = 0; i < NELEM(pci2id); i++) {
1076                 if (pci2id[i].pci_vendor == pci_get_vendor(sc->sc_dev) &&
1077                     pci2id[i].pci_prod == pci_get_device(sc->sc_dev)) {
1078                         offtbl = pci2id[i].card_id;
1079                         break;
1080                 }
1081         }
1082         if (offtbl == NULL) {
1083                 device_printf(sc->sc_dev, "Unknown card!\n");
1084                 return (1);
1085         }
1086
1087         ramcfg = READ_REG_0(sc, HIFN_0_PUCNFG);
1088         dmacfg = READ_REG_1(sc, HIFN_1_DMA_CNFG);
1089
1090         /*
1091          * The RAM config register's encrypt level bit needs to be set before
1092          * every read performed on the encryption level register.
1093          */
1094         WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);
1095
1096         encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;
1097
1098         /*
1099          * Make sure we don't re-unlock.  Two unlocks kill the chip until the
1100          * next reboot.
1101          */
1102         if (encl == HIFN_PUSTAT_ENA_1 || encl == HIFN_PUSTAT_ENA_2) {
1103 #ifdef HIFN_DEBUG
1104                 if (hifn_debug)
1105                         device_printf(sc->sc_dev,
1106                             "Strong crypto already enabled!\n");
1107 #endif
1108                 goto report;
1109         }
1110
1111         if (encl != 0 && encl != HIFN_PUSTAT_ENA_0) {
1112 #ifdef HIFN_DEBUG
1113                 if (hifn_debug)
1114                         device_printf(sc->sc_dev,
1115                               "Unknown encryption level 0x%x\n", encl);
1116 #endif
1117                 return 1;
1118         }
1119
1120         WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_UNLOCK |
1121             HIFN_DMACNFG_MSTRESET | HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
1122         DELAY(1000);
1123         addr = READ_REG_1(sc, HIFN_UNLOCK_SECRET1);
1124         DELAY(1000);
1125         WRITE_REG_1(sc, HIFN_UNLOCK_SECRET2, 0);
1126         DELAY(1000);
1127
1128         for (i = 0; i <= 12; i++) {
1129                 addr = hifn_next_signature(addr, offtbl[i] + 0x101);
1130                 WRITE_REG_1(sc, HIFN_UNLOCK_SECRET2, addr);
1131
1132                 DELAY(1000);
1133         }
1134
1135         WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);
1136         encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;
1137
1138 #ifdef HIFN_DEBUG
1139         if (hifn_debug) {
1140                 if (encl != HIFN_PUSTAT_ENA_1 && encl != HIFN_PUSTAT_ENA_2)
1141                         device_printf(sc->sc_dev, "Engine is permanently "
1142                                 "locked until next system reset!\n");
1143                 else
1144                         device_printf(sc->sc_dev, "Engine enabled "
1145                                 "successfully!\n");
1146         }
1147 #endif
1148
1149 report:
1150         WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg);
1151         WRITE_REG_1(sc, HIFN_1_DMA_CNFG, dmacfg);
1152
1153         switch (encl) {
1154         case HIFN_PUSTAT_ENA_1:
1155         case HIFN_PUSTAT_ENA_2:
1156                 break;
1157         case HIFN_PUSTAT_ENA_0:
1158         default:
1159                 device_printf(sc->sc_dev, "disabled");
1160                 break;
1161         }
1162
1163         return 0;
1164 }
1165
1166 /*
1167  * Give initial values to the registers listed in the "Register Space"
1168  * section of the HIFN Software Development reference manual.
1169  */
1170 static void 
1171 hifn_init_pci_registers(struct hifn_softc *sc)
1172 {
1173         /* write fixed values needed by the Initialization registers */
1174         WRITE_REG_0(sc, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA);
1175         WRITE_REG_0(sc, HIFN_0_FIFOCNFG, HIFN_FIFOCNFG_THRESHOLD);
1176         WRITE_REG_0(sc, HIFN_0_PUIER, HIFN_PUIER_DSTOVER);
1177
1178         /* write all 4 ring address registers */
1179         WRITE_REG_1(sc, HIFN_1_DMA_CRAR, sc->sc_dma_physaddr +
1180             offsetof(struct hifn_dma, cmdr[0]));
1181         WRITE_REG_1(sc, HIFN_1_DMA_SRAR, sc->sc_dma_physaddr +
1182             offsetof(struct hifn_dma, srcr[0]));
1183         WRITE_REG_1(sc, HIFN_1_DMA_DRAR, sc->sc_dma_physaddr +
1184             offsetof(struct hifn_dma, dstr[0]));
1185         WRITE_REG_1(sc, HIFN_1_DMA_RRAR, sc->sc_dma_physaddr +
1186             offsetof(struct hifn_dma, resr[0]));
1187
1188         DELAY(2000);
1189
1190         /* write status register */
1191         WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1192             HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS |
1193             HIFN_DMACSR_S_CTRL_DIS | HIFN_DMACSR_C_CTRL_DIS |
1194             HIFN_DMACSR_D_ABORT | HIFN_DMACSR_D_DONE | HIFN_DMACSR_D_LAST |
1195             HIFN_DMACSR_D_WAIT | HIFN_DMACSR_D_OVER |
1196             HIFN_DMACSR_R_ABORT | HIFN_DMACSR_R_DONE | HIFN_DMACSR_R_LAST |
1197             HIFN_DMACSR_R_WAIT | HIFN_DMACSR_R_OVER |
1198             HIFN_DMACSR_S_ABORT | HIFN_DMACSR_S_DONE | HIFN_DMACSR_S_LAST |
1199             HIFN_DMACSR_S_WAIT |
1200             HIFN_DMACSR_C_ABORT | HIFN_DMACSR_C_DONE | HIFN_DMACSR_C_LAST |
1201             HIFN_DMACSR_C_WAIT |
1202             HIFN_DMACSR_ENGINE |
1203             ((sc->sc_flags & HIFN_HAS_PUBLIC) ?
1204                 HIFN_DMACSR_PUBDONE : 0) |
1205             ((sc->sc_flags & HIFN_IS_7811) ?
1206                 HIFN_DMACSR_ILLW | HIFN_DMACSR_ILLR : 0));
1207
1208         sc->sc_d_busy = sc->sc_r_busy = sc->sc_s_busy = sc->sc_c_busy = 0;
1209         sc->sc_dmaier |= HIFN_DMAIER_R_DONE | HIFN_DMAIER_C_ABORT |
1210             HIFN_DMAIER_D_OVER | HIFN_DMAIER_R_OVER |
1211             HIFN_DMAIER_S_ABORT | HIFN_DMAIER_D_ABORT | HIFN_DMAIER_R_ABORT |
1212             ((sc->sc_flags & HIFN_IS_7811) ?
1213                 HIFN_DMAIER_ILLW | HIFN_DMAIER_ILLR : 0);
1214         sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
1215         WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
1216
1217         if (sc->sc_flags & HIFN_IS_7956) {
1218                 u_int32_t pll;
1219
1220                 WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
1221                     HIFN_PUCNFG_TCALLPHASES |
1222                     HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32);
1223
1224                 /* turn off the clocks and ensure bypass is set */
1225                 pll = READ_REG_1(sc, HIFN_1_PLL);
1226                 pll = (pll &~ (HIFN_PLL_PK_CLK_SEL | HIFN_PLL_PE_CLK_SEL))
1227                     | HIFN_PLL_BP | HIFN_PLL_MBSET;
1228                 WRITE_REG_1(sc, HIFN_1_PLL, pll);
1229                 DELAY(10*1000);         /* 10ms */
1230                 /* change configuration */
1231                 pll = (pll &~ HIFN_PLL_CONFIG) | sc->sc_pllconfig;
1232                 WRITE_REG_1(sc, HIFN_1_PLL, pll);
1233                 DELAY(10*1000);         /* 10ms */
1234                 /* disable bypass */
1235                 pll &= ~HIFN_PLL_BP;
1236                 WRITE_REG_1(sc, HIFN_1_PLL, pll);
1237                 /* enable clocks with new configuration */
1238                 pll |= HIFN_PLL_PK_CLK_SEL | HIFN_PLL_PE_CLK_SEL;
1239                 WRITE_REG_1(sc, HIFN_1_PLL, pll);
1240         } else {
1241                 WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
1242                     HIFN_PUCNFG_DRFR_128 | HIFN_PUCNFG_TCALLPHASES |
1243                     HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32 |
1244                     (sc->sc_drammodel ? HIFN_PUCNFG_DRAM : HIFN_PUCNFG_SRAM));
1245         }
1246
1247         WRITE_REG_0(sc, HIFN_0_PUISR, HIFN_PUISR_DSTOVER);
1248         WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
1249             HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE | HIFN_DMACNFG_LAST |
1250             ((HIFN_POLL_FREQUENCY << 16 ) & HIFN_DMACNFG_POLLFREQ) |
1251             ((HIFN_POLL_SCALAR << 8) & HIFN_DMACNFG_POLLINVAL));
1252 }
1253
1254 /*
1255  * The maximum number of sessions supported by the card
1256  * is dependent on the amount of context ram, which
1257  * encryption algorithms are enabled, and how compression
1258  * is configured.  This should be configured before this
1259  * routine is called.
1260  */
1261 static void
1262 hifn_sessions(struct hifn_softc *sc)
1263 {
1264         u_int32_t pucnfg;
1265         int ctxsize;
1266
1267         pucnfg = READ_REG_0(sc, HIFN_0_PUCNFG);
1268
1269         if (pucnfg & HIFN_PUCNFG_COMPSING) {
1270                 if (pucnfg & HIFN_PUCNFG_ENCCNFG)
1271                         ctxsize = 128;
1272                 else
1273                         ctxsize = 512;
1274                 /*
1275                  * The 7955/7956 have 32K of internal context memory.
1276                  */
1277                 if (sc->sc_flags & HIFN_IS_7956)
1278                         sc->sc_maxses = 32768 / ctxsize;
1279                 else
1280                         sc->sc_maxses = 1 +
1281                             ((sc->sc_ramsize - 32768) / ctxsize);
1282         } else
1283                 sc->sc_maxses = sc->sc_ramsize / 16384;
1284
1285         if (sc->sc_maxses > 2048)
1286                 sc->sc_maxses = 2048;
1287 }
1288
1289 /*
1290  * Determine ram type (sram or dram).  Board should be just out of a reset
1291  * state when this is called.
1292  */
1293 static int
1294 hifn_ramtype(struct hifn_softc *sc)
1295 {
1296         u_int8_t data[8], dataexpect[8];
1297         int i;
1298
1299         for (i = 0; i < sizeof(data); i++)
1300                 data[i] = dataexpect[i] = 0x55;
1301         if (hifn_writeramaddr(sc, 0, data))
1302                 return (-1);
1303         if (hifn_readramaddr(sc, 0, data))
1304                 return (-1);
1305         if (bcmp(data, dataexpect, sizeof(data)) != 0) {
1306                 sc->sc_drammodel = 1;
1307                 return (0);
1308         }
1309
1310         for (i = 0; i < sizeof(data); i++)
1311                 data[i] = dataexpect[i] = 0xaa;
1312         if (hifn_writeramaddr(sc, 0, data))
1313                 return (-1);
1314         if (hifn_readramaddr(sc, 0, data))
1315                 return (-1);
1316         if (bcmp(data, dataexpect, sizeof(data)) != 0) {
1317                 sc->sc_drammodel = 1;
1318                 return (0);
1319         }
1320
1321         return (0);
1322 }
1323
1324 #define HIFN_SRAM_MAX           (32 << 20)
1325 #define HIFN_SRAM_STEP_SIZE     16384
1326 #define HIFN_SRAM_GRANULARITY   (HIFN_SRAM_MAX / HIFN_SRAM_STEP_SIZE)
1327
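/*
 * Size the external SRAM: tag the start of each 16KB step (highest
 * address first), then read the tags back from the bottom up; once an
 * address aliases back onto lower memory the tag no longer matches,
 * and the last matching step gives the usable RAM size.
 */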
1328 static int
1329 hifn_sramsize(struct hifn_softc *sc)
1330 {
1331         u_int32_t a;
1332         u_int8_t data[8];
1333         u_int8_t dataexpect[sizeof(data)];
1334         int32_t i;
1335
1336         for (i = 0; i < sizeof(data); i++)
1337                 data[i] = dataexpect[i] = i ^ 0x5a;
1338
1339         for (i = HIFN_SRAM_GRANULARITY - 1; i >= 0; i--) {
1340                 a = i * HIFN_SRAM_STEP_SIZE;
1341                 bcopy(&i, data, sizeof(i));
1342                 hifn_writeramaddr(sc, a, data);
1343         }
1344
1345         for (i = 0; i < HIFN_SRAM_GRANULARITY; i++) {
1346                 a = i * HIFN_SRAM_STEP_SIZE;
1347                 bcopy(&i, dataexpect, sizeof(i));
1348                 if (hifn_readramaddr(sc, a, data) < 0)
1349                         return (0);
1350                 if (bcmp(data, dataexpect, sizeof(data)) != 0)
1351                         return (0);
1352                 sc->sc_ramsize = a + HIFN_SRAM_STEP_SIZE;
1353         }
1354
1355         return (0);
1356 }
1357
1358 /*
1359  * XXX For dram boards, one should really try all of the
1360  * HIFN_PUCNFG_DSZ_*'s.  This just assumes that PUCNFG
1361  * is already set up correctly.
1362  */
1363 static int
1364 hifn_dramsize(struct hifn_softc *sc)
1365 {
1366         u_int32_t cnfg;
1367         
1368         if (sc->sc_flags & HIFN_IS_7956) {
1369                 /*
1370                  * 7955/7956 have a fixed internal ram of only 32K.
1371                  */
1372                 sc->sc_ramsize = 32768;
1373         } else {
1374                 cnfg = READ_REG_0(sc, HIFN_0_PUCNFG) &
1375                     HIFN_PUCNFG_DRAMMASK;
1376                 sc->sc_ramsize = 1 << ((cnfg >> 13) + 18);
1377         }
1378         return (0);
1379 }
1380
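/*
 * Grab the next free command/source/destination/result descriptor
 * slots, wrapping each ring with its JUMP descriptor when the end
 * of the ring is reached.
 */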
1381 static void
1382 hifn_alloc_slot(struct hifn_softc *sc, int *cmdp, int *srcp, int *dstp, int *resp)
1383 {
1384         struct hifn_dma *dma = sc->sc_dma;
1385
1386         if (dma->cmdi == HIFN_D_CMD_RSIZE) {
1387                 dma->cmdi = 0;
1388                 dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID |
1389                     HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
1390                 HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
1391                     BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1392         }
1393         *cmdp = dma->cmdi++;
1394         dma->cmdk = dma->cmdi;
1395
1396         if (dma->srci == HIFN_D_SRC_RSIZE) {
1397                 dma->srci = 0;
1398                 dma->srcr[HIFN_D_SRC_RSIZE].l = htole32(HIFN_D_VALID |
1399                     HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
1400                 HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE,
1401                     BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1402         }
1403         *srcp = dma->srci++;
1404         dma->srck = dma->srci;
1405
1406         if (dma->dsti == HIFN_D_DST_RSIZE) {
1407                 dma->dsti = 0;
1408                 dma->dstr[HIFN_D_DST_RSIZE].l = htole32(HIFN_D_VALID |
1409                     HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
1410                 HIFN_DSTR_SYNC(sc, HIFN_D_DST_RSIZE,
1411                     BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1412         }
1413         *dstp = dma->dsti++;
1414         dma->dstk = dma->dsti;
1415
1416         if (dma->resi == HIFN_D_RES_RSIZE) {
1417                 dma->resi = 0;
1418                 dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID |
1419                     HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
1420                 HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
1421                     BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1422         }
1423         *resp = dma->resi++;
1424         dma->resk = dma->resi;
1425 }
1426
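/*
 * Write an 8 byte datum to external RAM at the given address by pushing
 * a single write command through the DMA rings.  Used by hifn_sramsize()
 * to probe how much SRAM is fitted.  Returns 0 on success, -1 if the
 * result descriptor never completes.
 */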
1427 static int
1428 hifn_writeramaddr(struct hifn_softc *sc, int addr, u_int8_t *data)
1429 {
1430         struct hifn_dma *dma = sc->sc_dma;
1431         hifn_base_command_t wc;
1432         const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ;
1433         int r, cmdi, resi, srci, dsti;
1434
1435         wc.masks = htole16(3 << 13);
1436         wc.session_num = htole16(addr >> 14);
1437         wc.total_source_count = htole16(8);
1438         wc.total_dest_count = htole16(addr & 0x3fff);
1439
1440         hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi);
1441
1442         WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1443             HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
1444             HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA);
1445
1446         /* build write command */
1447         bzero(dma->command_bufs[cmdi], HIFN_MAX_COMMAND);
1448         *(hifn_base_command_t *)dma->command_bufs[cmdi] = wc;
1449         bcopy(data, &dma->test_src, sizeof(dma->test_src));
1450
1451         dma->srcr[srci].p = htole32(sc->sc_dma_physaddr
1452             + offsetof(struct hifn_dma, test_src));
1453         dma->dstr[dsti].p = htole32(sc->sc_dma_physaddr
1454             + offsetof(struct hifn_dma, test_dst));
1455
1456         dma->cmdr[cmdi].l = htole32(16 | masks);
1457         dma->srcr[srci].l = htole32(8 | masks);
1458         dma->dstr[dsti].l = htole32(4 | masks);
1459         dma->resr[resi].l = htole32(4 | masks);
1460
1461         bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1462             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1463
1464         for (r = 10000; r > 0; r--) {
1465                 DELAY(10);
1466                 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1467                     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1468                 if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0)
1469                         break;
1470                 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1471                     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1472         }
1473         if (r == 0) {
1474                 device_printf(sc->sc_dev, "writeramaddr -- "
1475                     "result[%d](addr %d) still valid\n", resi, addr);
1476                 /* fall through so the DMA engines below get disabled */
1477                 r = -1;
1478         } else
1479                 r = 0;
1480
1481         WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1482             HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS |
1483             HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS);
1484
1485         return (r);
1486 }
1487
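/*
 * Counterpart of hifn_writeramaddr(): read 8 bytes back from external
 * RAM at the given address into 'data'.  Returns 0 on success, -1 if
 * the result descriptor never completes.
 */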
1488 static int
1489 hifn_readramaddr(struct hifn_softc *sc, int addr, u_int8_t *data)
1490 {
1491         struct hifn_dma *dma = sc->sc_dma;
1492         hifn_base_command_t rc;
1493         const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ;
1494         int r, cmdi, srci, dsti, resi;
1495
1496         rc.masks = htole16(2 << 13);
1497         rc.session_num = htole16(addr >> 14);
1498         rc.total_source_count = htole16(addr & 0x3fff);
1499         rc.total_dest_count = htole16(8);
1500
1501         hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi);
1502
1503         WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1504             HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
1505             HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA);
1506
1507         bzero(dma->command_bufs[cmdi], HIFN_MAX_COMMAND);
1508         *(hifn_base_command_t *)dma->command_bufs[cmdi] = rc;
1509
1510         dma->srcr[srci].p = htole32(sc->sc_dma_physaddr +
1511             offsetof(struct hifn_dma, test_src));
1512         dma->test_src = 0;
1513         dma->dstr[dsti].p = htole32(sc->sc_dma_physaddr +
1514             offsetof(struct hifn_dma, test_dst));
1515         dma->test_dst = 0;
1516         dma->cmdr[cmdi].l = htole32(8 | masks);
1517         dma->srcr[srci].l = htole32(8 | masks);
1518         dma->dstr[dsti].l = htole32(8 | masks);
1519         dma->resr[resi].l = htole32(HIFN_MAX_RESULT | masks);
1520
1521         bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1522             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1523
1524         for (r = 10000; r > 0; r--) {
1525                 DELAY(10);
1526                 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1527                     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1528                 if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0)
1529                         break;
1530                 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1531                     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1532         }
1533         if (r == 0) {
1534                 device_printf(sc->sc_dev, "readramaddr -- "
1535                     "result[%d](addr %d) still valid\n", resi, addr);
1536                 r = -1;
1537         } else {
1538                 r = 0;
1539                 bcopy(&dma->test_dst, data, sizeof(dma->test_dst));
1540         }
1541
1542         WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1543             HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS |
1544             HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS);
1545
1546         return (r);
1547 }
1548
1549 /*
1550  * Initialize the descriptor rings.
1551  */
1552 static void 
1553 hifn_init_dma(struct hifn_softc *sc)
1554 {
1555         struct hifn_dma *dma = sc->sc_dma;
1556         int i;
1557
1558         hifn_set_retry(sc);
1559
1560         /* initialize static pointer values */
1561         for (i = 0; i < HIFN_D_CMD_RSIZE; i++)
1562                 dma->cmdr[i].p = htole32(sc->sc_dma_physaddr +
1563                     offsetof(struct hifn_dma, command_bufs[i][0]));
1564         for (i = 0; i < HIFN_D_RES_RSIZE; i++)
1565                 dma->resr[i].p = htole32(sc->sc_dma_physaddr +
1566                     offsetof(struct hifn_dma, result_bufs[i][0]));
1567
1568         dma->cmdr[HIFN_D_CMD_RSIZE].p =
1569             htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, cmdr[0]));
1570         dma->srcr[HIFN_D_SRC_RSIZE].p =
1571             htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, srcr[0]));
1572         dma->dstr[HIFN_D_DST_RSIZE].p =
1573             htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, dstr[0]));
1574         dma->resr[HIFN_D_RES_RSIZE].p =
1575             htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, resr[0]));
1576
1577         dma->cmdu = dma->srcu = dma->dstu = dma->resu = 0;
1578         dma->cmdi = dma->srci = dma->dsti = dma->resi = 0;
1579         dma->cmdk = dma->srck = dma->dstk = dma->resk = 0;
1580 }
1581
1582 /*
1583  * Writes out the raw command buffer space.  Returns the
1584  * command buffer size.
1585  */
1586 static u_int
1587 hifn_write_command(struct hifn_command *cmd, u_int8_t *buf)
1588 {
1589         u_int8_t *buf_pos;
1590         hifn_base_command_t *base_cmd;
1591         hifn_mac_command_t *mac_cmd;
1592         hifn_crypt_command_t *cry_cmd;
1593         int using_mac, using_crypt, len, ivlen;
1594         u_int32_t dlen, slen;
1595
1596         buf_pos = buf;
1597         using_mac = cmd->base_masks & HIFN_BASE_CMD_MAC;
1598         using_crypt = cmd->base_masks & HIFN_BASE_CMD_CRYPT;
1599
1600         base_cmd = (hifn_base_command_t *)buf_pos;
1601         base_cmd->masks = htole16(cmd->base_masks);
1602         slen = cmd->src_mapsize;
1603         if (cmd->sloplen)
1604                 dlen = cmd->dst_mapsize - cmd->sloplen + sizeof(u_int32_t);
1605         else
1606                 dlen = cmd->dst_mapsize;
1607         base_cmd->total_source_count = htole16(slen & HIFN_BASE_CMD_LENMASK_LO);
1608         base_cmd->total_dest_count = htole16(dlen & HIFN_BASE_CMD_LENMASK_LO);
1609         dlen >>= 16;
1610         slen >>= 16;
1611
1612         base_cmd->session_num = htole16(
1613             ((slen << HIFN_BASE_CMD_SRCLEN_S) & HIFN_BASE_CMD_SRCLEN_M) |
1614             ((dlen << HIFN_BASE_CMD_DSTLEN_S) & HIFN_BASE_CMD_DSTLEN_M));
1615         buf_pos += sizeof(hifn_base_command_t);
1616
1617         if (using_mac) {
1618                 mac_cmd = (hifn_mac_command_t *)buf_pos;
1619                 dlen = cmd->maccrd->crd_len;
1620                 mac_cmd->source_count = htole16(dlen & 0xffff);
1621                 dlen >>= 16;
1622                 mac_cmd->masks = htole16(cmd->mac_masks |
1623                     ((dlen << HIFN_MAC_CMD_SRCLEN_S) & HIFN_MAC_CMD_SRCLEN_M));
1624                 mac_cmd->header_skip = htole16(cmd->maccrd->crd_skip);
1625                 mac_cmd->reserved = 0;
1626                 buf_pos += sizeof(hifn_mac_command_t);
1627         }
1628
1629         if (using_crypt) {
1630                 cry_cmd = (hifn_crypt_command_t *)buf_pos;
1631                 dlen = cmd->enccrd->crd_len;
1632                 cry_cmd->source_count = htole16(dlen & 0xffff);
1633                 dlen >>= 16;
1634                 cry_cmd->masks = htole16(cmd->cry_masks |
1635                     ((dlen << HIFN_CRYPT_CMD_SRCLEN_S) & HIFN_CRYPT_CMD_SRCLEN_M));
1636                 cry_cmd->header_skip = htole16(cmd->enccrd->crd_skip);
1637                 cry_cmd->reserved = 0;
1638                 buf_pos += sizeof(hifn_crypt_command_t);
1639         }
1640
1641         if (using_mac && cmd->mac_masks & HIFN_MAC_CMD_NEW_KEY) {
1642                 bcopy(cmd->mac, buf_pos, HIFN_MAC_KEY_LENGTH);
1643                 buf_pos += HIFN_MAC_KEY_LENGTH;
1644         }
1645
1646         if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_KEY) {
1647                 switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) {
1648                 case HIFN_CRYPT_CMD_ALG_3DES:
1649                         bcopy(cmd->ck, buf_pos, HIFN_3DES_KEY_LENGTH);
1650                         buf_pos += HIFN_3DES_KEY_LENGTH;
1651                         break;
1652                 case HIFN_CRYPT_CMD_ALG_DES:
1653                         bcopy(cmd->ck, buf_pos, HIFN_DES_KEY_LENGTH);
1654                         buf_pos += HIFN_DES_KEY_LENGTH;
1655                         break;
1656                 case HIFN_CRYPT_CMD_ALG_RC4:
1657                         len = 256;
1658                         do {
1659                                 int clen;
1660
1661                                 clen = MIN(cmd->cklen, len);
1662                                 bcopy(cmd->ck, buf_pos, clen);
1663                                 len -= clen;
1664                                 buf_pos += clen;
1665                         } while (len > 0);
1666                         bzero(buf_pos, 4);
1667                         buf_pos += 4;
1668                         break;
1669                 case HIFN_CRYPT_CMD_ALG_AES:
1670                         /*
1671                          * AES keys are variable 128, 192 and
1672                          * 256 bits (16, 24 and 32 bytes).
1673                          */
1674                         bcopy(cmd->ck, buf_pos, cmd->cklen);
1675                         buf_pos += cmd->cklen;
1676                         break;
1677                 }
1678         }
1679
1680         if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_IV) {
1681                 switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) {
1682                 case HIFN_CRYPT_CMD_ALG_AES:
1683                         ivlen = HIFN_AES_IV_LENGTH;
1684                         break;
1685                 default:
1686                         ivlen = HIFN_IV_LENGTH;
1687                         break;
1688                 }
1689                 bcopy(cmd->iv, buf_pos, ivlen);
1690                 buf_pos += ivlen;
1691         }
1692
1693         if ((cmd->base_masks & (HIFN_BASE_CMD_MAC|HIFN_BASE_CMD_CRYPT)) == 0) {
1694                 bzero(buf_pos, 8);
1695                 buf_pos += 8;
1696         }
1697
1698         return (buf_pos - buf);
1700 }
1701
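/*
 * Return non-zero if every segment of the mapped operand starts on a
 * longword boundary and all but the last segment have longword aligned
 * lengths, i.e. the operand can be handed to the chip without copying.
 */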
1702 static int
1703 hifn_dmamap_aligned(struct hifn_operand *op)
1704 {
1705         int i;
1706
1707         for (i = 0; i < op->nsegs; i++) {
1708                 if (op->segs[i].ds_addr & 3)
1709                         return (0);
1710                 if ((i != (op->nsegs - 1)) && (op->segs[i].ds_len & 3))
1711                         return (0);
1712         }
1713         return (1);
1714 }
1715
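/*
 * Advance a destination ring index, re-arming the jump descriptor and
 * wrapping back to slot 0 when the end of the ring is reached.
 */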
1716 static __inline int
1717 hifn_dmamap_dstwrap(struct hifn_softc *sc, int idx)
1718 {
1719         struct hifn_dma *dma = sc->sc_dma;
1720
1721         if (++idx == HIFN_D_DST_RSIZE) {
1722                 dma->dstr[idx].l = htole32(HIFN_D_VALID | HIFN_D_JUMP |
1723                     HIFN_D_MASKDONEIRQ);
1724                 HIFN_DSTR_SYNC(sc, idx,
1725                     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1726                 idx = 0;
1727         }
1728         return (idx);
1729 }
1730
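/*
 * Build destination ring entries for the dst operand.  If the total
 * length is not longword aligned (cmd->sloplen != 0), the trailing
 * bytes are directed into the per-command slop buffer so the chip only
 * ever writes whole longwords.
 */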
1731 static int
1732 hifn_dmamap_load_dst(struct hifn_softc *sc, struct hifn_command *cmd)
1733 {
1734         struct hifn_dma *dma = sc->sc_dma;
1735         struct hifn_operand *dst = &cmd->dst;
1736         u_int32_t p, l;
1737         int idx, used = 0, i;
1738
1739         idx = dma->dsti;
1740         for (i = 0; i < dst->nsegs - 1; i++) {
1741                 dma->dstr[idx].p = htole32(dst->segs[i].ds_addr);
1742                 dma->dstr[idx].l = htole32(HIFN_D_VALID |
1743                     HIFN_D_MASKDONEIRQ | dst->segs[i].ds_len);
1744                 HIFN_DSTR_SYNC(sc, idx,
1745                     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1746                 used++;
1747
1748                 idx = hifn_dmamap_dstwrap(sc, idx);
1749         }
1750
1751         if (cmd->sloplen == 0) {
1752                 p = dst->segs[i].ds_addr;
1753                 l = HIFN_D_VALID | HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
1754                     dst->segs[i].ds_len;
1755         } else {
1756                 p = sc->sc_dma_physaddr +
1757                     offsetof(struct hifn_dma, slop[cmd->slopidx]);
1758                 l = HIFN_D_VALID | HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
1759                     sizeof(u_int32_t);
1760
1761                 if ((dst->segs[i].ds_len - cmd->sloplen) != 0) {
1762                         dma->dstr[idx].p = htole32(dst->segs[i].ds_addr);
1763                         dma->dstr[idx].l = htole32(HIFN_D_VALID |
1764                             HIFN_D_MASKDONEIRQ |
1765                             (dst->segs[i].ds_len - cmd->sloplen));
1766                         HIFN_DSTR_SYNC(sc, idx,
1767                             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1768                         used++;
1769
1770                         idx = hifn_dmamap_dstwrap(sc, idx);
1771                 }
1772         }
1773         dma->dstr[idx].p = htole32(p);
1774         dma->dstr[idx].l = htole32(l);
1775         HIFN_DSTR_SYNC(sc, idx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1776         used++;
1777
1778         idx = hifn_dmamap_dstwrap(sc, idx);
1779
1780         dma->dsti = idx;
1781         dma->dstu += used;
1782         return (idx);
1783 }
1784
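/*
 * Advance a source ring index, re-arming the jump descriptor and
 * wrapping back to slot 0 when the end of the ring is reached.
 */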
1785 static __inline int
1786 hifn_dmamap_srcwrap(struct hifn_softc *sc, int idx)
1787 {
1788         struct hifn_dma *dma = sc->sc_dma;
1789
1790         if (++idx == HIFN_D_SRC_RSIZE) {
1791                 dma->srcr[idx].l = htole32(HIFN_D_VALID |
1792                     HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
1793                 HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE,
1794                     BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1795                 idx = 0;
1796         }
1797         return (idx);
1798 }
1799
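/*
 * Build one source ring entry per segment of the src operand, marking
 * the final segment with HIFN_D_LAST.
 */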
1800 static int
1801 hifn_dmamap_load_src(struct hifn_softc *sc, struct hifn_command *cmd)
1802 {
1803         struct hifn_dma *dma = sc->sc_dma;
1804         struct hifn_operand *src = &cmd->src;
1805         int idx, i;
1806         u_int32_t last = 0;
1807
1808         idx = dma->srci;
1809         for (i = 0; i < src->nsegs; i++) {
1810                 if (i == src->nsegs - 1)
1811                         last = HIFN_D_LAST;
1812
1813                 dma->srcr[idx].p = htole32(src->segs[i].ds_addr);
1814                 dma->srcr[idx].l = htole32(src->segs[i].ds_len |
1815                     HIFN_D_VALID | HIFN_D_MASKDONEIRQ | last);
1816                 HIFN_SRCR_SYNC(sc, idx,
1817                     BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1818
1819                 idx = hifn_dmamap_srcwrap(sc, idx);
1820         }
1821         dma->srci = idx;
1822         dma->srcu += src->nsegs;
1823         return (idx);
1824 }
1825
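/*
 * bus_dmamap_load callback: record the segment list and total mapped
 * size of an operand in its struct hifn_operand.
 */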
1826 static void
1827 hifn_op_cb(void* arg, bus_dma_segment_t *seg, int nsegs, bus_size_t mapsize, int error)
1828 {
1829         struct hifn_operand *op = arg;
1830
1831         KASSERT(nsegs <= MAX_SCATTER,
1832                 ("hifn_op_cb: too many DMA segments (%u > %u) "
1833                  "returned when mapping operand", nsegs, MAX_SCATTER));
1834         op->mapsize = mapsize;
1835         op->nsegs = nsegs;
1836         bcopy(seg, op->segs, nsegs * sizeof (seg[0]));
1837 }
1838
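/*
 * Queue a request on the DMA rings: write the command descriptor, load
 * the source and destination maps (copying unaligned mbuf chains into a
 * freshly allocated, aligned chain first), post a result descriptor and
 * enable whichever DMA engines are not already running.  Returns 0 on
 * success, ERESTART if the rings are currently full (the caller will
 * requeue the request), or an errno on failure.
 */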
1839 static int 
1840 hifn_crypto(
1841         struct hifn_softc *sc,
1842         struct hifn_command *cmd,
1843         struct cryptop *crp,
1844         int hint)
1845 {
1846         struct  hifn_dma *dma = sc->sc_dma;
1847         u_int32_t cmdlen, csr;
1848         int cmdi, resi, err = 0;
1849
1850         /*
1851          * need 1 cmd, and 1 res
1852          *
1853          * NB: check this first since it's easy.
1854          */
1855         HIFN_LOCK(sc);
1856         if ((dma->cmdu + 1) > HIFN_D_CMD_RSIZE ||
1857             (dma->resu + 1) > HIFN_D_RES_RSIZE) {
1858 #ifdef HIFN_DEBUG
1859                 if (hifn_debug) {
1860                         device_printf(sc->sc_dev,
1861                                 "cmd/result exhaustion, cmdu %u resu %u\n",
1862                                 dma->cmdu, dma->resu);
1863                 }
1864 #endif
1865                 hifnstats.hst_nomem_cr++;
1866                 HIFN_UNLOCK(sc);
1867                 return (ERESTART);
1868         }
1869
1870         if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, &cmd->src_map)) {
1871                 hifnstats.hst_nomem_map++;
1872                 HIFN_UNLOCK(sc);
1873                 return (ENOMEM);
1874         }
1875
1876         if (crp->crp_flags & CRYPTO_F_IMBUF) {
1877                 if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->src_map,
1878                     cmd->src_m, hifn_op_cb, &cmd->src, BUS_DMA_NOWAIT)) {
1879                         hifnstats.hst_nomem_load++;
1880                         err = ENOMEM;
1881                         goto err_srcmap1;
1882                 }
1883         } else if (crp->crp_flags & CRYPTO_F_IOV) {
1884 #if 0
1885                 cmd->src_io->uio_segflg = UIO_USERSPACE;
1886 #endif
1887                 if (bus_dmamap_load_uio(sc->sc_dmat, cmd->src_map,
1888                     cmd->src_io, hifn_op_cb, &cmd->src, BUS_DMA_NOWAIT)) {
1889                         hifnstats.hst_nomem_load++;
1890                         err = ENOMEM;
1891                         goto err_srcmap1;
1892                 }
1893         } else {
1894                 err = EINVAL;
1895                 goto err_srcmap1;
1896         }
1897
1898         if (hifn_dmamap_aligned(&cmd->src)) {
1899                 cmd->sloplen = cmd->src_mapsize & 3;
1900                 cmd->dst = cmd->src;
1901         } else {
1902                 if (crp->crp_flags & CRYPTO_F_IOV) {
1903                         err = EINVAL;
1904                         goto err_srcmap;
1905                 } else if (crp->crp_flags & CRYPTO_F_IMBUF) {
1906                         int totlen, len;
1907                         struct mbuf *m, *m0, *mlast;
1908
1909                         KASSERT(cmd->dst_m == cmd->src_m,
1910                                 ("hifn_crypto: dst_m initialized improperly"));
1911                         hifnstats.hst_unaligned++;
1912                         /*
1913                          * Source is not aligned on a longword boundary.
1914                          * Copy the data to ensure alignment.  If we fail
1915                          * to allocate mbufs or clusters while doing this
1916                          * we return ERESTART so the operation is requeued
1917                          * at the crypto layer, but only if there are
1918                          * ops already posted to the hardware; otherwise we
1919                          * have no guarantee that we'll be re-entered.
1920                          */
1921                         totlen = cmd->src_mapsize;
1922                         if (cmd->src_m->m_flags & M_PKTHDR) {
1923                                 len = MHLEN;
1924                                 MGETHDR(m0, MB_DONTWAIT, MT_DATA);
1925                                 if (m0 && !m_dup_pkthdr(m0, cmd->src_m, MB_DONTWAIT)) {
1926                                         m_free(m0);
1927                                         m0 = NULL;
1928                                 }
1929                         } else {
1930                                 len = MLEN;
1931                                 MGET(m0, MB_DONTWAIT, MT_DATA);
1932                         }
1933                         if (m0 == NULL) {
1934                                 hifnstats.hst_nomem_mbuf++;
1935                                 err = dma->cmdu ? ERESTART : ENOMEM;
1936                                 goto err_srcmap;
1937                         }
1938                         if (totlen >= MINCLSIZE) {
1939                                 MCLGET(m0, MB_DONTWAIT);
1940                                 if ((m0->m_flags & M_EXT) == 0) {
1941                                         hifnstats.hst_nomem_mcl++;
1942                                         err = dma->cmdu ? ERESTART : ENOMEM;
1943                                         m_freem(m0);
1944                                         goto err_srcmap;
1945                                 }
1946                                 len = MCLBYTES;
1947                         }
1948                         totlen -= len;
1949                         m0->m_pkthdr.len = m0->m_len = len;
1950                         mlast = m0;
1951
1952                         while (totlen > 0) {
1953                                 MGET(m, MB_DONTWAIT, MT_DATA);
1954                                 if (m == NULL) {
1955                                         hifnstats.hst_nomem_mbuf++;
1956                                         err = dma->cmdu ? ERESTART : ENOMEM;
1957                                         m_freem(m0);
1958                                         goto err_srcmap;
1959                                 }
1960                                 len = MLEN;
1961                                 if (totlen >= MINCLSIZE) {
1962                                         MCLGET(m, MB_DONTWAIT);
1963                                         if ((m->m_flags & M_EXT) == 0) {
1964                                                 hifnstats.hst_nomem_mcl++;
1965                                                 err = dma->cmdu ? ERESTART : ENOMEM;
1966                                                 mlast->m_next = m;
1967                                                 m_freem(m0);
1968                                                 goto err_srcmap;
1969                                         }
1970                                         len = MCLBYTES;
1971                                 }
1972
1973                                 m->m_len = len;
1974                                 m0->m_pkthdr.len += len;
1975                                 totlen -= len;
1976
1977                                 mlast->m_next = m;
1978                                 mlast = m;
1979                         }
1980                         cmd->dst_m = m0;
1981                 }
1982         }
1983
1984         if (cmd->dst_map == NULL) {
1985                 if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, &cmd->dst_map)) {
1986                         hifnstats.hst_nomem_map++;
1987                         err = ENOMEM;
1988                         goto err_srcmap;
1989                 }
1990                 if (crp->crp_flags & CRYPTO_F_IMBUF) {
1991                         if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->dst_map,
1992                             cmd->dst_m, hifn_op_cb, &cmd->dst, BUS_DMA_NOWAIT)) {
1993                                 hifnstats.hst_nomem_map++;
1994                                 err = ENOMEM;
1995                                 goto err_dstmap1;
1996                         }
1997                 } else if (crp->crp_flags & CRYPTO_F_IOV) {
1998 #if 0
1999                         cmd->dst_io->uio_segflg |= UIO_USERSPACE;
2000 #endif
2001                         if (bus_dmamap_load_uio(sc->sc_dmat, cmd->dst_map,
2002                             cmd->dst_io, hifn_op_cb, &cmd->dst, BUS_DMA_NOWAIT)) {
2003                                 hifnstats.hst_nomem_load++;
2004                                 err = ENOMEM;
2005                                 goto err_dstmap1;
2006                         }
2007                 }
2008         }
2009
2010 #ifdef HIFN_DEBUG
2011         if (hifn_debug) {
2012                 device_printf(sc->sc_dev,
2013                     "Entering cmd: stat %8x ien %8x u %d/%d/%d/%d n %d/%d\n",
2014                     READ_REG_1(sc, HIFN_1_DMA_CSR),
2015                     READ_REG_1(sc, HIFN_1_DMA_IER),
2016                     dma->cmdu, dma->srcu, dma->dstu, dma->resu,
2017                     cmd->src_nsegs, cmd->dst_nsegs);
2018         }
2019 #endif
2020
2021         if (cmd->src_map == cmd->dst_map) {
2022                 bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2023                     BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
2024         } else {
2025                 bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2026                     BUS_DMASYNC_PREWRITE);
2027                 bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
2028                     BUS_DMASYNC_PREREAD);
2029         }
2030
2031         /*
2032          * need N src, and N dst
2033          */
2034         if ((dma->srcu + cmd->src_nsegs) > HIFN_D_SRC_RSIZE ||
2035             (dma->dstu + cmd->dst_nsegs + 1) > HIFN_D_DST_RSIZE) {
2036 #ifdef HIFN_DEBUG
2037                 if (hifn_debug) {
2038                         device_printf(sc->sc_dev,
2039                                 "src/dst exhaustion, srcu %u+%u dstu %u+%u\n",
2040                                 dma->srcu, cmd->src_nsegs,
2041                                 dma->dstu, cmd->dst_nsegs);
2042                 }
2043 #endif
2044                 hifnstats.hst_nomem_sd++;
2045                 err = ERESTART;
2046                 goto err_dstmap;
2047         }
2048
2049         if (dma->cmdi == HIFN_D_CMD_RSIZE) {
2050                 dma->cmdi = 0;
2051                 dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID |
2052                     HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
2053                 HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
2054                     BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2055         }
2056         cmdi = dma->cmdi++;
2057         cmdlen = hifn_write_command(cmd, dma->command_bufs[cmdi]);
2058         HIFN_CMD_SYNC(sc, cmdi, BUS_DMASYNC_PREWRITE);
2059
2060         /* .p for command/result already set */
2061         dma->cmdr[cmdi].l = htole32(cmdlen | HIFN_D_VALID | HIFN_D_LAST |
2062             HIFN_D_MASKDONEIRQ);
2063         HIFN_CMDR_SYNC(sc, cmdi,
2064             BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2065         dma->cmdu++;
2066
2067         /*
2068          * We don't worry about missing an interrupt (which a "command wait"
2069          * interrupt salvages us from), unless there is more than one command
2070          * in the queue.
2071          */
2072         if (dma->cmdu > 1) {
2073                 sc->sc_dmaier |= HIFN_DMAIER_C_WAIT;
2074                 WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
2075         }
2076
2077         hifnstats.hst_ipackets++;
2078         hifnstats.hst_ibytes += cmd->src_mapsize;
2079
2080         hifn_dmamap_load_src(sc, cmd);
2081
2082         /*
2083          * Unlike the other descriptors, we normally don't mask the done
2084          * interrupt on the result descriptor (batched requests excepted).
2085          */
2086 #ifdef HIFN_DEBUG
2087         if (hifn_debug)
2088                 kprintf("load res\n");
2089 #endif
2090         if (dma->resi == HIFN_D_RES_RSIZE) {
2091                 dma->resi = 0;
2092                 dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID |
2093                     HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
2094                 HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
2095                     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2096         }
2097         resi = dma->resi++;
2098         KASSERT(dma->hifn_commands[resi] == NULL,
2099                 ("hifn_crypto: command slot %u busy", resi));
2100         dma->hifn_commands[resi] = cmd;
2101         HIFN_RES_SYNC(sc, resi, BUS_DMASYNC_PREREAD);
2102         if ((hint & CRYPTO_HINT_MORE) && sc->sc_curbatch < hifn_maxbatch) {
2103                 dma->resr[resi].l = htole32(HIFN_MAX_RESULT |
2104                     HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ);
2105                 sc->sc_curbatch++;
2106                 if (sc->sc_curbatch > hifnstats.hst_maxbatch)
2107                         hifnstats.hst_maxbatch = sc->sc_curbatch;
2108                 hifnstats.hst_totbatch++;
2109         } else {
2110                 dma->resr[resi].l = htole32(HIFN_MAX_RESULT |
2111                     HIFN_D_VALID | HIFN_D_LAST);
2112                 sc->sc_curbatch = 0;
2113         }
2114         HIFN_RESR_SYNC(sc, resi,
2115             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2116         dma->resu++;
2117
2118         if (cmd->sloplen)
2119                 cmd->slopidx = resi;
2120
2121         hifn_dmamap_load_dst(sc, cmd);
2122
2123         csr = 0;
2124         if (sc->sc_c_busy == 0) {
2125                 csr |= HIFN_DMACSR_C_CTRL_ENA;
2126                 sc->sc_c_busy = 1;
2127         }
2128         if (sc->sc_s_busy == 0) {
2129                 csr |= HIFN_DMACSR_S_CTRL_ENA;
2130                 sc->sc_s_busy = 1;
2131         }
2132         if (sc->sc_r_busy == 0) {
2133                 csr |= HIFN_DMACSR_R_CTRL_ENA;
2134                 sc->sc_r_busy = 1;
2135         }
2136         if (sc->sc_d_busy == 0) {
2137                 csr |= HIFN_DMACSR_D_CTRL_ENA;
2138                 sc->sc_d_busy = 1;
2139         }
2140         if (csr)
2141                 WRITE_REG_1(sc, HIFN_1_DMA_CSR, csr);
2142
2143 #ifdef HIFN_DEBUG
2144         if (hifn_debug) {
2145                 device_printf(sc->sc_dev, "command: stat %8x ier %8x\n",
2146                     READ_REG_1(sc, HIFN_1_DMA_CSR),
2147                     READ_REG_1(sc, HIFN_1_DMA_IER));
2148         }
2149 #endif
2150
2151         sc->sc_active = 5;
2152         HIFN_UNLOCK(sc);
2153         KASSERT(err == 0, ("hifn_crypto: success with error %u", err));
2154         return (err);           /* success */
2155
2156 err_dstmap:
2157         if (cmd->src_map != cmd->dst_map)
2158                 bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
2159 err_dstmap1:
2160         if (cmd->src_map != cmd->dst_map)
2161                 bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
2162 err_srcmap:
2163         if (crp->crp_flags & CRYPTO_F_IMBUF) {
2164                 if (cmd->src_m != cmd->dst_m)
2165                         m_freem(cmd->dst_m);
2166         }
2167         bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
2168 err_srcmap1:
2169         bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
2170         HIFN_UNLOCK(sc);
2171         return (err);
2172 }
2173
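/*
 * Once-a-second watchdog: when the device has been idle long enough for
 * sc_active to count down to zero, disable any DMA engines whose rings
 * have drained, then reschedule ourselves.
 */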
2174 static void
2175 hifn_tick(void* vsc)
2176 {
2177         struct hifn_softc *sc = vsc;
2178
2179         HIFN_LOCK(sc);
2180         if (sc->sc_active == 0) {
2181                 struct hifn_dma *dma = sc->sc_dma;
2182                 u_int32_t r = 0;
2183
2184                 if (dma->cmdu == 0 && sc->sc_c_busy) {
2185                         sc->sc_c_busy = 0;
2186                         r |= HIFN_DMACSR_C_CTRL_DIS;
2187                 }
2188                 if (dma->srcu == 0 && sc->sc_s_busy) {
2189                         sc->sc_s_busy = 0;
2190                         r |= HIFN_DMACSR_S_CTRL_DIS;
2191                 }
2192                 if (dma->dstu == 0 && sc->sc_d_busy) {
2193                         sc->sc_d_busy = 0;
2194                         r |= HIFN_DMACSR_D_CTRL_DIS;
2195                 }
2196                 if (dma->resu == 0 && sc->sc_r_busy) {
2197                         sc->sc_r_busy = 0;
2198                         r |= HIFN_DMACSR_R_CTRL_DIS;
2199                 }
2200                 if (r)
2201                         WRITE_REG_1(sc, HIFN_1_DMA_CSR, r);
2202         } else
2203                 sc->sc_active--;
2204         HIFN_UNLOCK(sc);
2205         callout_reset(&sc->sc_tickto, hz, hifn_tick, sc);
2206 }
2207
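/*
 * Interrupt handler: acknowledge the DMA status bits, report overruns,
 * reset the board on aborted transfers, then reap completed result,
 * source and command descriptors, calling hifn_callback() for each
 * finished request and unblocking the crypto layer if we previously
 * asked it to hold off.
 */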
2208 static void 
2209 hifn_intr(void *arg)
2210 {
2211         struct hifn_softc *sc = arg;
2212         struct hifn_dma *dma;
2213         u_int32_t dmacsr, restart;
2214         int i, u;
2215
2216         dmacsr = READ_REG_1(sc, HIFN_1_DMA_CSR);
2217         
2218         /* Nothing in the DMA unit interrupted */
2219         if ((dmacsr & sc->sc_dmaier) == 0) {
2220                 hifnstats.hst_noirq++;
2221                 return;
2222         }
2223
2224         HIFN_LOCK(sc);
2225
2226         dma = sc->sc_dma;
2227
2228 #ifdef HIFN_DEBUG
2229         if (hifn_debug) {
2230                 device_printf(sc->sc_dev,
2231                     "irq: stat %08x ien %08x damier %08x i %d/%d/%d/%d k %d/%d/%d/%d u %d/%d/%d/%d\n",
2232                     dmacsr, READ_REG_1(sc, HIFN_1_DMA_IER), sc->sc_dmaier,
2233                     dma->cmdi, dma->srci, dma->dsti, dma->resi,
2234                     dma->cmdk, dma->srck, dma->dstk, dma->resk,
2235                     dma->cmdu, dma->srcu, dma->dstu, dma->resu);
2236         }
2237 #endif
2238
2239         WRITE_REG_1(sc, HIFN_1_DMA_CSR, dmacsr & sc->sc_dmaier);
2240
2241         if ((sc->sc_flags & HIFN_HAS_PUBLIC) &&
2242             (dmacsr & HIFN_DMACSR_PUBDONE))
2243                 WRITE_REG_1(sc, HIFN_1_PUB_STATUS,
2244                     READ_REG_1(sc, HIFN_1_PUB_STATUS) | HIFN_PUBSTS_DONE);
2245
2246         restart = dmacsr & (HIFN_DMACSR_D_OVER | HIFN_DMACSR_R_OVER);
2247         if (restart)
2248                 device_printf(sc->sc_dev, "overrun %x\n", dmacsr);
2249
2250         if (sc->sc_flags & HIFN_IS_7811) {
2251                 if (dmacsr & HIFN_DMACSR_ILLR)
2252                         device_printf(sc->sc_dev, "illegal read\n");
2253                 if (dmacsr & HIFN_DMACSR_ILLW)
2254                         device_printf(sc->sc_dev, "illegal write\n");
2255         }
2256
2257         restart = dmacsr & (HIFN_DMACSR_C_ABORT | HIFN_DMACSR_S_ABORT |
2258             HIFN_DMACSR_D_ABORT | HIFN_DMACSR_R_ABORT);
2259         if (restart) {
2260                 device_printf(sc->sc_dev, "abort, resetting.\n");
2261                 hifnstats.hst_abort++;
2262                 hifn_abort(sc);
2263                 HIFN_UNLOCK(sc);
2264                 return;
2265         }
2266
2267         if ((dmacsr & HIFN_DMACSR_C_WAIT) && (dma->cmdu == 0)) {
2268                 /*
2269                  * If there are no slots to process and we receive a
2270                  * "waiting on command" interrupt, disable the "waiting on
2271                  * command" interrupt (by clearing its enable bit).
2272                  */
2273                 sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
2274                 WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
2275         }
2276
2277         /* clear the rings */
2278         i = dma->resk; u = dma->resu;
2279         while (u != 0) {
2280                 HIFN_RESR_SYNC(sc, i,
2281                     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2282                 if (dma->resr[i].l & htole32(HIFN_D_VALID)) {
2283                         HIFN_RESR_SYNC(sc, i,
2284                             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2285                         break;
2286                 }
2287
2288                 if (i != HIFN_D_RES_RSIZE) {
2289                         struct hifn_command *cmd;
2290                         u_int8_t *macbuf = NULL;
2291
2292                         HIFN_RES_SYNC(sc, i, BUS_DMASYNC_POSTREAD);
2293                         cmd = dma->hifn_commands[i];
2294                         KASSERT(cmd != NULL,
2295                                 ("hifn_intr: null command slot %u", i));
2296                         dma->hifn_commands[i] = NULL;
2297
2298                         if (cmd->base_masks & HIFN_BASE_CMD_MAC) {
2299                                 macbuf = dma->result_bufs[i];
2300                                 macbuf += 12;
2301                         }
2302
2303                         hifn_callback(sc, cmd, macbuf);
2304                         hifnstats.hst_opackets++;
2305                         u--;
2306                 }
2307
2308                 if (++i == (HIFN_D_RES_RSIZE + 1))
2309                         i = 0;
2310         }
2311         dma->resk = i; dma->resu = u;
2312
2313         i = dma->srck; u = dma->srcu;
2314         while (u != 0) {
2315                 if (i == HIFN_D_SRC_RSIZE)
2316                         i = 0;
2317                 HIFN_SRCR_SYNC(sc, i,
2318                     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2319                 if (dma->srcr[i].l & htole32(HIFN_D_VALID)) {
2320                         HIFN_SRCR_SYNC(sc, i,
2321                             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2322                         break;
2323                 }
2324                 i++, u--;
2325         }
2326         dma->srck = i; dma->srcu = u;
2327
2328         i = dma->cmdk; u = dma->cmdu;
2329         while (u != 0) {
2330                 HIFN_CMDR_SYNC(sc, i,
2331                     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2332                 if (dma->cmdr[i].l & htole32(HIFN_D_VALID)) {
2333                         HIFN_CMDR_SYNC(sc, i,
2334                             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2335                         break;
2336                 }
2337                 if (i != HIFN_D_CMD_RSIZE) {
2338                         u--;
2339                         HIFN_CMD_SYNC(sc, i, BUS_DMASYNC_POSTWRITE);
2340                 }
2341                 if (++i == (HIFN_D_CMD_RSIZE + 1))
2342                         i = 0;
2343         }
2344         dma->cmdk = i; dma->cmdu = u;
2345
2346         HIFN_UNLOCK(sc);
2347
2348         if (sc->sc_needwakeup) {                /* XXX check high watermark */
2349                 int wakeup = sc->sc_needwakeup & (CRYPTO_SYMQ|CRYPTO_ASYMQ);
2350 #ifdef HIFN_DEBUG
2351                 if (hifn_debug)
2352                         device_printf(sc->sc_dev,
2353                                 "wakeup crypto (%x) u %d/%d/%d/%d\n",
2354                                 sc->sc_needwakeup,
2355                                 dma->cmdu, dma->srcu, dma->dstu, dma->resu);
2356 #endif
2357                 sc->sc_needwakeup &= ~wakeup;
2358                 crypto_unblock(sc->sc_cid, wakeup);
2359         }
2360 }
2361
2362 /*
2363  * Allocate a new 'session' and return an encoded session id.  'sidp'
2364  * contains our registration id, and should contain an encoded session
2365  * id on successful allocation.
2366  */
2367 static int
2368 hifn_newsession(device_t dev, u_int32_t *sidp, struct cryptoini *cri)
2369 {
2370         struct cryptoini *c;
2371         struct hifn_softc *sc = device_get_softc(dev);
2372         int mac = 0, cry = 0, sesn;
2373         struct hifn_session *ses = NULL;
2374
2375         KASSERT(sc != NULL, ("hifn_newsession: null softc"));
2376         if (sidp == NULL || cri == NULL || sc == NULL)
2377                 return (EINVAL);
2378
2379         HIFN_LOCK(sc);
2380         if (sc->sc_sessions == NULL) {
2381                 ses = sc->sc_sessions = (struct hifn_session *)kmalloc(
2382                     sizeof(*ses), M_DEVBUF, M_NOWAIT);
2383                 if (ses == NULL) {
2384                         HIFN_UNLOCK(sc);
2385                         return (ENOMEM);
2386                 }
2387                 sesn = 0;
2388                 sc->sc_nsessions = 1;
2389         } else {
2390                 for (sesn = 0; sesn < sc->sc_nsessions; sesn++) {
2391                         if (!sc->sc_sessions[sesn].hs_used) {
2392                                 ses = &sc->sc_sessions[sesn];
2393                                 break;
2394                         }
2395                 }
2396
2397                 if (ses == NULL) {
2398                         sesn = sc->sc_nsessions;
2399                         ses = (struct hifn_session *)kmalloc((sesn + 1) *
2400                             sizeof(*ses), M_DEVBUF, M_NOWAIT);
2401                         if (ses == NULL) {
2402                                 HIFN_UNLOCK(sc);
2403                                 return (ENOMEM);
2404                         }
2405                         bcopy(sc->sc_sessions, ses, sesn * sizeof(*ses));
2406                         bzero(sc->sc_sessions, sesn * sizeof(*ses));
2407                         kfree(sc->sc_sessions, M_DEVBUF);
2408                         sc->sc_sessions = ses;
2409                         ses = &sc->sc_sessions[sesn];
2410                         sc->sc_nsessions++;
2411                 }
2412         }
2413         HIFN_UNLOCK(sc);
2414
2415         bzero(ses, sizeof(*ses));
2416         ses->hs_used = 1;
2417
2418         for (c = cri; c != NULL; c = c->cri_next) {
2419                 switch (c->cri_alg) {
2420                 case CRYPTO_MD5:
2421                 case CRYPTO_SHA1:
2422                 case CRYPTO_MD5_HMAC:
2423                 case CRYPTO_SHA1_HMAC:
2424                         if (mac)
2425                                 return (EINVAL);
2426                         mac = 1;
2427                         ses->hs_mlen = c->cri_mlen;
2428                         if (ses->hs_mlen == 0) {
2429                                 switch (c->cri_alg) {
2430                                 case CRYPTO_MD5:
2431                                 case CRYPTO_MD5_HMAC:
2432                                         ses->hs_mlen = 16;
2433                                         break;
2434                                 case CRYPTO_SHA1:
2435                                 case CRYPTO_SHA1_HMAC:
2436                                         ses->hs_mlen = 20;
2437                                         break;
2438                                 }
2439                         }
2440                         break;
2441                 case CRYPTO_DES_CBC:
2442                 case CRYPTO_3DES_CBC:
2443                 case CRYPTO_AES_CBC:
2444                         /* XXX may read fewer bytes than requested; does it matter? */
2445                         read_random(ses->hs_iv,
2446                                 c->cri_alg == CRYPTO_AES_CBC ?
2447                                         HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
2448                         /*FALLTHROUGH*/
2449                 case CRYPTO_ARC4:
2450                         if (cry)
2451                                 return (EINVAL);
2452                         cry = 1;
2453                         break;
2454                 default:
2455                         return (EINVAL);
2456                 }
2457         }
2458         if (mac == 0 && cry == 0)
2459                 return (EINVAL);
2460
2461         *sidp = HIFN_SID(device_get_unit(sc->sc_dev), sesn);
2462
2463         return (0);
2464 }
2465
2466 /*
2467  * Deallocate a session.
2468  * XXX this routine should run a zero'd mac/encrypt key into context ram.
2469  * XXX to blow away any keys already stored there.
2470  */
2471 #define CRYPTO_SESID2LID(_sid)  (((u_int32_t) (_sid)) & 0xffffffff)
2472
2473 static int
2474 hifn_freesession(device_t dev, u_int64_t tid)
2475 {
2476         struct hifn_softc *sc = device_get_softc(dev);
2477         int session, error;
2478         u_int32_t sid = CRYPTO_SESID2LID(tid);
2479
2480         KASSERT(sc != NULL, ("hifn_freesession: null softc"));
2481         if (sc == NULL)
2482                 return (EINVAL);
2483
2484         HIFN_LOCK(sc);
2485         session = HIFN_SESSION(sid);
2486         if (session < sc->sc_nsessions) {
2487                 bzero(&sc->sc_sessions[session], sizeof(struct hifn_session));
2488                 error = 0;
2489         } else
2490                 error = EINVAL;
2491         HIFN_UNLOCK(sc);
2492
2493         return (error);
2494 }
2495
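/*
 * Main dispatch routine for symmetric requests: validate the cryptop,
 * sort its descriptors into at most one MAC and one encryption
 * operation, translate the algorithm, key and IV selections into Hifn
 * command masks and hand the assembled command to hifn_crypto().
 */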
2496 static int
2497 hifn_process(device_t dev, struct cryptop *crp, int hint)
2498 {
2499         struct hifn_softc *sc = device_get_softc(dev);
2500         struct hifn_command *cmd = NULL;
2501         int session, err, ivlen;
2502         struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
2503
2504         if (crp == NULL || crp->crp_callback == NULL) {
2505                 hifnstats.hst_invalid++;
2506                 return (EINVAL);
2507         }
2508         session = HIFN_SESSION(crp->crp_sid);
2509
2510         if (sc == NULL || session >= sc->sc_nsessions) {
2511                 err = EINVAL;
2512                 goto errout;
2513         }
2514
2515         cmd = kmalloc(sizeof(struct hifn_command), M_DEVBUF, M_INTWAIT | M_ZERO);
2516
2517         if (crp->crp_flags & CRYPTO_F_IMBUF) {
2518                 cmd->src_m = (struct mbuf *)crp->crp_buf;
2519                 cmd->dst_m = (struct mbuf *)crp->crp_buf;
2520         } else if (crp->crp_flags & CRYPTO_F_IOV) {
2521                 cmd->src_io = (struct uio *)crp->crp_buf;
2522                 cmd->dst_io = (struct uio *)crp->crp_buf;
2523         } else {
2524                 err = EINVAL;
2525                 goto errout;    /* XXX we don't handle contiguous buffers! */
2526         }
2527
2528         crd1 = crp->crp_desc;
2529         if (crd1 == NULL) {
2530                 err = EINVAL;
2531                 goto errout;
2532         }
2533         crd2 = crd1->crd_next;
2534
2535         if (crd2 == NULL) {
2536                 if (crd1->crd_alg == CRYPTO_MD5_HMAC ||
2537                     crd1->crd_alg == CRYPTO_SHA1_HMAC ||
2538                     crd1->crd_alg == CRYPTO_SHA1 ||
2539                     crd1->crd_alg == CRYPTO_MD5) {
2540                         maccrd = crd1;
2541                         enccrd = NULL;
2542                 } else if (crd1->crd_alg == CRYPTO_DES_CBC ||
2543                     crd1->crd_alg == CRYPTO_3DES_CBC ||
2544                     crd1->crd_alg == CRYPTO_AES_CBC ||
2545                     crd1->crd_alg == CRYPTO_ARC4) {
2546                         if ((crd1->crd_flags & CRD_F_ENCRYPT) == 0)
2547                                 cmd->base_masks |= HIFN_BASE_CMD_DECODE;
2548                         maccrd = NULL;
2549                         enccrd = crd1;
2550                 } else {
2551                         err = EINVAL;
2552                         goto errout;
2553                 }
2554         } else {
2555                 if ((crd1->crd_alg == CRYPTO_MD5_HMAC ||
2556                      crd1->crd_alg == CRYPTO_SHA1_HMAC ||
2557                      crd1->crd_alg == CRYPTO_MD5 ||
2558                      crd1->crd_alg == CRYPTO_SHA1) &&
2559                     (crd2->crd_alg == CRYPTO_DES_CBC ||
2560                      crd2->crd_alg == CRYPTO_3DES_CBC ||
2561                      crd2->crd_alg == CRYPTO_AES_CBC ||
2562                      crd2->crd_alg == CRYPTO_ARC4) &&
2563                     ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) {
2564                         cmd->base_masks = HIFN_BASE_CMD_DECODE;
2565                         maccrd = crd1;
2566                         enccrd = crd2;
2567                 } else if ((crd1->crd_alg == CRYPTO_DES_CBC ||
2568                      crd1->crd_alg == CRYPTO_ARC4 ||
2569                      crd1->crd_alg == CRYPTO_3DES_CBC ||
2570                      crd1->crd_alg == CRYPTO_AES_CBC) &&
2571                     (crd2->crd_alg == CRYPTO_MD5_HMAC ||
2572                      crd2->crd_alg == CRYPTO_SHA1_HMAC ||
2573                      crd2->crd_alg == CRYPTO_MD5 ||
2574                      crd2->crd_alg == CRYPTO_SHA1) &&
2575                     (crd1->crd_flags & CRD_F_ENCRYPT)) {
2576                         enccrd = crd1;
2577                         maccrd = crd2;
2578                 } else {
2579                         /*
2580                          * We cannot order the 7751 as requested
2581                          */
2582                         err = EINVAL;
2583                         goto errout;
2584                 }
2585         }
2586
2587         if (enccrd) {
2588                 cmd->enccrd = enccrd;
2589                 cmd->base_masks |= HIFN_BASE_CMD_CRYPT;
2590                 switch (enccrd->crd_alg) {
2591                 case CRYPTO_ARC4:
2592                         cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_RC4;
2593                         break;
2594                 case CRYPTO_DES_CBC:
2595                         cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_DES |
2596                             HIFN_CRYPT_CMD_MODE_CBC |
2597                             HIFN_CRYPT_CMD_NEW_IV;
2598                         break;
2599                 case CRYPTO_3DES_CBC:
2600                         cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_3DES |
2601                             HIFN_CRYPT_CMD_MODE_CBC |
2602                             HIFN_CRYPT_CMD_NEW_IV;
2603                         break;
2604                 case CRYPTO_AES_CBC:
2605                         cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_AES |
2606                             HIFN_CRYPT_CMD_MODE_CBC |
2607                             HIFN_CRYPT_CMD_NEW_IV;
2608                         break;
2609                 default:
2610                         err = EINVAL;
2611                         goto errout;
2612                 }
2613                 if (enccrd->crd_alg != CRYPTO_ARC4) {
2614                         ivlen = ((enccrd->crd_alg == CRYPTO_AES_CBC) ?
2615                                 HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
2616                         if (enccrd->crd_flags & CRD_F_ENCRYPT) {
2617                                 if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
2618                                         bcopy(enccrd->crd_iv, cmd->iv, ivlen);
2619                                 else
2620                                         bcopy(sc->sc_sessions[session].hs_iv,
2621                                             cmd->iv, ivlen);
2622
2623                                 if ((enccrd->crd_flags & CRD_F_IV_PRESENT)
2624                                     == 0) {
2625                                         crypto_copyback(crp->crp_flags,
2626                                             crp->crp_buf, enccrd->crd_inject,
2627                                             ivlen, cmd->iv);
2628                                 }
2629                         } else {
2630                                 if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
2631                                         bcopy(enccrd->crd_iv, cmd->iv, ivlen);
2632                                 else {
2633                                         crypto_copydata(crp->crp_flags,
2634                                             crp->crp_buf, enccrd->crd_inject,
2635                                             ivlen, cmd->iv);
2636                                 }
2637                         }
2638                 }
2639
2640                 if (enccrd->crd_flags & CRD_F_KEY_EXPLICIT)
2641                         cmd->cry_masks |= HIFN_CRYPT_CMD_NEW_KEY;
2642                 cmd->ck = enccrd->crd_key;
2643                 cmd->cklen = enccrd->crd_klen >> 3;
2644                 cmd->cry_masks |= HIFN_CRYPT_CMD_NEW_KEY;
2645
2646                 /*
2647                  * Need to specify the size for the AES key in the masks.
2648                  */
2649                 if ((cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) ==
2650                     HIFN_CRYPT_CMD_ALG_AES) {
2651                         switch (cmd->cklen) {
2652                         case 16:
2653                                 cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_128;
2654                                 break;
2655                         case 24:
2656                                 cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_192;
2657                                 break;
2658                         case 32:
2659                                 cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_256;
2660                                 break;
2661                         default:
2662                                 err = EINVAL;
2663                                 goto errout;
2664                         }
2665                 }
2666         }
2667
2668         if (maccrd) {
2669                 cmd->maccrd = maccrd;
2670                 cmd->base_masks |= HIFN_BASE_CMD_MAC;
2671
2672                 switch (maccrd->crd_alg) {
2673                 case CRYPTO_MD5:
2674                         cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 |
2675                             HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH |
2676                             HIFN_MAC_CMD_POS_IPSEC;
2677                        break;
2678                 case CRYPTO_MD5_HMAC:
2679                         cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 |
2680                             HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC |
2681                             HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC;
2682                         break;
2683                 case CRYPTO_SHA1:
2684                         cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 |
2685                             HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH |
2686                             HIFN_MAC_CMD_POS_IPSEC;
2687                         break;
2688                 case CRYPTO_SHA1_HMAC:
2689                         cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 |
2690                             HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC |
2691                             HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC;
2692                         break;
2693                 }
2694
2695                 if (maccrd->crd_alg == CRYPTO_SHA1_HMAC ||
2696                     maccrd->crd_alg == CRYPTO_MD5_HMAC) {
2697                         cmd->mac_masks |= HIFN_MAC_CMD_NEW_KEY;
2698                         bcopy(maccrd->crd_key, cmd->mac, maccrd->crd_klen >> 3);
2699                         bzero(cmd->mac + (maccrd->crd_klen >> 3),
2700                             HIFN_MAC_KEY_LENGTH - (maccrd->crd_klen >> 3));
2701                 }
2702         }
2703
2704         cmd->crp = crp;
2705         cmd->session_num = session;
2706         cmd->softc = sc;
2707
2708         err = hifn_crypto(sc, cmd, crp, hint);
2709         if (!err) {
2710                 return 0;
2711         } else if (err == ERESTART) {
2712                 /*
2713                  * There weren't enough resources to dispatch the request
2714                  * to the part.  Notify the caller so they'll requeue this
2715                  * request and resubmit it again soon.
2716                  */
2717 #ifdef HIFN_DEBUG
2718                 if (hifn_debug)
2719                         device_printf(sc->sc_dev, "requeue request\n");
2720 #endif
2721                 kfree(cmd, M_DEVBUF);
2722                 sc->sc_needwakeup |= CRYPTO_SYMQ;
2723                 return (err);
2724         }
2725
2726 errout:
2727         if (cmd != NULL)
2728                 kfree(cmd, M_DEVBUF);
2729         if (err == EINVAL)
2730                 hifnstats.hst_invalid++;
2731         else
2732                 hifnstats.hst_nomem++;
2733         crp->crp_etype = err;
2734         crypto_done(crp);
2735         return (err);
2736 }
2737
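/*
 * The chip aborted a DMA transfer: walk the result ring, completing the
 * requests that finished and failing the rest, then reset the board and
 * reinitialize the DMA rings and PCI registers.
 */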
2738 static void
2739 hifn_abort(struct hifn_softc *sc)
2740 {
2741         struct hifn_dma *dma = sc->sc_dma;
2742         struct hifn_command *cmd;
2743         struct cryptop *crp;
2744         int i, u;
2745
2746         i = dma->resk; u = dma->resu;
2747         while (u != 0) {
2748                 cmd = dma->hifn_commands[i];
2749                 KASSERT(cmd != NULL, ("hifn_abort: null command slot %u", i));
2750                 dma->hifn_commands[i] = NULL;
2751                 crp = cmd->crp;
2752
2753                 if ((dma->resr[i].l & htole32(HIFN_D_VALID)) == 0) {
2754                         /* Salvage what we can. */
2755                         u_int8_t *macbuf;
2756
2757                         if (cmd->base_masks & HIFN_BASE_CMD_MAC) {
2758                                 macbuf = dma->result_bufs[i];
2759                                 macbuf += 12;
2760                         } else
2761                                 macbuf = NULL;
2762                         hifnstats.hst_opackets++;
2763                         hifn_callback(sc, cmd, macbuf);
2764                 } else {
2765                         if (cmd->src_map == cmd->dst_map) {
2766                                 bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2767                                     BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2768                         } else {
2769                                 bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2770                                     BUS_DMASYNC_POSTWRITE);
2771                                 bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
2772                                     BUS_DMASYNC_POSTREAD);
2773                         }
2774
2775                         if (cmd->src_m != cmd->dst_m) {
2776                                 m_freem(cmd->src_m);
2777                                 crp->crp_buf = (caddr_t)cmd->dst_m;
2778                         }
2779
2780                         /* non-shared buffers cannot be restarted */
2781                         if (cmd->src_map != cmd->dst_map) {
2782                                 /*
2783                                  * XXX should be EAGAIN, delayed until
2784                                  * after the reset.
2785                                  */
2786                                 crp->crp_etype = ENOMEM;
2787                                 bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
2788                                 bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
2789                         } else
2790                                 crp->crp_etype = ENOMEM;
2791
2792                         bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
2793                         bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
2794
2795                         kfree(cmd, M_DEVBUF);
2796                         if (crp->crp_etype != EAGAIN)
2797                                 crypto_done(crp);
2798                 }
2799
2800                 if (++i == HIFN_D_RES_RSIZE)
2801                         i = 0;
2802                 u--;
2803         }
2804         dma->resk = i; dma->resu = u;
2805
2806         hifn_reset_board(sc, 1);
2807         hifn_init_dma(sc);
2808         hifn_init_pci_registers(sc);
2809 }
2810
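/*
 * Completion handling for one crypto request: sync the DMA maps,
 * hand back the output mbuf chain if a separate one was allocated,
 * copy back any slop bytes, reclaim finished destination descriptors,
 * save the next IV for CBC encrypt sessions, copy the MAC result
 * into the request, then release the DMA resources and call
 * crypto_done().
 */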
2811 static void
2812 hifn_callback(struct hifn_softc *sc, struct hifn_command *cmd, u_int8_t *macbuf)
2813 {
2814         struct hifn_dma *dma = sc->sc_dma;
2815         struct cryptop *crp = cmd->crp;
2816         struct cryptodesc *crd;
2817         struct mbuf *m;
2818         int totlen, i, u, ivlen;
2819
2820         if (cmd->src_map == cmd->dst_map) {
2821                 bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2822                     BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
2823         } else {
2824                 bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2825                     BUS_DMASYNC_POSTWRITE);
2826                 bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
2827                     BUS_DMASYNC_POSTREAD);
2828         }
2829
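        /*
         * If a separate output mbuf chain was allocated, trim it to the
         * original payload length, copy the packet header length over,
         * free the input chain and hand the new chain back to the caller.
         */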
2830         if (crp->crp_flags & CRYPTO_F_IMBUF) {
2831                 if (cmd->src_m != cmd->dst_m) {
2832                         crp->crp_buf = (caddr_t)cmd->dst_m;
2833                         totlen = cmd->src_mapsize;
2834                         for (m = cmd->dst_m; m != NULL; m = m->m_next) {
2835                                 if (totlen < m->m_len) {
2836                                         m->m_len = totlen;
2837                                         totlen = 0;
2838                                 } else
2839                                         totlen -= m->m_len;
2840                         }
2841                         cmd->dst_m->m_pkthdr.len = cmd->src_m->m_pkthdr.len;
2842                         m_freem(cmd->src_m);
2843                 }
2844         }
2845
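        /*
         * Trailing bytes that were staged in the shared slop buffer
         * (rather than DMAed directly to the destination) are copied
         * back into the caller's buffer here.
         */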
2846         if (cmd->sloplen != 0) {
2847                 crypto_copyback(crp->crp_flags, crp->crp_buf,
2848                     cmd->src_mapsize - cmd->sloplen, cmd->sloplen,
2849                     (caddr_t)&dma->slop[cmd->slopidx]);
2850         }
2851
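        /*
         * Reclaim completed destination descriptors, stopping at the
         * first one the hardware still owns (HIFN_D_VALID set).
         */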
2852         i = dma->dstk; u = dma->dstu;
2853         while (u != 0) {
2854                 if (i == HIFN_D_DST_RSIZE)
2855                         i = 0;
2856                 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
2857                     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2858                 if (dma->dstr[i].l & htole32(HIFN_D_VALID)) {
2859                         bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
2860                             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2861                         break;
2862                 }
2863                 i++, u--;
2864         }
2865         dma->dstk = i; dma->dstu = u;
2866
2867         hifnstats.hst_obytes += cmd->dst_mapsize;
2868
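        /*
         * For an encrypt operation, save the last cipher block so it can
         * be used as the IV for the next request in this session.
         */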
2869         if ((cmd->base_masks & (HIFN_BASE_CMD_CRYPT | HIFN_BASE_CMD_DECODE)) ==
2870             HIFN_BASE_CMD_CRYPT) {
2871                 for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
2872                         if (crd->crd_alg != CRYPTO_DES_CBC &&
2873                             crd->crd_alg != CRYPTO_3DES_CBC &&
2874                             crd->crd_alg != CRYPTO_AES_CBC)
2875                                 continue;
2876                         ivlen = ((crd->crd_alg == CRYPTO_AES_CBC) ?
2877                                  HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
2878                         crypto_copydata(crp->crp_flags, crp->crp_buf,
2879                             crd->crd_skip + crd->crd_len - ivlen, ivlen,
2880                             cmd->softc->sc_sessions[cmd->session_num].hs_iv);
2881                         break;
2882                 }
2883         }
2884
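        /*
         * If the command computed a MAC, copy the (possibly truncated)
         * digest from the result buffer into the request at crd_inject.
         */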
2885         if (macbuf != NULL) {
2886                 for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
2887                         int len;
2888
2889                         if (crd->crd_alg != CRYPTO_MD5 &&
2890                             crd->crd_alg != CRYPTO_SHA1 &&
2891                             crd->crd_alg != CRYPTO_MD5_HMAC &&
2892                             crd->crd_alg != CRYPTO_SHA1_HMAC) {
2893                                 continue;
2894                         }
2895                         len = cmd->softc->sc_sessions[cmd->session_num].hs_mlen;
2896                         crypto_copyback(crp->crp_flags, crp->crp_buf,
2897                             crd->crd_inject, len, macbuf);
2898                         break;
2899                 }
2900         }
2901
2902         if (cmd->src_map != cmd->dst_map) {
2903                 bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
2904                 bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
2905         }
2906         bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
2907         bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
2908         kfree(cmd, M_DEVBUF);
2909         crypto_done(crp);
2910 }
2911
2912 /*
2913  * 7811 PB3 rev/2 parts lock up on burst writes to Group 0
2914  * and Group 1 registers; avoid conditions that could create
2915  * burst writes by doing a read in between the writes.
2916  *
2917  * NB: The read we interpose is always to the same register;
2918  *     we do this because reading from an arbitrary (e.g. last)
2919  *     register may not always work.
2920  */
2921 static void
2922 hifn_write_reg_0(struct hifn_softc *sc, bus_size_t reg, u_int32_t val)
2923 {
2924         if (sc->sc_flags & HIFN_IS_7811) {
2925                 if (sc->sc_bar0_lastreg == reg - 4)
2926                         bus_space_read_4(sc->sc_st0, sc->sc_sh0, HIFN_0_PUCNFG);
2927                 sc->sc_bar0_lastreg = reg;
2928         }
2929         bus_space_write_4(sc->sc_st0, sc->sc_sh0, reg, val);
2930 }
2931
2932 static void
2933 hifn_write_reg_1(struct hifn_softc *sc, bus_size_t reg, u_int32_t val)
2934 {
2935         if (sc->sc_flags & HIFN_IS_7811) {
2936                 if (sc->sc_bar1_lastreg == reg - 4)
2937                         bus_space_read_4(sc->sc_st1, sc->sc_sh1, HIFN_1_REVID);
2938                 sc->sc_bar1_lastreg = reg;
2939         }
2940         bus_space_write_4(sc->sc_st1, sc->sc_sh1, reg, val);
2941 }