hifn - Update and unbreak
1 /* $FreeBSD: src/sys/dev/hifn/hifn7751.c,v 1.5.2.5 2003/06/04 17:56:59 sam Exp $ */
2 /* $DragonFly: src/sys/dev/crypto/hifn/hifn7751.c,v 1.14 2007/12/04 09:11:12 hasso Exp $ */
3 /* $OpenBSD: hifn7751.c,v 1.120 2002/05/17 00:33:34 deraadt Exp $       */
4
5 /*
6  * Invertex AEON / Hifn 7751 driver
7  * Copyright (c) 1999 Invertex Inc. All rights reserved.
8  * Copyright (c) 1999 Theo de Raadt
9  * Copyright (c) 2000-2001 Network Security Technologies, Inc.
10  *                      http://www.netsec.net
11  * Copyright (c) 2003 Hifn Inc.
12  *
13  * This driver is based on a previous driver by Invertex, for which they
14  * requested:  Please send any comments, feedback, bug-fixes, or feature
15  * requests to software@invertex.com.
16  *
17  * Redistribution and use in source and binary forms, with or without
18  * modification, are permitted provided that the following conditions
19  * are met:
20  *
21  * 1. Redistributions of source code must retain the above copyright
22  *   notice, this list of conditions and the following disclaimer.
23  * 2. Redistributions in binary form must reproduce the above copyright
24  *   notice, this list of conditions and the following disclaimer in the
25  *   documentation and/or other materials provided with the distribution.
26  * 3. The name of the author may not be used to endorse or promote products
27  *   derived from this software without specific prior written permission.
28  *
29  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
30  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
31  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
32  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
33  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
34  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
38  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39  *
40  * Effort sponsored in part by the Defense Advanced Research Projects
41  * Agency (DARPA) and Air Force Research Laboratory, Air Force
42  * Materiel Command, USAF, under agreement number F30602-01-2-0537.
43  *
44  */
45
46 /*
47  * Driver for various Hifn encryption processors.
48  */
49 #include "opt_hifn.h"
50
51 #include <sys/param.h>
52 #include <sys/systm.h>
53 #include <sys/proc.h>
54 #include <sys/errno.h>
55 #include <sys/malloc.h>
56 #include <sys/kernel.h>
57 #include <sys/mbuf.h>
58 #include <sys/sysctl.h>
59 #include <sys/bus.h>
60 #include <sys/rman.h>
61 #include <sys/random.h>
62 #include <sys/thread2.h>
63 #include <sys/uio.h>
64
65 #include <vm/vm.h>
66 #include <vm/pmap.h>
67
68 #include <machine/clock.h>
69 #include <opencrypto/cryptodev.h>
70
71 #include "cryptodev_if.h"
72
73 #include <bus/pci/pcivar.h>
74 #include <bus/pci/pcireg.h>
75
76 #ifdef HIFN_RNDTEST
77 #include "../rndtest/rndtest.h"
78 #endif
79 #include "hifn7751reg.h"
80 #include "hifn7751var.h"
81
82 /*
83  * Prototypes for the device methods and internal support routines
84  */
85 static  int hifn_probe(device_t);
86 static  int hifn_attach(device_t);
87 static  int hifn_detach(device_t);
88 static  int hifn_suspend(device_t);
89 static  int hifn_resume(device_t);
90 static  void hifn_shutdown(device_t);
91
92 static  void hifn_reset_board(struct hifn_softc *, int);
93 static  void hifn_reset_puc(struct hifn_softc *);
94 static  void hifn_puc_wait(struct hifn_softc *);
95 static  int hifn_enable_crypto(struct hifn_softc *);
96 static  void hifn_set_retry(struct hifn_softc *sc);
97 static  void hifn_init_dma(struct hifn_softc *);
98 static  void hifn_init_pci_registers(struct hifn_softc *);
99 static  int hifn_sramsize(struct hifn_softc *);
100 static  int hifn_dramsize(struct hifn_softc *);
101 static  int hifn_ramtype(struct hifn_softc *);
102 static  void hifn_sessions(struct hifn_softc *);
103 static  void hifn_intr(void *);
104 static  u_int hifn_write_command(struct hifn_command *, u_int8_t *);
105 static  u_int32_t hifn_next_signature(u_int32_t a, u_int cnt);
106 static  int hifn_newsession(device_t, u_int32_t *, struct cryptoini *);
107 static  int hifn_freesession(device_t, u_int64_t);
108 static  int hifn_process(device_t, struct cryptop *, int);
109 static  void hifn_callback(struct hifn_softc *, struct hifn_command *, u_int8_t *);
110 static  int hifn_crypto(struct hifn_softc *, struct hifn_command *, struct cryptop *, int);
111 static  int hifn_readramaddr(struct hifn_softc *, int, u_int8_t *);
112 static  int hifn_writeramaddr(struct hifn_softc *, int, u_int8_t *);
113 static  int hifn_dmamap_load_src(struct hifn_softc *, struct hifn_command *);
114 static  int hifn_dmamap_load_dst(struct hifn_softc *, struct hifn_command *);
115 static  int hifn_init_pubrng(struct hifn_softc *);
116 #ifndef HIFN_NO_RNG
117 static  void hifn_rng(void *);
118 #endif
119 static  void hifn_tick(void *);
120 static  void hifn_abort(struct hifn_softc *);
121 static  void hifn_alloc_slot(struct hifn_softc *, int *, int *, int *, int *);
122
123 static  void hifn_write_reg_0(struct hifn_softc *, bus_size_t, u_int32_t);
124 static  void hifn_write_reg_1(struct hifn_softc *, bus_size_t, u_int32_t);
125
126
127 static device_method_t hifn_methods[] = {
128         /* Device interface */
129         DEVMETHOD(device_probe,         hifn_probe),
130         DEVMETHOD(device_attach,        hifn_attach),
131         DEVMETHOD(device_detach,        hifn_detach),
132         DEVMETHOD(device_suspend,       hifn_suspend),
133         DEVMETHOD(device_resume,        hifn_resume),
134         DEVMETHOD(device_shutdown,      hifn_shutdown),
135
136         /* bus interface */
137         DEVMETHOD(bus_print_child,      bus_generic_print_child),
138         DEVMETHOD(bus_driver_added,     bus_generic_driver_added),
139
140         /* crypto device methods */
141         DEVMETHOD(cryptodev_newsession, hifn_newsession),
142         DEVMETHOD(cryptodev_freesession,hifn_freesession),
143         DEVMETHOD(cryptodev_process,    hifn_process),
144
145         { 0, 0 }
146 };
147 static driver_t hifn_driver = {
148         "hifn",
149         hifn_methods,
150         sizeof (struct hifn_softc)
151 };
152 static devclass_t hifn_devclass;
153
154 DECLARE_DUMMY_MODULE(hifn);
155 DRIVER_MODULE(hifn, pci, hifn_driver, hifn_devclass, 0, 0);
156 MODULE_DEPEND(hifn, crypto, 1, 1, 1);
157 #ifdef HIFN_RNDTEST
158 MODULE_DEPEND(hifn, rndtest, 1, 1, 1);
159 #endif
160
161 static __inline__ u_int32_t
162 READ_REG_0(struct hifn_softc *sc, bus_size_t reg)
163 {
164     u_int32_t v = bus_space_read_4(sc->sc_st0, sc->sc_sh0, reg);
165     sc->sc_bar0_lastreg = (bus_size_t) -1;
166     return (v);
167 }
168 #define WRITE_REG_0(sc, reg, val)       hifn_write_reg_0(sc, reg, val)
169
170 static __inline__ u_int32_t
171 READ_REG_1(struct hifn_softc *sc, bus_size_t reg)
172 {
173     u_int32_t v = bus_space_read_4(sc->sc_st1, sc->sc_sh1, reg);
174     sc->sc_bar1_lastreg = (bus_size_t) -1;
175     return (v);
176 }
177 #define WRITE_REG_1(sc, reg, val)       hifn_write_reg_1(sc, reg, val)
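
/*
 * NB: the register read helpers above reset sc_bar0_lastreg and
 * sc_bar1_lastreg to -1.  The matching hifn_write_reg_0()/
 * hifn_write_reg_1() routines (prototyped above, defined later in
 * this file) are expected to use these fields to remember the last
 * register written, so a flushing read can be interposed between
 * back-to-back writes; the intent is to avoid burst writes, which
 * reportedly can lock up some 7811 revisions.  A read from either
 * BAR already breaks such a sequence, hence the reset here.
 */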
178
179 SYSCTL_NODE(_hw, OID_AUTO, hifn, CTLFLAG_RD, 0, "Hifn driver parameters");
180
181 #ifdef HIFN_DEBUG
182 static  int hifn_debug = 0;
183 SYSCTL_INT(_hw_hifn, OID_AUTO, debug, CTLFLAG_RW, &hifn_debug,
184             0, "control debugging msgs");
185 #endif
186
187 static  struct hifn_stats hifnstats;
188 SYSCTL_STRUCT(_hw_hifn, OID_AUTO, stats, CTLFLAG_RD, &hifnstats,
189             hifn_stats, "driver statistics");
190 static  int hifn_maxbatch = 1;
191 SYSCTL_INT(_hw_hifn, OID_AUTO, maxbatch, CTLFLAG_RW, &hifn_maxbatch,
192             0, "max ops to batch w/o interrupt");
193
194 /*
195  * Probe for a supported device.  The PCI vendor and device
196  * IDs are used to detect devices we know how to handle.
197  */
198 static int
199 hifn_probe(device_t dev)
200 {
201         if (pci_get_vendor(dev) == PCI_VENDOR_INVERTEX &&
202             pci_get_device(dev) == PCI_PRODUCT_INVERTEX_AEON)
203                 return (0);
204         if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
205             (pci_get_device(dev) == PCI_PRODUCT_HIFN_7751 ||
206              pci_get_device(dev) == PCI_PRODUCT_HIFN_7951 ||
207              pci_get_device(dev) == PCI_PRODUCT_HIFN_7955 ||
208              pci_get_device(dev) == PCI_PRODUCT_HIFN_7956 ||
209              pci_get_device(dev) == PCI_PRODUCT_HIFN_7811))
210                 return (0);
211         if (pci_get_vendor(dev) == PCI_VENDOR_NETSEC &&
212             pci_get_device(dev) == PCI_PRODUCT_NETSEC_7751)
213                 return (0);
214         if (pci_get_vendor(dev) == PCI_VENDOR_HIFN) {
215                 device_printf(dev,"device id = 0x%x\n", pci_get_device(dev) );
216                 return (0);
217         }
218         return (ENXIO);
219 }
220
221 static void
222 hifn_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
223 {
224         bus_addr_t *paddr = (bus_addr_t*) arg;
225         *paddr = segs->ds_addr;
226 }
227
228 static const char*
229 hifn_partname(struct hifn_softc *sc)
230 {
231         /* XXX sprintf numbers when not decoded */
232         switch (pci_get_vendor(sc->sc_dev)) {
233         case PCI_VENDOR_HIFN:
234                 switch (pci_get_device(sc->sc_dev)) {
235                 case PCI_PRODUCT_HIFN_6500:     return "Hifn 6500";
236                 case PCI_PRODUCT_HIFN_7751:     return "Hifn 7751";
237                 case PCI_PRODUCT_HIFN_7811:     return "Hifn 7811";
238                 case PCI_PRODUCT_HIFN_7951:     return "Hifn 7951";
239                 case PCI_PRODUCT_HIFN_7955:     return "Hifn 7955";
240                 case PCI_PRODUCT_HIFN_7956:     return "Hifn 7956";
241                 }
242                 return "Hifn unknown-part";
243         case PCI_VENDOR_INVERTEX:
244                 switch (pci_get_device(sc->sc_dev)) {
245                 case PCI_PRODUCT_INVERTEX_AEON: return "Invertex AEON";
246                 }
247                 return "Invertex unknown-part";
248         case PCI_VENDOR_NETSEC:
249                 switch (pci_get_device(sc->sc_dev)) {
250                 case PCI_PRODUCT_NETSEC_7751:   return "NetSec 7751";
251                 }
252                 return "NetSec unknown-part";
253         }
254         return "Unknown-vendor unknown-part";
255 }
256
257 static void
258 default_harvest(struct rndtest_state *rsp, void *buf, u_int count)
259 {
260         u_int32_t *p = (u_int32_t *)buf;
261         for (count /= sizeof (u_int32_t); count; count--)
262                 add_true_randomness(*p++);
263 }
264
265 static u_int
266 checkmaxmin(device_t dev, const char *what, u_int v, u_int min, u_int max)
267 {
268         if (v > max) {
269                 device_printf(dev, "Warning, %s %u out of range, "
270                     "using max %u\n", what, v, max);
271                 v = max;
272         } else if (v < min) {
273                 device_printf(dev, "Warning, %s %u out of range, "
274                     "using min %u\n", what, v, min);
275                 v = min;
276         }
277         return v;
278 }
279
280 /*
281  * Select PLL configuration for 795x parts.  This is complicated in
282  * that we cannot determine the optimal parameters without user input.
283  * The reference clock is derived from an external clock through a
284  * multiplier.  The external clock is either the host bus (i.e. PCI)
285  * or an external clock generator.  When using the PCI bus we assume
286  * the clock is either 33 or 66 MHz; for an external source we cannot
287  * tell the speed.
288  *
289  * PLL configuration is done with a string: "pci" for PCI bus, or "ext"
290  * for an external source, followed by the frequency.  We calculate
291  * the appropriate multiplier and PLL register contents accordingly.
292  * When no configuration is given we default to "ext66" (see below).
293  * If a card is configured to use the PCI bus clock but sits in a
294  * 33MHz slot, it will be operating at half speed until the correct
295  * information is provided.
296  *
297  * We default to "ext66" because, according to Mike Ham of HiFn,
298  * almost every board in existence has an external crystal populated
299  * at 66MHz.  Using PCI can be a problem on modern motherboards,
300  * because PCI33 can have clocks anywhere from 0 to 33MHz, and some
301  * have non-PCI-compliant spread-spectrum clocks, which can confuse the PLL.
302  */
303 static void
304 hifn_getpllconfig(device_t dev, u_int *pll)
305 {
306         char *pllspec;
307         u_int freq, mul, fl, fh;
308         u_int32_t pllconfig;
309         char *nxt;
310
311         if (resource_string_value("hifn", device_get_unit(dev),
312             "pllconfig", &pllspec))
313                 pllspec = "ext66";
314         fl = 33, fh = 66;
315         pllconfig = 0;
316         if (strncmp(pllspec, "ext", 3) == 0) {
317                 pllspec += 3;
318                 pllconfig |= HIFN_PLL_REF_SEL;
319                 switch (pci_get_device(dev)) {
320                 case PCI_PRODUCT_HIFN_7955:
321                 case PCI_PRODUCT_HIFN_7956:
322                         fl = 20, fh = 100;
323                         break;
324 #ifdef notyet
325                 case PCI_PRODUCT_HIFN_7954:
326                         fl = 20, fh = 66;
327                         break;
328 #endif
329                 }
330         } else if (strncmp(pllspec, "pci", 3) == 0)
331                 pllspec += 3;
332         freq = strtoul(pllspec, &nxt, 10);
333         if (nxt == pllspec)
334                 freq = 66;
335         else
336                 freq = checkmaxmin(dev, "frequency", freq, fl, fh);
337         /*
338          * Calculate multiplier.  We target a Fck of 266 MHz,
339          * allowing only even values, possibly rounded down.
340          * Multipliers > 8 must set the charge pump current.
341          */
342         mul = checkmaxmin(dev, "PLL divisor", (266 / freq) &~ 1, 2, 12);
343         pllconfig |= (mul / 2 - 1) << HIFN_PLL_ND_SHIFT;
344         if (mul > 8)
345                 pllconfig |= HIFN_PLL_IS;
346         *pll = pllconfig;
347 }
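
/*
 * Illustrative usage: the PLL specification is fetched with
 * resource_string_value(), i.e. from the standard device hints
 * (hint.hifn.<unit>.pllconfig), so a board fed by a 33MHz PCI clock
 * could be configured with something like
 *
 *      hint.hifn.0.pllconfig="pci33"
 *
 * in /boot/loader.conf.  With freq = 33 the code above computes
 * mul = (266 / 33) & ~1 = 8, i.e. an ND field of mul/2 - 1 = 3 and
 * no charge-pump adjustment, since mul is not greater than 8.
 */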
348
349 /*
350  * Attach an interface that successfully probed.
351  */
352 static int 
353 hifn_attach(device_t dev)
354 {
355         struct hifn_softc *sc = device_get_softc(dev);
356         u_int32_t cmd;
357         caddr_t kva;
358         int rseg, rid;
359         char rbase;
360         u_int16_t ena, rev;
361
362         KASSERT(sc != NULL, ("hifn_attach: null software carrier!"));
363         bzero(sc, sizeof (*sc));
364         sc->sc_dev = dev;
365
366         lockinit(&sc->sc_lock, __DECONST(char *, device_get_nameunit(dev)),
367             0, LK_CANRECURSE);
368
369         /* XXX handle power management */
370
371         /*
372          * The 7951 and 795x have a random number generator and
373          * public key support; note this.
374          */
375         if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
376             (pci_get_device(dev) == PCI_PRODUCT_HIFN_7951 ||
377              pci_get_device(dev) == PCI_PRODUCT_HIFN_7955 ||
378              pci_get_device(dev) == PCI_PRODUCT_HIFN_7956))
379                 sc->sc_flags = HIFN_HAS_RNG | HIFN_HAS_PUBLIC;
380         /*
381          * The 7811 has a random number generator; we also note its
382          * identity because of some quirks.
383          */
384         if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
385             pci_get_device(dev) == PCI_PRODUCT_HIFN_7811)
386                 sc->sc_flags |= HIFN_IS_7811 | HIFN_HAS_RNG;
387
388         /*
389          * The 795x parts support AES.
390          */
391         if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
392             (pci_get_device(dev) == PCI_PRODUCT_HIFN_7955 ||
393              pci_get_device(dev) == PCI_PRODUCT_HIFN_7956)) {
394                 sc->sc_flags |= HIFN_IS_7956 | HIFN_HAS_AES;
395                 /*
396                  * Select PLL configuration.  This depends on the
397                  * bus and board design and must be manually configured
398                  * if the default setting is unacceptable.
399                  */
400                 hifn_getpllconfig(dev, &sc->sc_pllconfig);
401         }
402
403         /*
404          * Configure support for memory-mapped access to
405          * registers and for DMA operations.
406          */
407 #define PCIM_ENA        (PCIM_CMD_MEMEN|PCIM_CMD_BUSMASTEREN)
408         cmd = pci_read_config(dev, PCIR_COMMAND, 4);
409         cmd |= PCIM_ENA;
410         pci_write_config(dev, PCIR_COMMAND, cmd, 4);
411         cmd = pci_read_config(dev, PCIR_COMMAND, 4);
412         if ((cmd & PCIM_ENA) != PCIM_ENA) {
413                 device_printf(dev, "failed to enable %s\n",
414                         (cmd & PCIM_ENA) == 0 ?
415                                 "memory mapping & bus mastering" :
416                         (cmd & PCIM_CMD_MEMEN) == 0 ?
417                                 "memory mapping" : "bus mastering");
418                 goto fail_pci;
419         }
420 #undef PCIM_ENA
421
422         /*
423          * Setup PCI resources. Note that we record the bus
424          * tag and handle for each register mapping, this is
425          * used by the READ_REG_0, WRITE_REG_0, READ_REG_1,
426          * and WRITE_REG_1 macros throughout the driver.
427          */
428         rid = HIFN_BAR0;
429         sc->sc_bar0res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid,
430                                             0, ~0, 1, RF_ACTIVE);
431         if (sc->sc_bar0res == NULL) {
432                 device_printf(dev, "cannot map bar%d register space\n", 0);
433                 goto fail_pci;
434         }
435         sc->sc_st0 = rman_get_bustag(sc->sc_bar0res);
436         sc->sc_sh0 = rman_get_bushandle(sc->sc_bar0res);
437         sc->sc_bar0_lastreg = (bus_size_t) -1;
438
439         rid = HIFN_BAR1;
440         sc->sc_bar1res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid,
441                                             0, ~0, 1, RF_ACTIVE);
442         if (sc->sc_bar1res == NULL) {
443                 device_printf(dev, "cannot map bar%d register space\n", 1);
444                 goto fail_io0;
445         }
446         sc->sc_st1 = rman_get_bustag(sc->sc_bar1res);
447         sc->sc_sh1 = rman_get_bushandle(sc->sc_bar1res);
448         sc->sc_bar1_lastreg = (bus_size_t) -1;
449
450         hifn_set_retry(sc);
451
452         /*
453          * Set up the area where the Hifn's DMA descriptors
454          * and associated data structures will reside.
455          */
456         if (bus_dma_tag_create(NULL,                    /* parent */
457                                1, 0,                    /* alignment,boundary */
458                                BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
459                                BUS_SPACE_MAXADDR,       /* highaddr */
460                                NULL, NULL,              /* filter, filterarg */
461                                HIFN_MAX_DMALEN,         /* maxsize */
462                                MAX_SCATTER,             /* nsegments */
463                                HIFN_MAX_SEGLEN,         /* maxsegsize */
464                                BUS_DMA_ALLOCNOW,        /* flags */
465                                &sc->sc_dmat)) {
466                 device_printf(dev, "cannot allocate DMA tag\n");
467                 goto fail_io1;
468         }
469         if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, &sc->sc_dmamap)) {
470                 device_printf(dev, "cannot create dma map\n");
471                 bus_dma_tag_destroy(sc->sc_dmat);
472                 goto fail_io1;
473         }
474         if (bus_dmamem_alloc(sc->sc_dmat, (void**) &kva, BUS_DMA_NOWAIT, &sc->sc_dmamap)) {
475                 device_printf(dev, "cannot alloc dma buffer\n");
476                 bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap);
477                 bus_dma_tag_destroy(sc->sc_dmat);
478                 goto fail_io1;
479         }
480         if (bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap, kva,
481                              sizeof (*sc->sc_dma),
482                              hifn_dmamap_cb, &sc->sc_dma_physaddr,
483                              BUS_DMA_NOWAIT)) {
484                 device_printf(dev, "cannot load dma map\n");
485                 bus_dmamem_free(sc->sc_dmat, kva, sc->sc_dmamap);
486                 bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap);
487                 bus_dma_tag_destroy(sc->sc_dmat);
488                 goto fail_io1;
489         }
490         sc->sc_dma = (struct hifn_dma *)kva;
491         bzero(sc->sc_dma, sizeof(*sc->sc_dma));
492
493         KASSERT(sc->sc_st0 != 0, ("hifn_attach: null bar0 tag!"));
494         KASSERT(sc->sc_sh0 != 0, ("hifn_attach: null bar0 handle!"));
495         KASSERT(sc->sc_st1 != 0, ("hifn_attach: null bar1 tag!"));
496         KASSERT(sc->sc_sh1 != 0, ("hifn_attach: null bar1 handle!"));
497
498         /*
499          * Reset the board and do the ``secret handshake''
500          * to enable the crypto support.  Then complete the
501          * initialization procedure by setting up the interrupt
502          * and hooking in to the system crypto support so we'll
503          * and hooking into the system crypto support so we'll
504          * be used for system services like the crypto device,
505          */
506         hifn_reset_board(sc, 0);
507
508         if (hifn_enable_crypto(sc) != 0) {
509                 device_printf(dev, "crypto enabling failed\n");
510                 goto fail_mem;
511         }
512         hifn_reset_puc(sc);
513
514         hifn_init_dma(sc);
515         hifn_init_pci_registers(sc);
516
517         /* XXX can't dynamically determine ram type for 795x; force dram */
518         if (sc->sc_flags & HIFN_IS_7956)
519                 sc->sc_drammodel = 1;
520         else if (hifn_ramtype(sc))
521                 goto fail_mem;
522
523         if (sc->sc_drammodel == 0)
524                 hifn_sramsize(sc);
525         else
526                 hifn_dramsize(sc);
527
528         /*
529          * Workaround for NetSec 7751 rev A: halve the RAM size because
530          * two of the address lines were left floating.
531          */
532         if (pci_get_vendor(dev) == PCI_VENDOR_NETSEC &&
533             pci_get_device(dev) == PCI_PRODUCT_NETSEC_7751 &&
534             pci_get_revid(dev) == 0x61) /*XXX???*/
535                 sc->sc_ramsize >>= 1;
536
537         /*
538          * Arrange the interrupt line.
539          */
540         rid = 0;
541         sc->sc_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid,
542                                         0, ~0, 1, RF_SHAREABLE|RF_ACTIVE);
543         if (sc->sc_irq == NULL) {
544                 device_printf(dev, "could not map interrupt\n");
545                 goto fail_mem;
546         }
547         /*
548          * NB: Network code assumes we are blocked with splimp()
549          *     so make sure the IRQ is marked appropriately.
550          */
551         if (bus_setup_intr(dev, sc->sc_irq, INTR_MPSAFE,
552                            hifn_intr, sc,
553                            &sc->sc_intrhand, NULL)) {
554                 device_printf(dev, "could not setup interrupt\n");
555                 goto fail_intr2;
556         }
557
558         hifn_sessions(sc);
559
560         /*
561          * NB: Keep only the low 16 bits; this masks the chip id
562          *     from the 7951.
563          */
564         rev = READ_REG_1(sc, HIFN_1_REVID) & 0xffff;
565
566         rseg = sc->sc_ramsize / 1024;
567         rbase = 'K';
568         if (sc->sc_ramsize >= (1024 * 1024)) {
569                 rbase = 'M';
570                 rseg /= 1024;
571         }
572         device_printf(sc->sc_dev, "%s, rev %u, %d%cB %cram, %u sessions",
573                 hifn_partname(sc), rev,
574                 rseg, rbase, sc->sc_drammodel ? 'd' : 's',
575                 sc->sc_maxses);
576
577         if (sc->sc_flags & HIFN_IS_7956)
578                 kprintf(", pll=0x%x<%s clk, %ux mult>",
579                         sc->sc_pllconfig,
580                         sc->sc_pllconfig & HIFN_PLL_REF_SEL ? "ext" : "pci",
581                         2 + 2*((sc->sc_pllconfig & HIFN_PLL_ND) >> 11));
582         kprintf("\n");
583
584         sc->sc_cid = crypto_get_driverid(dev, CRYPTOCAP_F_HARDWARE);
585         if (sc->sc_cid < 0) {
586                 device_printf(dev, "could not get crypto driver id\n");
587                 goto fail_intr;
588         }
589
590         WRITE_REG_0(sc, HIFN_0_PUCNFG,
591             READ_REG_0(sc, HIFN_0_PUCNFG) | HIFN_PUCNFG_CHIPID);
592         ena = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;
593
594         switch (ena) {
595         case HIFN_PUSTAT_ENA_2:
596                 crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0);
597                 crypto_register(sc->sc_cid, CRYPTO_ARC4, 0, 0);
598                 if (sc->sc_flags & HIFN_HAS_AES)
599                         crypto_register(sc->sc_cid, CRYPTO_AES_CBC,  0, 0);
600                 /*FALLTHROUGH*/
601         case HIFN_PUSTAT_ENA_1:
602                 crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0);
603                 crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0);
604                 crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0);
605                 crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0);
606                 crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0);
607                 break;
608         }
609
610         bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
611             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
612
613         if (sc->sc_flags & (HIFN_HAS_PUBLIC | HIFN_HAS_RNG))
614                 hifn_init_pubrng(sc);
615
616         /* NB: callout_init_mp() means the callout runs w/o the MP lock held */
617         callout_init_mp(&sc->sc_tickto);
618         callout_reset(&sc->sc_tickto, hz, hifn_tick, sc);
619
620         return (0);
621
622 fail_intr:
623         bus_teardown_intr(dev, sc->sc_irq, sc->sc_intrhand);
624 fail_intr2:
625         /* XXX don't store rid */
626         bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq);
627 fail_mem:
628         bus_dmamap_unload(sc->sc_dmat, sc->sc_dmamap);
629         bus_dmamem_free(sc->sc_dmat, sc->sc_dma, sc->sc_dmamap);
630         bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap);
631         bus_dma_tag_destroy(sc->sc_dmat);
632
633         /* Turn off DMA polling */
634         WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
635             HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
636 fail_io1:
637         bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR1, sc->sc_bar1res);
638 fail_io0:
639         bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR0, sc->sc_bar0res);
640 fail_pci:
641         lockuninit(&sc->sc_lock);
642         return (ENXIO);
643 }
644
645 /*
646  * Detach an interface that successfully probed.
647  */
648 static int 
649 hifn_detach(device_t dev)
650 {
651         struct hifn_softc *sc = device_get_softc(dev);
652
653         KASSERT(sc != NULL, ("hifn_detach: null software carrier!"));
654
655         /* disable interrupts */
656         WRITE_REG_1(sc, HIFN_1_DMA_IER, 0);
657
658         /*XXX other resources */
659         callout_stop(&sc->sc_tickto);
660         callout_stop(&sc->sc_rngto);
661 #ifdef HIFN_RNDTEST
662         if (sc->sc_rndtest)
663                 rndtest_detach(sc->sc_rndtest);
664 #endif
665
666         /* Turn off DMA polling */
667         WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
668             HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
669
670         crypto_unregister_all(sc->sc_cid);
671
672         bus_generic_detach(dev);        /*XXX should be no children, right? */
673
674         bus_teardown_intr(dev, sc->sc_irq, sc->sc_intrhand);
675         /* XXX don't store rid */
676         bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq);
677
678         bus_dmamap_unload(sc->sc_dmat, sc->sc_dmamap);
679         bus_dmamem_free(sc->sc_dmat, sc->sc_dma, sc->sc_dmamap);
680         bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap);
681         bus_dma_tag_destroy(sc->sc_dmat);
682
683         bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR1, sc->sc_bar1res);
684         bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR0, sc->sc_bar0res);
685
686         lockuninit(&sc->sc_lock);
687
688         return (0);
689 }
690
691 /*
692  * Stop all chip I/O so that the kernel's probe routines don't
693  * get confused by errant DMAs when rebooting.
694  */
695 static void
696 hifn_shutdown(device_t dev)
697 {
698 #ifdef notyet
699         hifn_stop(device_get_softc(dev));
700 #endif
701 }
702
703 /*
704  * Device suspend routine.  Stop the interface and save some PCI
705  * settings in case the BIOS doesn't restore them properly on
706  * resume.
707  */
708 static int
709 hifn_suspend(device_t dev)
710 {
711         struct hifn_softc *sc = device_get_softc(dev);
712 #ifdef notyet
713         int i;
714
715         hifn_stop(sc);
716         for (i = 0; i < 5; i++)
717                 sc->saved_maps[i] = pci_read_config(dev, PCIR_MAPS + i * 4, 4);
718         sc->saved_biosaddr = pci_read_config(dev, PCIR_BIOS, 4);
719         sc->saved_intline = pci_read_config(dev, PCIR_INTLINE, 1);
720         sc->saved_cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1);
721         sc->saved_lattimer = pci_read_config(dev, PCIR_LATTIMER, 1);
722 #endif
723         sc->sc_suspended = 1;
724
725         return (0);
726 }
727
728 /*
729  * Device resume routine.  Restore some PCI settings in case the BIOS
730  * doesn't, re-enable busmastering, and restart the interface if
731  * appropriate.
732  */
733 static int
734 hifn_resume(device_t dev)
735 {
736         struct hifn_softc *sc = device_get_softc(dev);
737 #ifdef notyet
738         int i;
739
740         /* better way to do this? */
741         for (i = 0; i < 5; i++)
742                 pci_write_config(dev, PCIR_MAPS + i * 4, sc->saved_maps[i], 4);
743         pci_write_config(dev, PCIR_BIOS, sc->saved_biosaddr, 4);
744         pci_write_config(dev, PCIR_INTLINE, sc->saved_intline, 1);
745         pci_write_config(dev, PCIR_CACHELNSZ, sc->saved_cachelnsz, 1);
746         pci_write_config(dev, PCIR_LATTIMER, sc->saved_lattimer, 1);
747
748         /* reenable busmastering */
749         pci_enable_busmaster(dev);
750         pci_enable_io(dev, HIFN_RES);
751
752         /* reinitialize interface if necessary */
753         if (ifp->if_flags & IFF_UP)
754                 rl_init(sc);
755 #endif
756         sc->sc_suspended = 0;
757
758         return (0);
759 }
760
761 static int
762 hifn_init_pubrng(struct hifn_softc *sc)
763 {
764         u_int32_t r;
765         int i;
766
767 #ifdef HIFN_RNDTEST
768         sc->sc_rndtest = rndtest_attach(sc->sc_dev);
769         if (sc->sc_rndtest)
770                 sc->sc_harvest = rndtest_harvest;
771         else
772                 sc->sc_harvest = default_harvest;
773 #else
774         sc->sc_harvest = default_harvest;
775 #endif
776         if ((sc->sc_flags & HIFN_IS_7811) == 0) {
777                 /* Reset 7951 public key/rng engine */
778                 WRITE_REG_1(sc, HIFN_1_PUB_RESET,
779                     READ_REG_1(sc, HIFN_1_PUB_RESET) | HIFN_PUBRST_RESET);
780
781                 for (i = 0; i < 100; i++) {
782                         DELAY(1000);
783                         if ((READ_REG_1(sc, HIFN_1_PUB_RESET) &
784                             HIFN_PUBRST_RESET) == 0)
785                                 break;
786                 }
787
788                 if (i == 100) {
789                         device_printf(sc->sc_dev, "public key init failed\n");
790                         return (1);
791                 }
792         }
793
794 #ifndef HIFN_NO_RNG
795         /* Enable the rng, if available */
796         if (sc->sc_flags & HIFN_HAS_RNG) {
797                 if (sc->sc_flags & HIFN_IS_7811) {
798                         r = READ_REG_1(sc, HIFN_1_7811_RNGENA);
799                         if (r & HIFN_7811_RNGENA_ENA) {
800                                 r &= ~HIFN_7811_RNGENA_ENA;
801                                 WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
802                         }
803                         WRITE_REG_1(sc, HIFN_1_7811_RNGCFG,
804                             HIFN_7811_RNGCFG_DEFL);
805                         r |= HIFN_7811_RNGENA_ENA;
806                         WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
807                 } else
808                         WRITE_REG_1(sc, HIFN_1_RNG_CONFIG,
809                             READ_REG_1(sc, HIFN_1_RNG_CONFIG) |
810                             HIFN_RNGCFG_ENA);
811
812                 sc->sc_rngfirst = 1;
813                 if (hz >= 100)
814                         sc->sc_rnghz = hz / 100;
815                 else
816                         sc->sc_rnghz = 1;
817                 /* NB: callout_init_mp() means the callout runs w/o the MP lock held */
818                 callout_init_mp(&sc->sc_rngto);
819                 callout_reset(&sc->sc_rngto, sc->sc_rnghz, hifn_rng, sc);
820         }
821 #endif
822
823         /* Enable public key engine, if available */
824         if (sc->sc_flags & HIFN_HAS_PUBLIC) {
825                 WRITE_REG_1(sc, HIFN_1_PUB_IEN, HIFN_PUBIEN_DONE);
826                 sc->sc_dmaier |= HIFN_DMAIER_PUBDONE;
827                 WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
828         }
829
830         return (0);
831 }
832
833 #ifndef HIFN_NO_RNG
834 static void
835 hifn_rng(void *vsc)
836 {
837 #define RANDOM_BITS(n)  (n)*sizeof (u_int32_t), (n)*sizeof (u_int32_t)*NBBY, 0
838         struct hifn_softc *sc = vsc;
839         u_int32_t sts, num[2];
840         int i;
841
842         if (sc->sc_flags & HIFN_IS_7811) {
843                 for (i = 0; i < 5; i++) {
844                         sts = READ_REG_1(sc, HIFN_1_7811_RNGSTS);
845                         if (sts & HIFN_7811_RNGSTS_UFL) {
846                                 device_printf(sc->sc_dev,
847                                               "RNG underflow: disabling\n");
848                                 return;
849                         }
850                         if ((sts & HIFN_7811_RNGSTS_RDY) == 0)
851                                 break;
852
853                         /*
854                          * There are at least two words in the RNG FIFO
855                          * at this point.
856                          */
857                         num[0] = READ_REG_1(sc, HIFN_1_7811_RNGDAT);
858                         num[1] = READ_REG_1(sc, HIFN_1_7811_RNGDAT);
859                         /* NB: discard first data read */
860                         if (sc->sc_rngfirst)
861                                 sc->sc_rngfirst = 0;
862                         else
863                                 (*sc->sc_harvest)(sc->sc_rndtest,
864                                         num, sizeof (num));
865                 }
866         } else {
867                 num[0] = READ_REG_1(sc, HIFN_1_RNG_DATA);
868
869                 /* NB: discard first data read */
870                 if (sc->sc_rngfirst)
871                         sc->sc_rngfirst = 0;
872                 else
873                         (*sc->sc_harvest)(sc->sc_rndtest,
874                                 num, sizeof (num[0]));
875         }
876
877         callout_reset(&sc->sc_rngto, sc->sc_rnghz, hifn_rng, sc);
878 #undef RANDOM_BITS
879 }
880 #endif
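
/*
 * The RNG is polled from the callout roughly 100 times a second
 * (sc_rnghz = hz / 100, clamped to at least one tick).  Each poll
 * harvests only a few 32-bit words -- two per loop iteration (up to
 * five iterations) on the 7811, a single word otherwise -- and the
 * very first read after enabling the RNG is discarded, keeping the
 * entropy feed cheap but steady.
 */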
881
882 static void
883 hifn_puc_wait(struct hifn_softc *sc)
884 {
885         int i;
886         int reg = HIFN_0_PUCTRL;
887
888         if (sc->sc_flags & HIFN_IS_7956) {
889                 reg = HIFN_0_PUCTRL2;
890         }
891
892         for (i = 5000; i > 0; i--) {
893                 DELAY(1);
894                 if (!(READ_REG_0(sc, reg) & HIFN_PUCTRL_RESET))
895                         break;
896         }
897         if (!i)
898                 device_printf(sc->sc_dev, "proc unit did not reset\n");
899 }
900
901 /*
902  * Reset the processing unit.
903  */
904 static void
905 hifn_reset_puc(struct hifn_softc *sc)
906 {
907         int reg = HIFN_0_PUCTRL;
908
909         if (sc->sc_flags & HIFN_IS_7956) {
910                 reg = HIFN_0_PUCTRL2;
911         }
912
913         /* Reset processing unit */
914         WRITE_REG_0(sc, reg, HIFN_PUCTRL_DMAENA);
915         hifn_puc_wait(sc);
916 }
917
918 /*
919  * Set the Retry and TRDY registers; note that we set them to
920  * zero because the 7811 locks up when forced to retry (section
921  * 3.6 of "Specification Update SU-0014-04").  Not clear if we
922  * should do this for all Hifn parts, but it doesn't seem to hurt.
923  */
924 static void
925 hifn_set_retry(struct hifn_softc *sc)
926 {
927         /* NB: RETRY only responds to 8-bit reads/writes */
928         pci_write_config(sc->sc_dev, HIFN_RETRY_TIMEOUT, 0, 1);
929         pci_write_config(sc->sc_dev, HIFN_TRDY_TIMEOUT, 0, 4);
930 }
931
932 /*
933  * Resets the board.  Values in the registers are left as-is
934  * from the reset (i.e. initial values are assigned elsewhere).
935  */
936 static void
937 hifn_reset_board(struct hifn_softc *sc, int full)
938 {
939         u_int32_t reg;
940
941         /*
942          * Set polling in the DMA configuration register to zero.  0x7 avoids
943          * resetting the board and zeros out the other fields.
944          */
945         WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
946             HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
947
948         /*
949          * Now that polling has been disabled, we have to wait 1 ms
950          * before resetting the board.
951          */
952         DELAY(1000);
953
954         /* Reset the DMA unit */
955         if (full) {
956                 WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MODE);
957                 DELAY(1000);
958         } else {
959                 WRITE_REG_1(sc, HIFN_1_DMA_CNFG,
960                     HIFN_DMACNFG_MODE | HIFN_DMACNFG_MSTRESET);
961                 hifn_reset_puc(sc);
962         }
963
964         KASSERT(sc->sc_dma != NULL, ("hifn_reset_board: null DMA tag!"));
965         bzero(sc->sc_dma, sizeof(*sc->sc_dma));
966
967         /* Bring dma unit out of reset */
968         WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
969             HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
970
971         hifn_puc_wait(sc);
972         hifn_set_retry(sc);
973
974         if (sc->sc_flags & HIFN_IS_7811) {
975                 for (reg = 0; reg < 1000; reg++) {
976                         if (READ_REG_1(sc, HIFN_1_7811_MIPSRST) &
977                             HIFN_MIPSRST_CRAMINIT)
978                                 break;
979                         DELAY(1000);
980                 }
981                 if (reg == 1000)
982                         kprintf(": cram init timeout\n");
983         } else {
984           /* set up DMA configuration register #2 */
985           /* turn off all PK and BAR0 swaps */
986           WRITE_REG_1(sc, HIFN_1_DMA_CNFG2,
987                       (3 << HIFN_DMACNFG2_INIT_WRITE_BURST_SHIFT)|
988                       (3 << HIFN_DMACNFG2_INIT_READ_BURST_SHIFT)|
989                       (2 << HIFN_DMACNFG2_TGT_WRITE_BURST_SHIFT)|
990                       (2 << HIFN_DMACNFG2_TGT_READ_BURST_SHIFT));
991         }
992 }
993
994 static u_int32_t
995 hifn_next_signature(u_int32_t a, u_int cnt)
996 {
997         int i;
998         u_int32_t v;
999
1000         for (i = 0; i < cnt; i++) {
1001
1002                 /* get the parity */
1003                 v = a & 0x80080125;
1004                 v ^= v >> 16;
1005                 v ^= v >> 8;
1006                 v ^= v >> 4;
1007                 v ^= v >> 2;
1008                 v ^= v >> 1;
1009
1010                 a = (v & 1) ^ (a << 1);
1011         }
1012
1013         return a;
1014 }
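
/*
 * hifn_next_signature() is effectively a 32-bit LFSR stepped cnt
 * times: each step takes the parity (XOR) of the bits selected by the
 * mask 0x80080125 and shifts it into bit 0 while the rest of the word
 * shifts left.  hifn_enable_crypto() below seeds it with the value
 * read from HIFN_UNLOCK_SECRET1 and advances it (card_id[i] + 0x101)
 * steps for each of the 13 card_id bytes, writing the intermediate
 * values to HIFN_UNLOCK_SECRET2 to perform the unlock handshake.
 */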
1015
1016 struct pci2id {
1017         u_short         pci_vendor;
1018         u_short         pci_prod;
1019         char            card_id[13];
1020 };
1021 static struct pci2id pci2id[] = {
1022         {
1023                 PCI_VENDOR_HIFN,
1024                 PCI_PRODUCT_HIFN_7951,
1025                 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1026                   0x00, 0x00, 0x00, 0x00, 0x00 }
1027         }, {
1028                 PCI_VENDOR_HIFN,
1029                 PCI_PRODUCT_HIFN_7955,
1030                 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1031                   0x00, 0x00, 0x00, 0x00, 0x00 }
1032         }, {
1033                 PCI_VENDOR_HIFN,
1034                 PCI_PRODUCT_HIFN_7956,
1035                 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1036                   0x00, 0x00, 0x00, 0x00, 0x00 }
1037         }, {
1038                 PCI_VENDOR_NETSEC,
1039                 PCI_PRODUCT_NETSEC_7751,
1040                 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1041                   0x00, 0x00, 0x00, 0x00, 0x00 }
1042         }, {
1043                 PCI_VENDOR_INVERTEX,
1044                 PCI_PRODUCT_INVERTEX_AEON,
1045                 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1046                   0x00, 0x00, 0x00, 0x00, 0x00 }
1047         }, {
1048                 PCI_VENDOR_HIFN,
1049                 PCI_PRODUCT_HIFN_7811,
1050                 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1051                   0x00, 0x00, 0x00, 0x00, 0x00 }
1052         }, {
1053                 /*
1054                  * Other vendors share this PCI ID as well, such as
1055                  * http://www.powercrypt.com, and obviously they also
1056                  * use the same key.
1057                  */
1058                 PCI_VENDOR_HIFN,
1059                 PCI_PRODUCT_HIFN_7751,
1060                 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1061                   0x00, 0x00, 0x00, 0x00, 0x00 }
1062         },
1063 };
1064
1065 /*
1066  * Checks to see if crypto is already enabled.  If crypto isn't
1067  * enabled, the unlock sequence is performed.  The check is important,
1068  * as enabling crypto twice will lock the board.
1069  */
1070 static int 
1071 hifn_enable_crypto(struct hifn_softc *sc)
1072 {
1073         u_int32_t dmacfg, ramcfg, encl, addr, i;
1074         char *offtbl = NULL;
1075
1076         for (i = 0; i < sizeof(pci2id)/sizeof(pci2id[0]); i++) {
1077                 if (pci2id[i].pci_vendor == pci_get_vendor(sc->sc_dev) &&
1078                     pci2id[i].pci_prod == pci_get_device(sc->sc_dev)) {
1079                         offtbl = pci2id[i].card_id;
1080                         break;
1081                 }
1082         }
1083         if (offtbl == NULL) {
1084                 device_printf(sc->sc_dev, "Unknown card!\n");
1085                 return (1);
1086         }
1087
1088         ramcfg = READ_REG_0(sc, HIFN_0_PUCNFG);
1089         dmacfg = READ_REG_1(sc, HIFN_1_DMA_CNFG);
1090
1091         /*
1092          * The RAM config register's encrypt level bit needs to be set before
1093          * every read performed on the encryption level register.
1094          */
1095         WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);
1096
1097         encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;
1098
1099         /*
1100          * Make sure we don't re-unlock.  Two unlocks kill the chip until the
1101          * next reboot.
1102          */
1103         if (encl == HIFN_PUSTAT_ENA_1 || encl == HIFN_PUSTAT_ENA_2) {
1104 #ifdef HIFN_DEBUG
1105                 if (hifn_debug)
1106                         device_printf(sc->sc_dev,
1107                             "Strong crypto already enabled!\n");
1108 #endif
1109                 goto report;
1110         }
1111
1112         if (encl != 0 && encl != HIFN_PUSTAT_ENA_0) {
1113 #ifdef HIFN_DEBUG
1114                 if (hifn_debug)
1115                         device_printf(sc->sc_dev,
1116                               "Unknown encryption level 0x%x\n", encl);
1117 #endif
1118                 return 1;
1119         }
1120
1121         WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_UNLOCK |
1122             HIFN_DMACNFG_MSTRESET | HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
1123         DELAY(1000);
1124         addr = READ_REG_1(sc, HIFN_UNLOCK_SECRET1);
1125         DELAY(1000);
1126         WRITE_REG_1(sc, HIFN_UNLOCK_SECRET2, 0);
1127         DELAY(1000);
1128
1129         for (i = 0; i <= 12; i++) {
1130                 addr = hifn_next_signature(addr, offtbl[i] + 0x101);
1131                 WRITE_REG_1(sc, HIFN_UNLOCK_SECRET2, addr);
1132
1133                 DELAY(1000);
1134         }
1135
1136         WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);
1137         encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;
1138
1139 #ifdef HIFN_DEBUG
1140         if (hifn_debug) {
1141                 if (encl != HIFN_PUSTAT_ENA_1 && encl != HIFN_PUSTAT_ENA_2)
1142                         device_printf(sc->sc_dev, "Engine is permanently "
1143                                 "locked until next system reset!\n");
1144                 else
1145                         device_printf(sc->sc_dev, "Engine enabled "
1146                                 "successfully!\n");
1147         }
1148 #endif
1149
1150 report:
1151         WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg);
1152         WRITE_REG_1(sc, HIFN_1_DMA_CNFG, dmacfg);
1153
1154         switch (encl) {
1155         case HIFN_PUSTAT_ENA_1:
1156         case HIFN_PUSTAT_ENA_2:
1157                 break;
1158         case HIFN_PUSTAT_ENA_0:
1159         default:
1160                 device_printf(sc->sc_dev, "disabled");
1161                 break;
1162         }
1163
1164         return 0;
1165 }
1166
1167 /*
1168  * Give initial values to the registers listed in the "Register Space"
1169  * section of the HIFN Software Development reference manual.
1170  */
1171 static void 
1172 hifn_init_pci_registers(struct hifn_softc *sc)
1173 {
1174         /* write fixed values needed by the Initialization registers */
1175         WRITE_REG_0(sc, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA);
1176         WRITE_REG_0(sc, HIFN_0_FIFOCNFG, HIFN_FIFOCNFG_THRESHOLD);
1177         WRITE_REG_0(sc, HIFN_0_PUIER, HIFN_PUIER_DSTOVER);
1178
1179         /* write all 4 ring address registers */
1180         WRITE_REG_1(sc, HIFN_1_DMA_CRAR, sc->sc_dma_physaddr +
1181             offsetof(struct hifn_dma, cmdr[0]));
1182         WRITE_REG_1(sc, HIFN_1_DMA_SRAR, sc->sc_dma_physaddr +
1183             offsetof(struct hifn_dma, srcr[0]));
1184         WRITE_REG_1(sc, HIFN_1_DMA_DRAR, sc->sc_dma_physaddr +
1185             offsetof(struct hifn_dma, dstr[0]));
1186         WRITE_REG_1(sc, HIFN_1_DMA_RRAR, sc->sc_dma_physaddr +
1187             offsetof(struct hifn_dma, resr[0]));
1188
1189         DELAY(2000);
1190
1191         /* write status register */
1192         WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1193             HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS |
1194             HIFN_DMACSR_S_CTRL_DIS | HIFN_DMACSR_C_CTRL_DIS |
1195             HIFN_DMACSR_D_ABORT | HIFN_DMACSR_D_DONE | HIFN_DMACSR_D_LAST |
1196             HIFN_DMACSR_D_WAIT | HIFN_DMACSR_D_OVER |
1197             HIFN_DMACSR_R_ABORT | HIFN_DMACSR_R_DONE | HIFN_DMACSR_R_LAST |
1198             HIFN_DMACSR_R_WAIT | HIFN_DMACSR_R_OVER |
1199             HIFN_DMACSR_S_ABORT | HIFN_DMACSR_S_DONE | HIFN_DMACSR_S_LAST |
1200             HIFN_DMACSR_S_WAIT |
1201             HIFN_DMACSR_C_ABORT | HIFN_DMACSR_C_DONE | HIFN_DMACSR_C_LAST |
1202             HIFN_DMACSR_C_WAIT |
1203             HIFN_DMACSR_ENGINE |
1204             ((sc->sc_flags & HIFN_HAS_PUBLIC) ?
1205                 HIFN_DMACSR_PUBDONE : 0) |
1206             ((sc->sc_flags & HIFN_IS_7811) ?
1207                 HIFN_DMACSR_ILLW | HIFN_DMACSR_ILLR : 0));
1208
1209         sc->sc_d_busy = sc->sc_r_busy = sc->sc_s_busy = sc->sc_c_busy = 0;
1210         sc->sc_dmaier |= HIFN_DMAIER_R_DONE | HIFN_DMAIER_C_ABORT |
1211             HIFN_DMAIER_D_OVER | HIFN_DMAIER_R_OVER |
1212             HIFN_DMAIER_S_ABORT | HIFN_DMAIER_D_ABORT | HIFN_DMAIER_R_ABORT |
1213             ((sc->sc_flags & HIFN_IS_7811) ?
1214                 HIFN_DMAIER_ILLW | HIFN_DMAIER_ILLR : 0);
1215         sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
1216         WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
1217
1218         if (sc->sc_flags & HIFN_IS_7956) {
1219                 u_int32_t pll;
1220
1221                 WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
1222                     HIFN_PUCNFG_TCALLPHASES |
1223                     HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32);
1224
1225                 /* turn off the clocks and ensure bypass is set */
1226                 pll = READ_REG_1(sc, HIFN_1_PLL);
1227                 pll = (pll &~ (HIFN_PLL_PK_CLK_SEL | HIFN_PLL_PE_CLK_SEL))
1228                     | HIFN_PLL_BP | HIFN_PLL_MBSET;
1229                 WRITE_REG_1(sc, HIFN_1_PLL, pll);
1230                 DELAY(10*1000);         /* 10ms */
1231                 /* change configuration */
1232                 pll = (pll &~ HIFN_PLL_CONFIG) | sc->sc_pllconfig;
1233                 WRITE_REG_1(sc, HIFN_1_PLL, pll);
1234                 DELAY(10*1000);         /* 10ms */
1235                 /* disable bypass */
1236                 pll &= ~HIFN_PLL_BP;
1237                 WRITE_REG_1(sc, HIFN_1_PLL, pll);
1238                 /* enable clocks with new configuration */
1239                 pll |= HIFN_PLL_PK_CLK_SEL | HIFN_PLL_PE_CLK_SEL;
1240                 WRITE_REG_1(sc, HIFN_1_PLL, pll);
1241         } else {
1242                 WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
1243                     HIFN_PUCNFG_DRFR_128 | HIFN_PUCNFG_TCALLPHASES |
1244                     HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32 |
1245                     (sc->sc_drammodel ? HIFN_PUCNFG_DRAM : HIFN_PUCNFG_SRAM));
1246         }
1247
1248         WRITE_REG_0(sc, HIFN_0_PUISR, HIFN_PUISR_DSTOVER);
1249         WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
1250             HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE | HIFN_DMACNFG_LAST |
1251             ((HIFN_POLL_FREQUENCY << 16 ) & HIFN_DMACNFG_POLLFREQ) |
1252             ((HIFN_POLL_SCALAR << 8) & HIFN_DMACNFG_POLLINVAL));
1253 }
1254
1255 /*
1256  * The maximum number of sessions supported by the card
1257  * is dependent on the amount of context ram, which
1258  * encryption algorithms are enabled, and how compression
1259  * is configured.  This should be configured before this
1260  * routine is called.
1261  */
1262 static void
1263 hifn_sessions(struct hifn_softc *sc)
1264 {
1265         u_int32_t pucnfg;
1266         int ctxsize;
1267
1268         pucnfg = READ_REG_0(sc, HIFN_0_PUCNFG);
1269
1270         if (pucnfg & HIFN_PUCNFG_COMPSING) {
1271                 if (pucnfg & HIFN_PUCNFG_ENCCNFG)
1272                         ctxsize = 128;
1273                 else
1274                         ctxsize = 512;
1275                 /*
1276                  * The 7955/7956 have 32K of internal context memory
1277                  */
1278                 if (sc->sc_flags & HIFN_IS_7956)
1279                         sc->sc_maxses = 32768 / ctxsize;
1280                 else
1281                         sc->sc_maxses = 1 +
1282                             ((sc->sc_ramsize - 32768) / ctxsize);
1283         } else
1284                 sc->sc_maxses = sc->sc_ramsize / 16384;
1285
1286         if (sc->sc_maxses > 2048)
1287                 sc->sc_maxses = 2048;
1288 }
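
/*
 * Worked example of the arithmetic above: a board with 512KB of
 * context RAM, single-context compression and 512-byte contexts gets
 * 1 + (524288 - 32768) / 512 = 961 sessions, while a 7955/7956 with
 * 128-byte contexts gets 32768 / 128 = 256; both are well under the
 * 2048 cap.
 */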
1289
1290 /*
1291  * Determine ram type (sram or dram).  Board should be just out of a reset
1292  * state when this is called.
1293  */
1294 static int
1295 hifn_ramtype(struct hifn_softc *sc)
1296 {
1297         u_int8_t data[8], dataexpect[8];
1298         int i;
1299
1300         for (i = 0; i < sizeof(data); i++)
1301                 data[i] = dataexpect[i] = 0x55;
1302         if (hifn_writeramaddr(sc, 0, data))
1303                 return (-1);
1304         if (hifn_readramaddr(sc, 0, data))
1305                 return (-1);
1306         if (bcmp(data, dataexpect, sizeof(data)) != 0) {
1307                 sc->sc_drammodel = 1;
1308                 return (0);
1309         }
1310
1311         for (i = 0; i < sizeof(data); i++)
1312                 data[i] = dataexpect[i] = 0xaa;
1313         if (hifn_writeramaddr(sc, 0, data))
1314                 return (-1);
1315         if (hifn_readramaddr(sc, 0, data))
1316                 return (-1);
1317         if (bcmp(data, dataexpect, sizeof(data)) != 0) {
1318                 sc->sc_drammodel = 1;
1319                 return (0);
1320         }
1321
1322         return (0);
1323 }
1324
1325 #define HIFN_SRAM_MAX           (32 << 20)
1326 #define HIFN_SRAM_STEP_SIZE     16384
1327 #define HIFN_SRAM_GRANULARITY   (HIFN_SRAM_MAX / HIFN_SRAM_STEP_SIZE)
1328
1329 static int
1330 hifn_sramsize(struct hifn_softc *sc)
1331 {
1332         u_int32_t a;
1333         u_int8_t data[8];
1334         u_int8_t dataexpect[sizeof(data)];
1335         int32_t i;
1336
1337         for (i = 0; i < sizeof(data); i++)
1338                 data[i] = dataexpect[i] = i ^ 0x5a;
1339
1340         for (i = HIFN_SRAM_GRANULARITY - 1; i >= 0; i--) {
1341                 a = i * HIFN_SRAM_STEP_SIZE;
1342                 bcopy(&i, data, sizeof(i));
1343                 hifn_writeramaddr(sc, a, data);
1344         }
1345
1346         for (i = 0; i < HIFN_SRAM_GRANULARITY; i++) {
1347                 a = i * HIFN_SRAM_STEP_SIZE;
1348                 bcopy(&i, dataexpect, sizeof(i));
1349                 if (hifn_readramaddr(sc, a, data) < 0)
1350                         return (0);
1351                 if (bcmp(data, dataexpect, sizeof(data)) != 0)
1352                         return (0);
1353                 sc->sc_ramsize = a + HIFN_SRAM_STEP_SIZE;
1354         }
1355
1356         return (0);
1357 }
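
/*
 * The probe above writes the step index at every 16KB boundary from
 * the top of the 32MB window downward, then reads the indices back in
 * ascending order; sc_ramsize is advanced after every matching
 * read-back, so the first mismatch (where high addresses started
 * aliasing over low ones) marks the end of the SRAM.
 */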
1358
1359 /*
1360  * XXX For dram boards, one should really try all of the
1361  * HIFN_PUCNFG_DSZ_*'s.  This just assumes that PUCNFG
1362  * is already set up correctly.
1363  */
1364 static int
1365 hifn_dramsize(struct hifn_softc *sc)
1366 {
1367         u_int32_t cnfg;
1368         
1369         if (sc->sc_flags & HIFN_IS_7956) {
1370                 /*
1371                  * 7955/7956 have a fixed internal ram of only 32K.
1372                  */
1373                 sc->sc_ramsize = 32768;
1374         } else {
1375                 cnfg = READ_REG_0(sc, HIFN_0_PUCNFG) &
1376                     HIFN_PUCNFG_DRAMMASK;
1377                 sc->sc_ramsize = 1 << ((cnfg >> 13) + 18);
1378         }
1379         return (0);
1380 }
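
/*
 * For the DRAM case the size is decoded directly from the PUCNFG
 * DRAM-size field: 1 << ((cnfg >> 13) + 18), i.e. 256KB for a field
 * value of 0, doubling with each increment.
 */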
1381
1382 static void
1383 hifn_alloc_slot(struct hifn_softc *sc, int *cmdp, int *srcp, int *dstp, int *resp)
1384 {
1385         struct hifn_dma *dma = sc->sc_dma;
1386
1387         if (dma->cmdi == HIFN_D_CMD_RSIZE) {
1388                 dma->cmdi = 0;
1389                 dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID |
1390                     HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
1391                 HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
1392                     BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1393         }
1394         *cmdp = dma->cmdi++;
1395         dma->cmdk = dma->cmdi;
1396
1397         if (dma->srci == HIFN_D_SRC_RSIZE) {
1398                 dma->srci = 0;
1399                 dma->srcr[HIFN_D_SRC_RSIZE].l = htole32(HIFN_D_VALID |
1400                     HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
1401                 HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE,
1402                     BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1403         }
1404         *srcp = dma->srci++;
1405         dma->srck = dma->srci;
1406
1407         if (dma->dsti == HIFN_D_DST_RSIZE) {
1408                 dma->dsti = 0;
1409                 dma->dstr[HIFN_D_DST_RSIZE].l = htole32(HIFN_D_VALID |
1410                     HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
1411                 HIFN_DSTR_SYNC(sc, HIFN_D_DST_RSIZE,
1412                     BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1413         }
1414         *dstp = dma->dsti++;
1415         dma->dstk = dma->dsti;
1416
1417         if (dma->resi == HIFN_D_RES_RSIZE) {
1418                 dma->resi = 0;
1419                 dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID |
1420                     HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
1421                 HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
1422                     BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1423         }
1424         *resp = dma->resi++;
1425         dma->resk = dma->resi;
1426 }
1427
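/*
 * Write the 8-byte test pattern in 'data' to on-board RAM at 'addr'
 * using a single command/source/dest/result descriptor set.  Returns
 * 0 on success or -1 if the result descriptor never completes.
 */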
1428 static int
1429 hifn_writeramaddr(struct hifn_softc *sc, int addr, u_int8_t *data)
1430 {
1431         struct hifn_dma *dma = sc->sc_dma;
1432         hifn_base_command_t wc;
1433         const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ;
1434         int r, cmdi, resi, srci, dsti;
1435
1436         wc.masks = htole16(3 << 13);
1437         wc.session_num = htole16(addr >> 14);
1438         wc.total_source_count = htole16(8);
1439         wc.total_dest_count = htole16(addr & 0x3fff);
1440
1441         hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi);
1442
1443         WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1444             HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
1445             HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA);
1446
1447         /* build write command */
1448         bzero(dma->command_bufs[cmdi], HIFN_MAX_COMMAND);
1449         *(hifn_base_command_t *)dma->command_bufs[cmdi] = wc;
1450         bcopy(data, &dma->test_src, sizeof(dma->test_src));
1451
1452         dma->srcr[srci].p = htole32(sc->sc_dma_physaddr
1453             + offsetof(struct hifn_dma, test_src));
1454         dma->dstr[dsti].p = htole32(sc->sc_dma_physaddr
1455             + offsetof(struct hifn_dma, test_dst));
1456
1457         dma->cmdr[cmdi].l = htole32(16 | masks);
1458         dma->srcr[srci].l = htole32(8 | masks);
1459         dma->dstr[dsti].l = htole32(4 | masks);
1460         dma->resr[resi].l = htole32(4 | masks);
1461
1462         bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1463             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1464
1465         for (r = 10000; r >= 0; r--) {
1466                 DELAY(10);
1467                 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1468                     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1469                 if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0)
1470                         break;
1471                 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1472                     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1473         }
1474         if (r == 0) {
1475                 device_printf(sc->sc_dev, "writeramaddr -- "
1476                     "result[%d](addr %d) still valid\n", resi, addr);
1477                 r = -1;
1478                 return (-1);
1479         } else
1480                 r = 0;
1481
1482         WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1483             HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS |
1484             HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS);
1485
1486         return (r);
1487 }
1488
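/*
 * Read 8 bytes of on-board RAM at 'addr' back into 'data'.  Returns
 * 0 on success or -1 if the result descriptor never completes.
 */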
1489 static int
1490 hifn_readramaddr(struct hifn_softc *sc, int addr, u_int8_t *data)
1491 {
1492         struct hifn_dma *dma = sc->sc_dma;
1493         hifn_base_command_t rc;
1494         const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ;
1495         int r, cmdi, srci, dsti, resi;
1496
1497         rc.masks = htole16(2 << 13);
1498         rc.session_num = htole16(addr >> 14);
1499         rc.total_source_count = htole16(addr & 0x3fff);
1500         rc.total_dest_count = htole16(8);
1501
1502         hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi);
1503
1504         WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1505             HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
1506             HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA);
1507
1508         bzero(dma->command_bufs[cmdi], HIFN_MAX_COMMAND);
1509         *(hifn_base_command_t *)dma->command_bufs[cmdi] = rc;
1510
1511         dma->srcr[srci].p = htole32(sc->sc_dma_physaddr +
1512             offsetof(struct hifn_dma, test_src));
1513         dma->test_src = 0;
1514         dma->dstr[dsti].p =  htole32(sc->sc_dma_physaddr +
1515             offsetof(struct hifn_dma, test_dst));
1516         dma->test_dst = 0;
1517         dma->cmdr[cmdi].l = htole32(8 | masks);
1518         dma->srcr[srci].l = htole32(8 | masks);
1519         dma->dstr[dsti].l = htole32(8 | masks);
1520         dma->resr[resi].l = htole32(HIFN_MAX_RESULT | masks);
1521
1522         bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1523             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1524
1525         for (r = 10000; r >= 0; r--) {
1526                 DELAY(10);
1527                 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1528                     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1529                 if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0)
1530                         break;
1531                 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1532                     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1533         }
1534         if (r == 0) {
1535                 device_printf(sc->sc_dev, "readramaddr -- "
1536                     "result[%d](addr %d) still valid\n", resi, addr);
1537                 r = -1;
1538         } else {
1539                 r = 0;
1540                 bcopy(&dma->test_dst, data, sizeof(dma->test_dst));
1541         }
1542
1543         WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1544             HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS |
1545             HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS);
1546
1547         return (r);
1548 }
1549
1550 /*
1551  * Initialize the descriptor rings.
1552  */
1553 static void 
1554 hifn_init_dma(struct hifn_softc *sc)
1555 {
1556         struct hifn_dma *dma = sc->sc_dma;
1557         int i;
1558
1559         hifn_set_retry(sc);
1560
1561         /* initialize static pointer values */
1562         for (i = 0; i < HIFN_D_CMD_RSIZE; i++)
1563                 dma->cmdr[i].p = htole32(sc->sc_dma_physaddr +
1564                     offsetof(struct hifn_dma, command_bufs[i][0]));
1565         for (i = 0; i < HIFN_D_RES_RSIZE; i++)
1566                 dma->resr[i].p = htole32(sc->sc_dma_physaddr +
1567                     offsetof(struct hifn_dma, result_bufs[i][0]));
1568
1569         dma->cmdr[HIFN_D_CMD_RSIZE].p =
1570             htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, cmdr[0]));
1571         dma->srcr[HIFN_D_SRC_RSIZE].p =
1572             htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, srcr[0]));
1573         dma->dstr[HIFN_D_DST_RSIZE].p =
1574             htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, dstr[0]));
1575         dma->resr[HIFN_D_RES_RSIZE].p =
1576             htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, resr[0]));
1577
1578         dma->cmdu = dma->srcu = dma->dstu = dma->resu = 0;
1579         dma->cmdi = dma->srci = dma->dsti = dma->resi = 0;
1580         dma->cmdk = dma->srck = dma->dstk = dma->resk = 0;
1581 }
1582
1583 /*
1584  * Writes out the raw command buffer space.  Returns the
1585  * command buffer size.
1586  */
1587 static u_int
1588 hifn_write_command(struct hifn_command *cmd, u_int8_t *buf)
1589 {
1590         u_int8_t *buf_pos;
1591         hifn_base_command_t *base_cmd;
1592         hifn_mac_command_t *mac_cmd;
1593         hifn_crypt_command_t *cry_cmd;
1594         int using_mac, using_crypt, len, ivlen;
1595         u_int32_t dlen, slen;
1596
1597         buf_pos = buf;
1598         using_mac = cmd->base_masks & HIFN_BASE_CMD_MAC;
1599         using_crypt = cmd->base_masks & HIFN_BASE_CMD_CRYPT;
1600
1601         base_cmd = (hifn_base_command_t *)buf_pos;
1602         base_cmd->masks = htole16(cmd->base_masks);
1603         slen = cmd->src_mapsize;
1604         if (cmd->sloplen)
1605                 dlen = cmd->dst_mapsize - cmd->sloplen + sizeof(u_int32_t);
1606         else
1607                 dlen = cmd->dst_mapsize;
1608         base_cmd->total_source_count = htole16(slen & HIFN_BASE_CMD_LENMASK_LO);
1609         base_cmd->total_dest_count = htole16(dlen & HIFN_BASE_CMD_LENMASK_LO);
1610         dlen >>= 16;
1611         slen >>= 16;
1612
1613         base_cmd->session_num = htole16(
1614             ((slen << HIFN_BASE_CMD_SRCLEN_S) & HIFN_BASE_CMD_SRCLEN_M) |
1615             ((dlen << HIFN_BASE_CMD_DSTLEN_S) & HIFN_BASE_CMD_DSTLEN_M));
1616         buf_pos += sizeof(hifn_base_command_t);
1617
1618         if (using_mac) {
1619                 mac_cmd = (hifn_mac_command_t *)buf_pos;
1620                 dlen = cmd->maccrd->crd_len;
1621                 mac_cmd->source_count = htole16(dlen & 0xffff);
1622                 dlen >>= 16;
1623                 mac_cmd->masks = htole16(cmd->mac_masks |
1624                     ((dlen << HIFN_MAC_CMD_SRCLEN_S) & HIFN_MAC_CMD_SRCLEN_M));
1625                 mac_cmd->header_skip = htole16(cmd->maccrd->crd_skip);
1626                 mac_cmd->reserved = 0;
1627                 buf_pos += sizeof(hifn_mac_command_t);
1628         }
1629
1630         if (using_crypt) {
1631                 cry_cmd = (hifn_crypt_command_t *)buf_pos;
1632                 dlen = cmd->enccrd->crd_len;
1633                 cry_cmd->source_count = htole16(dlen & 0xffff);
1634                 dlen >>= 16;
1635                 cry_cmd->masks = htole16(cmd->cry_masks |
1636                     ((dlen << HIFN_CRYPT_CMD_SRCLEN_S) & HIFN_CRYPT_CMD_SRCLEN_M));
1637                 cry_cmd->header_skip = htole16(cmd->enccrd->crd_skip);
1638                 cry_cmd->reserved = 0;
1639                 buf_pos += sizeof(hifn_crypt_command_t);
1640         }
1641
1642         if (using_mac && cmd->mac_masks & HIFN_MAC_CMD_NEW_KEY) {
1643                 bcopy(cmd->mac, buf_pos, HIFN_MAC_KEY_LENGTH);
1644                 buf_pos += HIFN_MAC_KEY_LENGTH;
1645         }
1646
1647         if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_KEY) {
1648                 switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) {
1649                 case HIFN_CRYPT_CMD_ALG_3DES:
1650                         bcopy(cmd->ck, buf_pos, HIFN_3DES_KEY_LENGTH);
1651                         buf_pos += HIFN_3DES_KEY_LENGTH;
1652                         break;
1653                 case HIFN_CRYPT_CMD_ALG_DES:
1654                         bcopy(cmd->ck, buf_pos, HIFN_DES_KEY_LENGTH);
1655                         buf_pos += HIFN_DES_KEY_LENGTH;
1656                         break;
1657                 case HIFN_CRYPT_CMD_ALG_RC4:
1658                         len = 256;
1659                         do {
1660                                 int clen;
1661
1662                                 clen = MIN(cmd->cklen, len);
1663                                 bcopy(cmd->ck, buf_pos, clen);
1664                                 len -= clen;
1665                                 buf_pos += clen;
1666                         } while (len > 0);
1667                         bzero(buf_pos, 4);
1668                         buf_pos += 4;
1669                         break;
1670                 case HIFN_CRYPT_CMD_ALG_AES:
1671                         /*
1672                          * AES keys are variable length: 128, 192 or
1673                          * 256 bits (16, 24 or 32 bytes).
1674                          */
1675                         bcopy(cmd->ck, buf_pos, cmd->cklen);
1676                         buf_pos += cmd->cklen;
1677                         break;
1678                 }
1679         }
1680
1681         if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_IV) {
1682                 switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) {
1683                 case HIFN_CRYPT_CMD_ALG_AES:
1684                         ivlen = HIFN_AES_IV_LENGTH;
1685                         break;
1686                 default:
1687                         ivlen = HIFN_IV_LENGTH;
1688                         break;
1689                 }
1690                 bcopy(cmd->iv, buf_pos, ivlen);
1691                 buf_pos += ivlen;
1692         }
1693
1694         if ((cmd->base_masks & (HIFN_BASE_CMD_MAC|HIFN_BASE_CMD_CRYPT)) == 0) {
1695                 bzero(buf_pos, 8);
1696                 buf_pos += 8;
1697         }
1698
1699         return (buf_pos - buf);
1700 #undef  MIN
1701 }
1702
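/*
 * Return non-zero if every DMA segment of the operand starts on a
 * longword boundary and all but the last have longword-aligned
 * lengths; hifn_crypto() copies unaligned source buffers.
 */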
1703 static int
1704 hifn_dmamap_aligned(struct hifn_operand *op)
1705 {
1706         int i;
1707
1708         for (i = 0; i < op->nsegs; i++) {
1709                 if (op->segs[i].ds_addr & 3)
1710                         return (0);
1711                 if ((i != (op->nsegs - 1)) && (op->segs[i].ds_len & 3))
1712                         return (0);
1713         }
1714         return (1);
1715 }
1716
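/*
 * Advance a destination ring index, writing the jump descriptor and
 * wrapping back to slot 0 when the end of the ring is reached.
 */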
1717 static __inline int
1718 hifn_dmamap_dstwrap(struct hifn_softc *sc, int idx)
1719 {
1720         struct hifn_dma *dma = sc->sc_dma;
1721
1722         if (++idx == HIFN_D_DST_RSIZE) {
1723                 dma->dstr[idx].l = htole32(HIFN_D_VALID | HIFN_D_JUMP |
1724                     HIFN_D_MASKDONEIRQ);
1725                 HIFN_DSTR_SYNC(sc, idx,
1726                     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1727                 idx = 0;
1728         }
1729         return (idx);
1730 }
1731
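/*
 * Load the destination operand's segments into the destination ring.
 * When the mapping has slop (a partial trailing longword), the last
 * few bytes are steered into the per-command slop buffer instead.
 */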
1732 static int
1733 hifn_dmamap_load_dst(struct hifn_softc *sc, struct hifn_command *cmd)
1734 {
1735         struct hifn_dma *dma = sc->sc_dma;
1736         struct hifn_operand *dst = &cmd->dst;
1737         u_int32_t p, l;
1738         int idx, used = 0, i;
1739
1740         idx = dma->dsti;
1741         for (i = 0; i < dst->nsegs - 1; i++) {
1742                 dma->dstr[idx].p = htole32(dst->segs[i].ds_addr);
1743                 dma->dstr[idx].l = htole32(HIFN_D_VALID |
1744                     HIFN_D_MASKDONEIRQ | dst->segs[i].ds_len);
1745                 HIFN_DSTR_SYNC(sc, idx,
1746                     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1747                 used++;
1748
1749                 idx = hifn_dmamap_dstwrap(sc, idx);
1750         }
1751
1752         if (cmd->sloplen == 0) {
1753                 p = dst->segs[i].ds_addr;
1754                 l = HIFN_D_VALID | HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
1755                     dst->segs[i].ds_len;
1756         } else {
1757                 p = sc->sc_dma_physaddr +
1758                     offsetof(struct hifn_dma, slop[cmd->slopidx]);
1759                 l = HIFN_D_VALID | HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
1760                     sizeof(u_int32_t);
1761
1762                 if ((dst->segs[i].ds_len - cmd->sloplen) != 0) {
1763                         dma->dstr[idx].p = htole32(dst->segs[i].ds_addr);
1764                         dma->dstr[idx].l = htole32(HIFN_D_VALID |
1765                             HIFN_D_MASKDONEIRQ |
1766                             (dst->segs[i].ds_len - cmd->sloplen));
1767                         HIFN_DSTR_SYNC(sc, idx,
1768                             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1769                         used++;
1770
1771                         idx = hifn_dmamap_dstwrap(sc, idx);
1772                 }
1773         }
1774         dma->dstr[idx].p = htole32(p);
1775         dma->dstr[idx].l = htole32(l);
1776         HIFN_DSTR_SYNC(sc, idx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1777         used++;
1778
1779         idx = hifn_dmamap_dstwrap(sc, idx);
1780
1781         dma->dsti = idx;
1782         dma->dstu += used;
1783         return (idx);
1784 }
1785
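/*
 * Advance a source ring index, writing the jump descriptor and
 * wrapping back to slot 0 when the end of the ring is reached.
 */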
1786 static __inline int
1787 hifn_dmamap_srcwrap(struct hifn_softc *sc, int idx)
1788 {
1789         struct hifn_dma *dma = sc->sc_dma;
1790
1791         if (++idx == HIFN_D_SRC_RSIZE) {
1792                 dma->srcr[idx].l = htole32(HIFN_D_VALID |
1793                     HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
1794                 HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE,
1795                     BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1796                 idx = 0;
1797         }
1798         return (idx);
1799 }
1800
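/*
 * Load the source operand's segments into the source ring, marking
 * the final segment with HIFN_D_LAST.
 */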
1801 static int
1802 hifn_dmamap_load_src(struct hifn_softc *sc, struct hifn_command *cmd)
1803 {
1804         struct hifn_dma *dma = sc->sc_dma;
1805         struct hifn_operand *src = &cmd->src;
1806         int idx, i;
1807         u_int32_t last = 0;
1808
1809         idx = dma->srci;
1810         for (i = 0; i < src->nsegs; i++) {
1811                 if (i == src->nsegs - 1)
1812                         last = HIFN_D_LAST;
1813
1814                 dma->srcr[idx].p = htole32(src->segs[i].ds_addr);
1815                 dma->srcr[idx].l = htole32(src->segs[i].ds_len |
1816                     HIFN_D_VALID | HIFN_D_MASKDONEIRQ | last);
1817                 HIFN_SRCR_SYNC(sc, idx,
1818                     BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1819
1820                 idx = hifn_dmamap_srcwrap(sc, idx);
1821         }
1822         dma->srci = idx;
1823         dma->srcu += src->nsegs;
1824         return (idx);
1825 }
1826
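/*
 * bus_dma callback used when loading an operand: record the mapping
 * size and copy the segment list into the hifn_operand.
 */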
1827 static void
1828 hifn_op_cb(void* arg, bus_dma_segment_t *seg, int nsegs, bus_size_t mapsize, int error)
1829 {
1830         struct hifn_operand *op = arg;
1831
1832         KASSERT(nsegs <= MAX_SCATTER,
1833                 ("hifn_op_cb: too many DMA segments (%u > %u) "
1834                  "returned when mapping operand", nsegs, MAX_SCATTER));
1835         op->mapsize = mapsize;
1836         op->nsegs = nsegs;
1837         bcopy(seg, op->segs, nsegs * sizeof (seg[0]));
1838 }
1839
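/*
 * Dispatch one crypto request to the chip: map the source (copying
 * unaligned mbuf chains), map a separate destination if needed, write
 * the command descriptor and enable the DMA engines.  Returns 0 on
 * success, ERESTART when the rings are full, or an errno on failure.
 */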
1840 static int 
1841 hifn_crypto(
1842         struct hifn_softc *sc,
1843         struct hifn_command *cmd,
1844         struct cryptop *crp,
1845         int hint)
1846 {
1847         struct  hifn_dma *dma = sc->sc_dma;
1848         u_int32_t cmdlen, csr;
1849         int cmdi, resi, err = 0;
1850
1851         /*
1852          * need 1 cmd, and 1 res
1853          *
1854          * NB: check this first since it's easy.
1855          */
1856         HIFN_LOCK(sc);
1857         if ((dma->cmdu + 1) > HIFN_D_CMD_RSIZE ||
1858             (dma->resu + 1) > HIFN_D_RES_RSIZE) {
1859 #ifdef HIFN_DEBUG
1860                 if (hifn_debug) {
1861                         device_printf(sc->sc_dev,
1862                                 "cmd/result exhaustion, cmdu %u resu %u\n",
1863                                 dma->cmdu, dma->resu);
1864                 }
1865 #endif
1866                 hifnstats.hst_nomem_cr++;
1867                 HIFN_UNLOCK(sc);
1868                 return (ERESTART);
1869         }
1870
1871         if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, &cmd->src_map)) {
1872                 hifnstats.hst_nomem_map++;
1873                 HIFN_UNLOCK(sc);
1874                 return (ENOMEM);
1875         }
1876
1877         if (crp->crp_flags & CRYPTO_F_IMBUF) {
1878                 if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->src_map,
1879                     cmd->src_m, hifn_op_cb, &cmd->src, BUS_DMA_NOWAIT)) {
1880                         hifnstats.hst_nomem_load++;
1881                         err = ENOMEM;
1882                         goto err_srcmap1;
1883                 }
1884         } else if (crp->crp_flags & CRYPTO_F_IOV) {
1885 #if 0
1886                 cmd->src_io->uio_segflg = UIO_USERSPACE;
1887 #endif
1888                 if (bus_dmamap_load_uio(sc->sc_dmat, cmd->src_map,
1889                     cmd->src_io, hifn_op_cb, &cmd->src, BUS_DMA_NOWAIT)) {
1890                         hifnstats.hst_nomem_load++;
1891                         err = ENOMEM;
1892                         goto err_srcmap1;
1893                 }
1894         } else {
1895                 err = EINVAL;
1896                 goto err_srcmap1;
1897         }
1898
1899         if (hifn_dmamap_aligned(&cmd->src)) {
1900                 cmd->sloplen = cmd->src_mapsize & 3;
1901                 cmd->dst = cmd->src;
1902         } else {
1903                 if (crp->crp_flags & CRYPTO_F_IOV) {
1904                         err = EINVAL;
1905                         goto err_srcmap;
1906                 } else if (crp->crp_flags & CRYPTO_F_IMBUF) {
1907                         int totlen, len;
1908                         struct mbuf *m, *m0, *mlast;
1909
1910                         KASSERT(cmd->dst_m == cmd->src_m,
1911                                 ("hifn_crypto: dst_m initialized improperly"));
1912                         hifnstats.hst_unaligned++;
1913                         /*
1914                          * Source is not aligned on a longword boundary.
1915                          * Copy the data to ensure alignment.  If we fail
1916                          * to allocate mbufs or clusters while doing this
1917                          * we return ERESTART so the operation is requeued
1918                          * at the crypto layer, but only if there are
1919                          * ops already posted to the hardware; otherwise we
1920                          * have no guarantee that we'll be re-entered.
1921                          */
1922                         totlen = cmd->src_mapsize;
1923                         if (cmd->src_m->m_flags & M_PKTHDR) {
1924                                 len = MHLEN;
1925                                 MGETHDR(m0, MB_DONTWAIT, MT_DATA);
1926                                 if (m0 && !m_dup_pkthdr(m0, cmd->src_m, MB_DONTWAIT)) {
1927                                         m_free(m0);
1928                                         m0 = NULL;
1929                                 }
1930                         } else {
1931                                 len = MLEN;
1932                                 MGET(m0, MB_DONTWAIT, MT_DATA);
1933                         }
1934                         if (m0 == NULL) {
1935                                 hifnstats.hst_nomem_mbuf++;
1936                                 err = dma->cmdu ? ERESTART : ENOMEM;
1937                                 goto err_srcmap;
1938                         }
1939                         if (totlen >= MINCLSIZE) {
1940                                 MCLGET(m0, MB_DONTWAIT);
1941                                 if ((m0->m_flags & M_EXT) == 0) {
1942                                         hifnstats.hst_nomem_mcl++;
1943                                         err = dma->cmdu ? ERESTART : ENOMEM;
1944                                         m_freem(m0);
1945                                         goto err_srcmap;
1946                                 }
1947                                 len = MCLBYTES;
1948                         }
1949                         totlen -= len;
1950                         m0->m_pkthdr.len = m0->m_len = len;
1951                         mlast = m0;
1952
1953                         while (totlen > 0) {
1954                                 MGET(m, MB_DONTWAIT, MT_DATA);
1955                                 if (m == NULL) {
1956                                         hifnstats.hst_nomem_mbuf++;
1957                                         err = dma->cmdu ? ERESTART : ENOMEM;
1958                                         m_freem(m0);
1959                                         goto err_srcmap;
1960                                 }
1961                                 len = MLEN;
1962                                 if (totlen >= MINCLSIZE) {
1963                                         MCLGET(m, MB_DONTWAIT);
1964                                         if ((m->m_flags & M_EXT) == 0) {
1965                                                 hifnstats.hst_nomem_mcl++;
1966                                                 err = dma->cmdu ? ERESTART : ENOMEM;
1967                                                 mlast->m_next = m;
1968                                                 m_freem(m0);
1969                                                 goto err_srcmap;
1970                                         }
1971                                         len = MCLBYTES;
1972                                 }
1973
1974                                 m->m_len = len;
1975                                 m0->m_pkthdr.len += len;
1976                                 totlen -= len;
1977
1978                                 mlast->m_next = m;
1979                                 mlast = m;
1980                         }
1981                         cmd->dst_m = m0;
1982                 }
1983         }
1984
1985         if (cmd->dst_map == NULL) {
1986                 if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, &cmd->dst_map)) {
1987                         hifnstats.hst_nomem_map++;
1988                         err = ENOMEM;
1989                         goto err_srcmap;
1990                 }
1991                 if (crp->crp_flags & CRYPTO_F_IMBUF) {
1992                         if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->dst_map,
1993                             cmd->dst_m, hifn_op_cb, &cmd->dst, BUS_DMA_NOWAIT)) {
1994                                 hifnstats.hst_nomem_map++;
1995                                 err = ENOMEM;
1996                                 goto err_dstmap1;
1997                         }
1998                 } else if (crp->crp_flags & CRYPTO_F_IOV) {
1999 #if 0
2000                         cmd->dst_io->uio_segflg |= UIO_USERSPACE;
2001 #endif
2002                         if (bus_dmamap_load_uio(sc->sc_dmat, cmd->dst_map,
2003                             cmd->dst_io, hifn_op_cb, &cmd->dst, BUS_DMA_NOWAIT)) {
2004                                 hifnstats.hst_nomem_load++;
2005                                 err = ENOMEM;
2006                                 goto err_dstmap1;
2007                         }
2008                 }
2009         }
2010
2011 #ifdef HIFN_DEBUG
2012         if (hifn_debug) {
2013                 device_printf(sc->sc_dev,
2014                     "Entering cmd: stat %8x ien %8x u %d/%d/%d/%d n %d/%d\n",
2015                     READ_REG_1(sc, HIFN_1_DMA_CSR),
2016                     READ_REG_1(sc, HIFN_1_DMA_IER),
2017                     dma->cmdu, dma->srcu, dma->dstu, dma->resu,
2018                     cmd->src_nsegs, cmd->dst_nsegs);
2019         }
2020 #endif
2021
2022         if (cmd->src_map == cmd->dst_map) {
2023                 bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2024                     BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
2025         } else {
2026                 bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2027                     BUS_DMASYNC_PREWRITE);
2028                 bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
2029                     BUS_DMASYNC_PREREAD);
2030         }
2031
2032         /*
2033          * need N src, and N dst
2034          */
2035         if ((dma->srcu + cmd->src_nsegs) > HIFN_D_SRC_RSIZE ||
2036             (dma->dstu + cmd->dst_nsegs + 1) > HIFN_D_DST_RSIZE) {
2037 #ifdef HIFN_DEBUG
2038                 if (hifn_debug) {
2039                         device_printf(sc->sc_dev,
2040                                 "src/dst exhaustion, srcu %u+%u dstu %u+%u\n",
2041                                 dma->srcu, cmd->src_nsegs,
2042                                 dma->dstu, cmd->dst_nsegs);
2043                 }
2044 #endif
2045                 hifnstats.hst_nomem_sd++;
2046                 err = ERESTART;
2047                 goto err_dstmap;
2048         }
2049
2050         if (dma->cmdi == HIFN_D_CMD_RSIZE) {
2051                 dma->cmdi = 0;
2052                 dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID |
2053                     HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
2054                 HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
2055                     BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2056         }
2057         cmdi = dma->cmdi++;
2058         cmdlen = hifn_write_command(cmd, dma->command_bufs[cmdi]);
2059         HIFN_CMD_SYNC(sc, cmdi, BUS_DMASYNC_PREWRITE);
2060
2061         /* .p for command/result already set */
2062         dma->cmdr[cmdi].l = htole32(cmdlen | HIFN_D_VALID | HIFN_D_LAST |
2063             HIFN_D_MASKDONEIRQ);
2064         HIFN_CMDR_SYNC(sc, cmdi,
2065             BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2066         dma->cmdu++;
2067
2068         /*
2069          * We don't worry about missing an interrupt (which a "command wait"
2070          * interrupt salvages us from), unless there is more than one command
2071          * in the queue.
2072          */
2073         if (dma->cmdu > 1) {
2074                 sc->sc_dmaier |= HIFN_DMAIER_C_WAIT;
2075                 WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
2076         }
2077
2078         hifnstats.hst_ipackets++;
2079         hifnstats.hst_ibytes += cmd->src_mapsize;
2080
2081         hifn_dmamap_load_src(sc, cmd);
2082
2083         /*
2084          * Unlike the other descriptors, we don't mask the done interrupt
2085          * on the result descriptor.
2086          */
2087 #ifdef HIFN_DEBUG
2088         if (hifn_debug)
2089                 kprintf("load res\n");
2090 #endif
2091         if (dma->resi == HIFN_D_RES_RSIZE) {
2092                 dma->resi = 0;
2093                 dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID |
2094                     HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
2095                 HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
2096                     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2097         }
2098         resi = dma->resi++;
2099         KASSERT(dma->hifn_commands[resi] == NULL,
2100                 ("hifn_crypto: command slot %u busy", resi));
2101         dma->hifn_commands[resi] = cmd;
2102         HIFN_RES_SYNC(sc, resi, BUS_DMASYNC_PREREAD);
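        /*
         * Only ask for a completion interrupt when the framework does
         * not hint that more requests are coming, or once the batch
         * limit (hifn_maxbatch) has been reached.
         */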
2103         if ((hint & CRYPTO_HINT_MORE) && sc->sc_curbatch < hifn_maxbatch) {
2104                 dma->resr[resi].l = htole32(HIFN_MAX_RESULT |
2105                     HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ);
2106                 sc->sc_curbatch++;
2107                 if (sc->sc_curbatch > hifnstats.hst_maxbatch)
2108                         hifnstats.hst_maxbatch = sc->sc_curbatch;
2109                 hifnstats.hst_totbatch++;
2110         } else {
2111                 dma->resr[resi].l = htole32(HIFN_MAX_RESULT |
2112                     HIFN_D_VALID | HIFN_D_LAST);
2113                 sc->sc_curbatch = 0;
2114         }
2115         HIFN_RESR_SYNC(sc, resi,
2116             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2117         dma->resu++;
2118
2119         if (cmd->sloplen)
2120                 cmd->slopidx = resi;
2121
2122         hifn_dmamap_load_dst(sc, cmd);
2123
2124         csr = 0;
2125         if (sc->sc_c_busy == 0) {
2126                 csr |= HIFN_DMACSR_C_CTRL_ENA;
2127                 sc->sc_c_busy = 1;
2128         }
2129         if (sc->sc_s_busy == 0) {
2130                 csr |= HIFN_DMACSR_S_CTRL_ENA;
2131                 sc->sc_s_busy = 1;
2132         }
2133         if (sc->sc_r_busy == 0) {
2134                 csr |= HIFN_DMACSR_R_CTRL_ENA;
2135                 sc->sc_r_busy = 1;
2136         }
2137         if (sc->sc_d_busy == 0) {
2138                 csr |= HIFN_DMACSR_D_CTRL_ENA;
2139                 sc->sc_d_busy = 1;
2140         }
2141         if (csr)
2142                 WRITE_REG_1(sc, HIFN_1_DMA_CSR, csr);
2143
2144 #ifdef HIFN_DEBUG
2145         if (hifn_debug) {
2146                 device_printf(sc->sc_dev, "command: stat %8x ier %8x\n",
2147                     READ_REG_1(sc, HIFN_1_DMA_CSR),
2148                     READ_REG_1(sc, HIFN_1_DMA_IER));
2149         }
2150 #endif
2151
2152         sc->sc_active = 5;
2153         HIFN_UNLOCK(sc);
2154         KASSERT(err == 0, ("hifn_crypto: success with error %u", err));
2155         return (err);           /* success */
2156
2157 err_dstmap:
2158         if (cmd->src_map != cmd->dst_map)
2159                 bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
2160 err_dstmap1:
2161         if (cmd->src_map != cmd->dst_map)
2162                 bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
2163 err_srcmap:
2164         if (crp->crp_flags & CRYPTO_F_IMBUF) {
2165                 if (cmd->src_m != cmd->dst_m)
2166                         m_freem(cmd->dst_m);
2167         }
2168         bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
2169 err_srcmap1:
2170         bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
2171         HIFN_UNLOCK(sc);
2172         return (err);
2173 }
2174
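/*
 * Once-a-second timer: when the device has been idle for a while,
 * disable the DMA engines whose rings have drained, then reschedule.
 */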
2175 static void
2176 hifn_tick(void* vsc)
2177 {
2178         struct hifn_softc *sc = vsc;
2179
2180         HIFN_LOCK(sc);
2181         if (sc->sc_active == 0) {
2182                 struct hifn_dma *dma = sc->sc_dma;
2183                 u_int32_t r = 0;
2184
2185                 if (dma->cmdu == 0 && sc->sc_c_busy) {
2186                         sc->sc_c_busy = 0;
2187                         r |= HIFN_DMACSR_C_CTRL_DIS;
2188                 }
2189                 if (dma->srcu == 0 && sc->sc_s_busy) {
2190                         sc->sc_s_busy = 0;
2191                         r |= HIFN_DMACSR_S_CTRL_DIS;
2192                 }
2193                 if (dma->dstu == 0 && sc->sc_d_busy) {
2194                         sc->sc_d_busy = 0;
2195                         r |= HIFN_DMACSR_D_CTRL_DIS;
2196                 }
2197                 if (dma->resu == 0 && sc->sc_r_busy) {
2198                         sc->sc_r_busy = 0;
2199                         r |= HIFN_DMACSR_R_CTRL_DIS;
2200                 }
2201                 if (r)
2202                         WRITE_REG_1(sc, HIFN_1_DMA_CSR, r);
2203         } else
2204                 sc->sc_active--;
2205         HIFN_UNLOCK(sc);
2206         callout_reset(&sc->sc_tickto, hz, hifn_tick, sc);
2207 }
2208
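/*
 * Interrupt handler: acknowledge the DMA status bits, recover from
 * aborts by resetting the board, complete finished requests via
 * hifn_callback() and reclaim command/source ring slots.
 */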
2209 static void 
2210 hifn_intr(void *arg)
2211 {
2212         struct hifn_softc *sc = arg;
2213         struct hifn_dma *dma;
2214         u_int32_t dmacsr, restart;
2215         int i, u;
2216
2217         dmacsr = READ_REG_1(sc, HIFN_1_DMA_CSR);
2218         
2219         /* Nothing in the DMA unit interrupted */
2220         if ((dmacsr & sc->sc_dmaier) == 0) {
2221                 hifnstats.hst_noirq++;
2222                 return;
2223         }
2224
2225         HIFN_LOCK(sc);
2226
2227         dma = sc->sc_dma;
2228
2229 #ifdef HIFN_DEBUG
2230         if (hifn_debug) {
2231                 device_printf(sc->sc_dev,
2232                     "irq: stat %08x ien %08x dmaier %08x i %d/%d/%d/%d k %d/%d/%d/%d u %d/%d/%d/%d\n",
2233                     dmacsr, READ_REG_1(sc, HIFN_1_DMA_IER), sc->sc_dmaier,
2234                     dma->cmdi, dma->srci, dma->dsti, dma->resi,
2235                     dma->cmdk, dma->srck, dma->dstk, dma->resk,
2236                     dma->cmdu, dma->srcu, dma->dstu, dma->resu);
2237         }
2238 #endif
2239
2240         WRITE_REG_1(sc, HIFN_1_DMA_CSR, dmacsr & sc->sc_dmaier);
2241
2242         if ((sc->sc_flags & HIFN_HAS_PUBLIC) &&
2243             (dmacsr & HIFN_DMACSR_PUBDONE))
2244                 WRITE_REG_1(sc, HIFN_1_PUB_STATUS,
2245                     READ_REG_1(sc, HIFN_1_PUB_STATUS) | HIFN_PUBSTS_DONE);
2246
2247         restart = dmacsr & (HIFN_DMACSR_D_OVER | HIFN_DMACSR_R_OVER);
2248         if (restart)
2249                 device_printf(sc->sc_dev, "overrun %x\n", dmacsr);
2250
2251         if (sc->sc_flags & HIFN_IS_7811) {
2252                 if (dmacsr & HIFN_DMACSR_ILLR)
2253                         device_printf(sc->sc_dev, "illegal read\n");
2254                 if (dmacsr & HIFN_DMACSR_ILLW)
2255                         device_printf(sc->sc_dev, "illegal write\n");
2256         }
2257
2258         restart = dmacsr & (HIFN_DMACSR_C_ABORT | HIFN_DMACSR_S_ABORT |
2259             HIFN_DMACSR_D_ABORT | HIFN_DMACSR_R_ABORT);
2260         if (restart) {
2261                 device_printf(sc->sc_dev, "abort, resetting.\n");
2262                 hifnstats.hst_abort++;
2263                 hifn_abort(sc);
2264                 HIFN_UNLOCK(sc);
2265                 return;
2266         }
2267
2268         if ((dmacsr & HIFN_DMACSR_C_WAIT) && (dma->cmdu == 0)) {
2269                 /*
2270                  * If no slots to process and we receive a "waiting on
2271                  * command" interrupt, we disable the "waiting on command"
2272                  * interrupt (by clearing HIFN_DMAIER_C_WAIT in the IER).
2273                  */
2274                 sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
2275                 WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
2276         }
2277
2278         /* clear the rings */
2279         i = dma->resk; u = dma->resu;
2280         while (u != 0) {
2281                 HIFN_RESR_SYNC(sc, i,
2282                     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2283                 if (dma->resr[i].l & htole32(HIFN_D_VALID)) {
2284                         HIFN_RESR_SYNC(sc, i,
2285                             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2286                         break;
2287                 }
2288
2289                 if (i != HIFN_D_RES_RSIZE) {
2290                         struct hifn_command *cmd;
2291                         u_int8_t *macbuf = NULL;
2292
2293                         HIFN_RES_SYNC(sc, i, BUS_DMASYNC_POSTREAD);
2294                         cmd = dma->hifn_commands[i];
2295                         KASSERT(cmd != NULL,
2296                                 ("hifn_intr: null command slot %u", i));
2297                         dma->hifn_commands[i] = NULL;
2298
2299                         if (cmd->base_masks & HIFN_BASE_CMD_MAC) {
2300                                 macbuf = dma->result_bufs[i];
2301                                 macbuf += 12;
2302                         }
2303
2304                         hifn_callback(sc, cmd, macbuf);
2305                         hifnstats.hst_opackets++;
2306                         u--;
2307                 }
2308
2309                 if (++i == (HIFN_D_RES_RSIZE + 1))
2310                         i = 0;
2311         }
2312         dma->resk = i; dma->resu = u;
2313
2314         i = dma->srck; u = dma->srcu;
2315         while (u != 0) {
2316                 if (i == HIFN_D_SRC_RSIZE)
2317                         i = 0;
2318                 HIFN_SRCR_SYNC(sc, i,
2319                     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2320                 if (dma->srcr[i].l & htole32(HIFN_D_VALID)) {
2321                         HIFN_SRCR_SYNC(sc, i,
2322                             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2323                         break;
2324                 }
2325                 i++, u--;
2326         }
2327         dma->srck = i; dma->srcu = u;
2328
2329         i = dma->cmdk; u = dma->cmdu;
2330         while (u != 0) {
2331                 HIFN_CMDR_SYNC(sc, i,
2332                     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2333                 if (dma->cmdr[i].l & htole32(HIFN_D_VALID)) {
2334                         HIFN_CMDR_SYNC(sc, i,
2335                             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2336                         break;
2337                 }
2338                 if (i != HIFN_D_CMD_RSIZE) {
2339                         u--;
2340                         HIFN_CMD_SYNC(sc, i, BUS_DMASYNC_POSTWRITE);
2341                 }
2342                 if (++i == (HIFN_D_CMD_RSIZE + 1))
2343                         i = 0;
2344         }
2345         dma->cmdk = i; dma->cmdu = u;
2346
2347         HIFN_UNLOCK(sc);
2348
2349         if (sc->sc_needwakeup) {                /* XXX check high watermark */
2350                 int wakeup = sc->sc_needwakeup & (CRYPTO_SYMQ|CRYPTO_ASYMQ);
2351 #ifdef HIFN_DEBUG
2352                 if (hifn_debug)
2353                         device_printf(sc->sc_dev,
2354                                 "wakeup crypto (%x) u %d/%d/%d/%d\n",
2355                                 sc->sc_needwakeup,
2356                                 dma->cmdu, dma->srcu, dma->dstu, dma->resu);
2357 #endif
2358                 sc->sc_needwakeup &= ~wakeup;
2359                 crypto_unblock(sc->sc_cid, wakeup);
2360         }
2361 }
2362
2363 /*
2364  * Allocate a new 'session' and return an encoded session id.  'sidp'
2365  * contains our registration id, and should contain an encoded session
2366  * id on successful allocation.
2367  */
2368 static int
2369 hifn_newsession(device_t dev, u_int32_t *sidp, struct cryptoini *cri)
2370 {
2371         struct cryptoini *c;
2372         struct hifn_softc *sc = device_get_softc(dev);
2373         int mac = 0, cry = 0, sesn;
2374         struct hifn_session *ses = NULL;
2375
2376         KASSERT(sc != NULL, ("hifn_newsession: null softc"));
2377         if (sidp == NULL || cri == NULL || sc == NULL)
2378                 return (EINVAL);
2379
2380         HIFN_LOCK(sc);
2381         if (sc->sc_sessions == NULL) {
2382                 ses = sc->sc_sessions = (struct hifn_session *)kmalloc(
2383                     sizeof(*ses), M_DEVBUF, M_NOWAIT);
2384                 if (ses == NULL) {
2385                         HIFN_UNLOCK(sc);
2386                         return (ENOMEM);
2387                 }
2388                 sesn = 0;
2389                 sc->sc_nsessions = 1;
2390         } else {
2391                 for (sesn = 0; sesn < sc->sc_nsessions; sesn++) {
2392                         if (!sc->sc_sessions[sesn].hs_used) {
2393                                 ses = &sc->sc_sessions[sesn];
2394                                 break;
2395                         }
2396                 }
2397
2398                 if (ses == NULL) {
2399                         sesn = sc->sc_nsessions;
2400                         ses = (struct hifn_session *)kmalloc((sesn + 1) *
2401                             sizeof(*ses), M_DEVBUF, M_NOWAIT);
2402                         if (ses == NULL) {
2403                                 HIFN_UNLOCK(sc);
2404                                 return (ENOMEM);
2405                         }
2406                         bcopy(sc->sc_sessions, ses, sesn * sizeof(*ses));
2407                         bzero(sc->sc_sessions, sesn * sizeof(*ses));
2408                         kfree(sc->sc_sessions, M_DEVBUF);
2409                         sc->sc_sessions = ses;
2410                         ses = &sc->sc_sessions[sesn];
2411                         sc->sc_nsessions++;
2412                 }
2413         }
2414         HIFN_UNLOCK(sc);
2415
2416         bzero(ses, sizeof(*ses));
2417         ses->hs_used = 1;
2418
2419         for (c = cri; c != NULL; c = c->cri_next) {
2420                 switch (c->cri_alg) {
2421                 case CRYPTO_MD5:
2422                 case CRYPTO_SHA1:
2423                 case CRYPTO_MD5_HMAC:
2424                 case CRYPTO_SHA1_HMAC:
2425                         if (mac)
2426                                 return (EINVAL);
2427                         mac = 1;
2428                         ses->hs_mlen = c->cri_mlen;
2429                         if (ses->hs_mlen == 0) {
2430                                 switch (c->cri_alg) {
2431                                 case CRYPTO_MD5:
2432                                 case CRYPTO_MD5_HMAC:
2433                                         ses->hs_mlen = 16;
2434                                         break;
2435                                 case CRYPTO_SHA1:
2436                                 case CRYPTO_SHA1_HMAC:
2437                                         ses->hs_mlen = 20;
2438                                         break;
2439                                 }
2440                         }
2441                         break;
2442                 case CRYPTO_DES_CBC:
2443                 case CRYPTO_3DES_CBC:
2444                 case CRYPTO_AES_CBC:
2445                         /* XXX this may read fewer bytes than requested; does it matter? */
2446                         read_random(ses->hs_iv,
2447                                 c->cri_alg == CRYPTO_AES_CBC ?
2448                                         HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
2449                         /*FALLTHROUGH*/
2450                 case CRYPTO_ARC4:
2451                         if (cry)
2452                                 return (EINVAL);
2453                         cry = 1;
2454                         break;
2455                 default:
2456                         return (EINVAL);
2457                 }
2458         }
2459         if (mac == 0 && cry == 0)
2460                 return (EINVAL);
2461
2462         *sidp = HIFN_SID(device_get_unit(sc->sc_dev), sesn);
2463
2464         return (0);
2465 }
2466
2467 /*
2468  * Deallocate a session.
2469  * XXX this routine should run a zero'd mac/encrypt key into context ram.
2470  * XXX to blow away any keys already stored there.
2471  */
2472 #define CRYPTO_SESID2LID(_sid)  (((u_int32_t) (_sid)) & 0xffffffff)
2473
2474 static int
2475 hifn_freesession(device_t dev, u_int64_t tid)
2476 {
2477         struct hifn_softc *sc = device_get_softc(dev);
2478         int session, error;
2479         u_int32_t sid = CRYPTO_SESID2LID(tid);
2480
2481         KASSERT(sc != NULL, ("hifn_freesession: null softc"));
2482         if (sc == NULL)
2483                 return (EINVAL);
2484
2485         HIFN_LOCK(sc);
2486         session = HIFN_SESSION(sid);
2487         if (session < sc->sc_nsessions) {
2488                 bzero(&sc->sc_sessions[session], sizeof(struct hifn_session));
2489                 error = 0;
2490         } else
2491                 error = EINVAL;
2492         HIFN_UNLOCK(sc);
2493
2494         return (error);
2495 }
2496
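/*
 * Crypto framework dispatch entry point: validate the request, build
 * a hifn_command describing the cipher and/or MAC descriptors and
 * submit it through hifn_crypto().
 */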
2497 static int
2498 hifn_process(device_t dev, struct cryptop *crp, int hint)
2499 {
2500         struct hifn_softc *sc = device_get_softc(dev);
2501         struct hifn_command *cmd = NULL;
2502         int session, err, ivlen;
2503         struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
2504
2505         if (crp == NULL || crp->crp_callback == NULL) {
2506                 hifnstats.hst_invalid++;
2507                 return (EINVAL);
2508         }
2509         session = HIFN_SESSION(crp->crp_sid);
2510
2511         if (sc == NULL || session >= sc->sc_nsessions) {
2512                 err = EINVAL;
2513                 goto errout;
2514         }
2515
2516         cmd = kmalloc(sizeof(struct hifn_command), M_DEVBUF, M_INTWAIT | M_ZERO);
2517         if (cmd == NULL) {
2518                 hifnstats.hst_nomem++;
2519                 err = ENOMEM;
2520                 goto errout;
2521         }
2522
2523         if (crp->crp_flags & CRYPTO_F_IMBUF) {
2524                 cmd->src_m = (struct mbuf *)crp->crp_buf;
2525                 cmd->dst_m = (struct mbuf *)crp->crp_buf;
2526         } else if (crp->crp_flags & CRYPTO_F_IOV) {
2527                 cmd->src_io = (struct uio *)crp->crp_buf;
2528                 cmd->dst_io = (struct uio *)crp->crp_buf;
2529         } else {
2530                 err = EINVAL;
2531                 goto errout;    /* XXX we don't handle contiguous buffers! */
2532         }
2533
2534         crd1 = crp->crp_desc;
2535         if (crd1 == NULL) {
2536                 err = EINVAL;
2537                 goto errout;
2538         }
2539         crd2 = crd1->crd_next;
2540
2541         if (crd2 == NULL) {
2542                 if (crd1->crd_alg == CRYPTO_MD5_HMAC ||
2543                     crd1->crd_alg == CRYPTO_SHA1_HMAC ||
2544                     crd1->crd_alg == CRYPTO_SHA1 ||
2545                     crd1->crd_alg == CRYPTO_MD5) {
2546                         maccrd = crd1;
2547                         enccrd = NULL;
2548                 } else if (crd1->crd_alg == CRYPTO_DES_CBC ||
2549                     crd1->crd_alg == CRYPTO_3DES_CBC ||
2550                     crd1->crd_alg == CRYPTO_AES_CBC ||
2551                     crd1->crd_alg == CRYPTO_ARC4) {
2552                         if ((crd1->crd_flags & CRD_F_ENCRYPT) == 0)
2553                                 cmd->base_masks |= HIFN_BASE_CMD_DECODE;
2554                         maccrd = NULL;
2555                         enccrd = crd1;
2556                 } else {
2557                         err = EINVAL;
2558                         goto errout;
2559                 }
2560         } else {
2561                 if ((crd1->crd_alg == CRYPTO_MD5_HMAC ||
2562                      crd1->crd_alg == CRYPTO_SHA1_HMAC ||
2563                      crd1->crd_alg == CRYPTO_MD5 ||
2564                      crd1->crd_alg == CRYPTO_SHA1) &&
2565                     (crd2->crd_alg == CRYPTO_DES_CBC ||
2566                      crd2->crd_alg == CRYPTO_3DES_CBC ||
2567                      crd2->crd_alg == CRYPTO_AES_CBC ||
2568                      crd2->crd_alg == CRYPTO_ARC4) &&
2569                     ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) {
2570                         cmd->base_masks = HIFN_BASE_CMD_DECODE;
2571                         maccrd = crd1;
2572                         enccrd = crd2;
2573                 } else if ((crd1->crd_alg == CRYPTO_DES_CBC ||
2574                      crd1->crd_alg == CRYPTO_ARC4 ||
2575                      crd1->crd_alg == CRYPTO_3DES_CBC ||
2576                      crd1->crd_alg == CRYPTO_AES_CBC) &&
2577                     (crd2->crd_alg == CRYPTO_MD5_HMAC ||
2578                      crd2->crd_alg == CRYPTO_SHA1_HMAC ||
2579                      crd2->crd_alg == CRYPTO_MD5 ||
2580                      crd2->crd_alg == CRYPTO_SHA1) &&
2581                     (crd1->crd_flags & CRD_F_ENCRYPT)) {
2582                         enccrd = crd1;
2583                         maccrd = crd2;
2584                 } else {
2585                         /*
2586                          * We cannot order the 7751 as requested
2587                          */
2588                         err = EINVAL;
2589                         goto errout;
2590                 }
2591         }
2592
2593         if (enccrd) {
2594                 cmd->enccrd = enccrd;
2595                 cmd->base_masks |= HIFN_BASE_CMD_CRYPT;
2596                 switch (enccrd->crd_alg) {
2597                 case CRYPTO_ARC4:
2598                         cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_RC4;
2599                         break;
2600                 case CRYPTO_DES_CBC:
2601                         cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_DES |
2602                             HIFN_CRYPT_CMD_MODE_CBC |
2603                             HIFN_CRYPT_CMD_NEW_IV;
2604                         break;
2605                 case CRYPTO_3DES_CBC:
2606                         cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_3DES |
2607                             HIFN_CRYPT_CMD_MODE_CBC |
2608                             HIFN_CRYPT_CMD_NEW_IV;
2609                         break;
2610                 case CRYPTO_AES_CBC:
2611                         cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_AES |
2612                             HIFN_CRYPT_CMD_MODE_CBC |
2613                             HIFN_CRYPT_CMD_NEW_IV;
2614                         break;
2615                 default:
2616                         err = EINVAL;
2617                         goto errout;
2618                 }
2619                 if (enccrd->crd_alg != CRYPTO_ARC4) {
2620                         ivlen = ((enccrd->crd_alg == CRYPTO_AES_CBC) ?
2621                                 HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
2622                         if (enccrd->crd_flags & CRD_F_ENCRYPT) {
2623                                 if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
2624                                         bcopy(enccrd->crd_iv, cmd->iv, ivlen);
2625                                 else
2626                                         bcopy(sc->sc_sessions[session].hs_iv,
2627                                             cmd->iv, ivlen);
2628
2629                                 if ((enccrd->crd_flags & CRD_F_IV_PRESENT)
2630                                     == 0) {
2631                                         crypto_copyback(crp->crp_flags,
2632                                             crp->crp_buf, enccrd->crd_inject,
2633                                             ivlen, cmd->iv);
2634                                 }
2635                         } else {
2636                                 if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
2637                                         bcopy(enccrd->crd_iv, cmd->iv, ivlen);
2638                                 else {
2639                                         crypto_copydata(crp->crp_flags,
2640                                             crp->crp_buf, enccrd->crd_inject,
2641                                             ivlen, cmd->iv);
2642                                 }
2643                         }
2644                 }
2645
2646                 if (enccrd->crd_flags & CRD_F_KEY_EXPLICIT)
2647                         cmd->cry_masks |= HIFN_CRYPT_CMD_NEW_KEY;
2648                 cmd->ck = enccrd->crd_key;
2649                 cmd->cklen = enccrd->crd_klen >> 3;
2650                 cmd->cry_masks |= HIFN_CRYPT_CMD_NEW_KEY;
2651
2652                 /*
2653                  * Need to specify the size for the AES key in the masks.
2654                  */
2655                 if ((cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) ==
2656                     HIFN_CRYPT_CMD_ALG_AES) {
2657                         switch (cmd->cklen) {
2658                         case 16:
2659                                 cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_128;
2660                                 break;
2661                         case 24:
2662                                 cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_192;
2663                                 break;
2664                         case 32:
2665                                 cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_256;
2666                                 break;
2667                         default:
2668                                 err = EINVAL;
2669                                 goto errout;
2670                         }
2671                 }
2672         }
2673
2674         if (maccrd) {
2675                 cmd->maccrd = maccrd;
2676                 cmd->base_masks |= HIFN_BASE_CMD_MAC;
2677
2678                 switch (maccrd->crd_alg) {
2679                 case CRYPTO_MD5:
2680                         cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 |
2681                             HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH |
2682                             HIFN_MAC_CMD_POS_IPSEC;
2683                         break;
2684                 case CRYPTO_MD5_HMAC:
2685                         cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 |
2686                             HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC |
2687                             HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC;
2688                         break;
2689                 case CRYPTO_SHA1:
2690                         cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 |
2691                             HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH |
2692                             HIFN_MAC_CMD_POS_IPSEC;
2693                         break;
2694                 case CRYPTO_SHA1_HMAC:
2695                         cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 |
2696                             HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC |
2697                             HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC;
2698                         break;
2699                 }
2700
2701                 if (maccrd->crd_alg == CRYPTO_SHA1_HMAC ||
2702                     maccrd->crd_alg == CRYPTO_MD5_HMAC) {
2703                         cmd->mac_masks |= HIFN_MAC_CMD_NEW_KEY;
2704                         bcopy(maccrd->crd_key, cmd->mac, maccrd->crd_klen >> 3);
2705                         bzero(cmd->mac + (maccrd->crd_klen >> 3),
2706                             HIFN_MAC_KEY_LENGTH - (maccrd->crd_klen >> 3));
2707                 }
2708         }
2709
2710         cmd->crp = crp;
2711         cmd->session_num = session;
2712         cmd->softc = sc;
2713
2714         err = hifn_crypto(sc, cmd, crp, hint);
2715         if (!err) {
2716                 return 0;
2717         } else if (err == ERESTART) {
2718                 /*
2719                  * There weren't enough resources to dispatch the request
2720                  * to the part.  Notify the caller so they'll requeue this
2721                  * request and resubmit it again soon.
2722                  */
2723 #ifdef HIFN_DEBUG
2724                 if (hifn_debug)
2725                         device_printf(sc->sc_dev, "requeue request\n");
2726 #endif
2727                 kfree(cmd, M_DEVBUF);
2728                 sc->sc_needwakeup |= CRYPTO_SYMQ;
2729                 return (err);
2730         }
2731
2732 errout:
2733         if (cmd != NULL)
2734                 kfree(cmd, M_DEVBUF);
2735         if (err == EINVAL)
2736                 hifnstats.hst_invalid++;
2737         else
2738                 hifnstats.hst_nomem++;
2739         crp->crp_etype = err;
2740         crypto_done(crp);
2741         return (err);
2742 }
2743
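/*
 * The DMA engine aborted: complete whatever results have already
 * finished, fail the remaining queued commands, then reset and
 * reinitialize the board.
 */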
2744 static void
2745 hifn_abort(struct hifn_softc *sc)
2746 {
2747         struct hifn_dma *dma = sc->sc_dma;
2748         struct hifn_command *cmd;
2749         struct cryptop *crp;
2750         int i, u;
2751
2752         i = dma->resk; u = dma->resu;
2753         while (u != 0) {
2754                 cmd = dma->hifn_commands[i];
2755                 KASSERT(cmd != NULL, ("hifn_abort: null command slot %u", i));
2756                 dma->hifn_commands[i] = NULL;
2757                 crp = cmd->crp;
2758
		if ((dma->resr[i].l & htole32(HIFN_D_VALID)) == 0) {
			/* Salvage what we can. */
			u_int8_t *macbuf;

			if (cmd->base_masks & HIFN_BASE_CMD_MAC) {
				macbuf = dma->result_bufs[i];
				macbuf += 12;
			} else
				macbuf = NULL;
			hifnstats.hst_opackets++;
			hifn_callback(sc, cmd, macbuf);
		} else {
			if (cmd->src_map == cmd->dst_map) {
				bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
				    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			} else {
				bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
				    BUS_DMASYNC_POSTREAD);
			}

			if (cmd->src_m != cmd->dst_m) {
				m_freem(cmd->src_m);
				crp->crp_buf = (caddr_t)cmd->dst_m;
			}

			/* non-shared buffers cannot be restarted */
			if (cmd->src_map != cmd->dst_map) {
				/*
				 * XXX should be EAGAIN, delayed until
				 * after the reset.
				 */
				crp->crp_etype = ENOMEM;
				bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
				bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
			} else
				crp->crp_etype = ENOMEM;

			bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
			bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);

			kfree(cmd, M_DEVBUF);
			if (crp->crp_etype != EAGAIN)
				crypto_done(crp);
		}

		if (++i == HIFN_D_RES_RSIZE)
			i = 0;
		u--;
	}
	dma->resk = i; dma->resu = u;

	hifn_reset_board(sc, 1);
	hifn_init_dma(sc);
	hifn_init_pci_registers(sc);
}

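/*
 * Completion handling for a finished command: sync the DMA maps, hand a
 * newly allocated destination mbuf chain back to the caller if one was
 * used, copy back the slop bytes and the MAC result, save the IV for the
 * session, release the DMA resources and pass the request to crypto_done().
 */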
static void
hifn_callback(struct hifn_softc *sc, struct hifn_command *cmd, u_int8_t *macbuf)
{
	struct hifn_dma *dma = sc->sc_dma;
	struct cryptop *crp = cmd->crp;
	struct cryptodesc *crd;
	struct mbuf *m;
	int totlen, i, u, ivlen;

	if (cmd->src_map == cmd->dst_map) {
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
	} else {
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
		    BUS_DMASYNC_POSTREAD);
	}

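	/*
	 * If the result was placed in a separate destination mbuf chain,
	 * trim that chain to the length of the source data and hand it to
	 * the caller in place of the original chain, which is then freed.
	 */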
	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		if (cmd->src_m != cmd->dst_m) {
			crp->crp_buf = (caddr_t)cmd->dst_m;
			totlen = cmd->src_mapsize;
			for (m = cmd->dst_m; m != NULL; m = m->m_next) {
				if (totlen < m->m_len) {
					m->m_len = totlen;
					totlen = 0;
				} else
					totlen -= m->m_len;
			}
			cmd->dst_m->m_pkthdr.len = cmd->src_m->m_pkthdr.len;
			m_freem(cmd->src_m);
		}
	}

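	/*
	 * Trailing bytes that could not be DMA'd straight into the
	 * destination were written to the shared slop area; copy them
	 * back into the caller's buffer.
	 */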
	if (cmd->sloplen != 0) {
		crypto_copyback(crp->crp_flags, crp->crp_buf,
		    cmd->src_mapsize - cmd->sloplen, cmd->sloplen,
		    (caddr_t)&dma->slop[cmd->slopidx]);
	}

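	/*
	 * Reclaim the destination descriptors this command consumed,
	 * stopping at the first one the chip still owns (VALID set).
	 */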
	i = dma->dstk; u = dma->dstu;
	while (u != 0) {
		if (i == HIFN_D_DST_RSIZE)
			i = 0;
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->dstr[i].l & htole32(HIFN_D_VALID)) {
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}
		i++, u--;
	}
	dma->dstk = i; dma->dstu = u;

	hifnstats.hst_obytes += cmd->dst_mapsize;

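	/*
	 * After an encrypt operation, save the final block of the result
	 * as the session IV so a later request on this session can chain
	 * off it.
	 */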
	if ((cmd->base_masks & (HIFN_BASE_CMD_CRYPT | HIFN_BASE_CMD_DECODE)) ==
	    HIFN_BASE_CMD_CRYPT) {
		for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
			if (crd->crd_alg != CRYPTO_DES_CBC &&
			    crd->crd_alg != CRYPTO_3DES_CBC &&
			    crd->crd_alg != CRYPTO_AES_CBC)
				continue;
			ivlen = ((crd->crd_alg == CRYPTO_AES_CBC) ?
			    HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
			crypto_copydata(crp->crp_flags, crp->crp_buf,
			    crd->crd_skip + crd->crd_len - ivlen, ivlen,
			    cmd->softc->sc_sessions[cmd->session_num].hs_iv);
			break;
		}
	}

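	/*
	 * Copy the MAC digest computed by the chip back into the request
	 * at the offset opencrypto asked for; hs_mlen is the (possibly
	 * truncated) digest length established at session setup.
	 */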
	if (macbuf != NULL) {
		for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
			int len;

			if (crd->crd_alg != CRYPTO_MD5 &&
			    crd->crd_alg != CRYPTO_SHA1 &&
			    crd->crd_alg != CRYPTO_MD5_HMAC &&
			    crd->crd_alg != CRYPTO_SHA1_HMAC) {
				continue;
			}
			len = cmd->softc->sc_sessions[cmd->session_num].hs_mlen;
			crypto_copyback(crp->crp_flags, crp->crp_buf,
			    crd->crd_inject, len, macbuf);
			break;
		}
	}

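	/* All data has been copied out; release the DMA maps and finish. */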
	if (cmd->src_map != cmd->dst_map) {
		bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
		bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
	}
	bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
	bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
	kfree(cmd, M_DEVBUF);
	crypto_done(crp);
}

/*
 * 7811 PB3 rev/2 parts lock-up on burst writes to Group 0
 * and Group 1 registers; avoid conditions that could create
 * burst writes by doing a read in between the writes.
 *
 * NB: The read we interpose is always to the same register;
 *     we do this because reading from an arbitrary (e.g. last)
 *     register may not always work.
 */
static void
hifn_write_reg_0(struct hifn_softc *sc, bus_size_t reg, u_int32_t val)
{
	if (sc->sc_flags & HIFN_IS_7811) {
		if (sc->sc_bar0_lastreg == reg - 4)
			bus_space_read_4(sc->sc_st0, sc->sc_sh0, HIFN_0_PUCNFG);
		sc->sc_bar0_lastreg = reg;
	}
	bus_space_write_4(sc->sc_st0, sc->sc_sh0, reg, val);
}

static void
hifn_write_reg_1(struct hifn_softc *sc, bus_size_t reg, u_int32_t val)
{
	if (sc->sc_flags & HIFN_IS_7811) {
		if (sc->sc_bar1_lastreg == reg - 4)
			bus_space_read_4(sc->sc_st1, sc->sc_sh1, HIFN_1_REVID);
		sc->sc_bar1_lastreg = reg;
	}
	bus_space_write_4(sc->sc_st1, sc->sc_sh1, reg, val);
}