1 /* $FreeBSD: src/sys/dev/hifn/hifn7751.c,v 1.5.2.5 2003/06/04 17:56:59 sam Exp $ */
2 /* $DragonFly: src/sys/dev/crypto/hifn/hifn7751.c,v 1.4 2003/08/07 21:16:49 dillon Exp $ */
3 /*      $OpenBSD: hifn7751.c,v 1.120 2002/05/17 00:33:34 deraadt Exp $  */
4
5 /*
6  * Invertex AEON / Hifn 7751 driver
7  * Copyright (c) 1999 Invertex Inc. All rights reserved.
8  * Copyright (c) 1999 Theo de Raadt
9  * Copyright (c) 2000-2001 Network Security Technologies, Inc.
10  *                      http://www.netsec.net
11  *
12  * This driver is based on a previous driver by Invertex, for which they
13  * requested:  Please send any comments, feedback, bug-fixes, or feature
14  * requests to software@invertex.com.
15  *
16  * Redistribution and use in source and binary forms, with or without
17  * modification, are permitted provided that the following conditions
18  * are met:
19  *
20  * 1. Redistributions of source code must retain the above copyright
21  *   notice, this list of conditions and the following disclaimer.
22  * 2. Redistributions in binary form must reproduce the above copyright
23  *   notice, this list of conditions and the following disclaimer in the
24  *   documentation and/or other materials provided with the distribution.
25  * 3. The name of the author may not be used to endorse or promote products
26  *   derived from this software without specific prior written permission.
27  *
28  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
29  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
30  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
31  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
32  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
33  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
34  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
35  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
36  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
37  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
38  *
39  * Effort sponsored in part by the Defense Advanced Research Projects
40  * Agency (DARPA) and Air Force Research Laboratory, Air Force
41  * Materiel Command, USAF, under agreement number F30602-01-2-0537.
42  *
43  */
44
45 /*
46  * Driver for the Hifn 7751 encryption processor.
47  */
48 #include "opt_hifn.h"
49
50 #include <sys/param.h>
51 #include <sys/systm.h>
52 #include <sys/proc.h>
53 #include <sys/errno.h>
54 #include <sys/malloc.h>
55 #include <sys/kernel.h>
56 #include <sys/mbuf.h>
57 #include <sys/sysctl.h>
58
59 #include <vm/vm.h>
60 #include <vm/pmap.h>
61
62 #include <machine/clock.h>
63 #include <machine/bus.h>
64 #include <machine/resource.h>
65 #include <sys/bus.h>
66 #include <sys/rman.h>
67
68 #include <opencrypto/cryptodev.h>
69 #include <sys/random.h>
70
71 #include <bus/pci/pcivar.h>
72 #include <bus/pci/pcireg.h>
73
74 #ifdef HIFN_RNDTEST
75 #include "../rndtest/rndtest.h"
76 #endif
77 #include "hifn7751reg.h"
78 #include "hifn7751var.h"
79
80 /*
81  * Prototypes and count for the pci_device structure
82  */
83 static  int hifn_probe(device_t);
84 static  int hifn_attach(device_t);
85 static  int hifn_detach(device_t);
86 static  int hifn_suspend(device_t);
87 static  int hifn_resume(device_t);
88 static  void hifn_shutdown(device_t);
89
90 static device_method_t hifn_methods[] = {
91         /* Device interface */
92         DEVMETHOD(device_probe,         hifn_probe),
93         DEVMETHOD(device_attach,        hifn_attach),
94         DEVMETHOD(device_detach,        hifn_detach),
95         DEVMETHOD(device_suspend,       hifn_suspend),
96         DEVMETHOD(device_resume,        hifn_resume),
97         DEVMETHOD(device_shutdown,      hifn_shutdown),
98
99         /* bus interface */
100         DEVMETHOD(bus_print_child,      bus_generic_print_child),
101         DEVMETHOD(bus_driver_added,     bus_generic_driver_added),
102
103         { 0, 0 }
104 };
105 static driver_t hifn_driver = {
106         "hifn",
107         hifn_methods,
108         sizeof (struct hifn_softc)
109 };
110 static devclass_t hifn_devclass;
111
112 DRIVER_MODULE(hifn, pci, hifn_driver, hifn_devclass, 0, 0);
113 MODULE_DEPEND(hifn, crypto, 1, 1, 1);
114 #ifdef HIFN_RNDTEST
115 MODULE_DEPEND(hifn, rndtest, 1, 1, 1);
116 #endif
117
118 static  void hifn_reset_board(struct hifn_softc *, int);
119 static  void hifn_reset_puc(struct hifn_softc *);
120 static  void hifn_puc_wait(struct hifn_softc *);
121 static  int hifn_enable_crypto(struct hifn_softc *);
122 static  void hifn_set_retry(struct hifn_softc *sc);
123 static  void hifn_init_dma(struct hifn_softc *);
124 static  void hifn_init_pci_registers(struct hifn_softc *);
125 static  int hifn_sramsize(struct hifn_softc *);
126 static  int hifn_dramsize(struct hifn_softc *);
127 static  int hifn_ramtype(struct hifn_softc *);
128 static  void hifn_sessions(struct hifn_softc *);
129 static  void hifn_intr(void *);
130 static  u_int hifn_write_command(struct hifn_command *, u_int8_t *);
131 static  u_int32_t hifn_next_signature(u_int32_t a, u_int cnt);
132 static  int hifn_newsession(void *, u_int32_t *, struct cryptoini *);
133 static  int hifn_freesession(void *, u_int64_t);
134 static  int hifn_process(void *, struct cryptop *, int);
135 static  void hifn_callback(struct hifn_softc *, struct hifn_command *, u_int8_t *);
136 static  int hifn_crypto(struct hifn_softc *, struct hifn_command *, struct cryptop *, int);
137 static  int hifn_readramaddr(struct hifn_softc *, int, u_int8_t *);
138 static  int hifn_writeramaddr(struct hifn_softc *, int, u_int8_t *);
139 static  int hifn_dmamap_load_src(struct hifn_softc *, struct hifn_command *);
140 static  int hifn_dmamap_load_dst(struct hifn_softc *, struct hifn_command *);
141 static  int hifn_init_pubrng(struct hifn_softc *);
142 #ifndef HIFN_NO_RNG
143 static  void hifn_rng(void *);
144 #endif
145 static  void hifn_tick(void *);
146 static  void hifn_abort(struct hifn_softc *);
147 static  void hifn_alloc_slot(struct hifn_softc *, int *, int *, int *, int *);
148
149 static  void hifn_write_reg_0(struct hifn_softc *, bus_size_t, u_int32_t);
150 static  void hifn_write_reg_1(struct hifn_softc *, bus_size_t, u_int32_t);
151
152 static __inline__ u_int32_t
153 READ_REG_0(struct hifn_softc *sc, bus_size_t reg)
154 {
155     u_int32_t v = bus_space_read_4(sc->sc_st0, sc->sc_sh0, reg);
156     sc->sc_bar0_lastreg = (bus_size_t) -1;
157     return (v);
158 }
159 #define WRITE_REG_0(sc, reg, val)       hifn_write_reg_0(sc, reg, val)
160
161 static __inline__ u_int32_t
162 READ_REG_1(struct hifn_softc *sc, bus_size_t reg)
163 {
164     u_int32_t v = bus_space_read_4(sc->sc_st1, sc->sc_sh1, reg);
165     sc->sc_bar1_lastreg = (bus_size_t) -1;
166     return (v);
167 }
168 #define WRITE_REG_1(sc, reg, val)       hifn_write_reg_1(sc, reg, val)
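/*
 * NB: a read through READ_REG_0/READ_REG_1 resets sc_bar0_lastreg /
 * sc_bar1_lastreg.  The write helpers hifn_write_reg_0/1 (defined later
 * in this file) track the last register written, presumably so that
 * back-to-back writes to the same BAR can be broken up; an intervening
 * read makes that tracking stale, hence the reset to -1 here.
 */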
169
170 SYSCTL_NODE(_hw, OID_AUTO, hifn, CTLFLAG_RD, 0, "Hifn driver parameters");
171
172 #ifdef HIFN_DEBUG
173 static  int hifn_debug = 0;
174 SYSCTL_INT(_hw_hifn, OID_AUTO, debug, CTLFLAG_RW, &hifn_debug,
175             0, "control debugging msgs");
176 #endif
177
178 static  struct hifn_stats hifnstats;
179 SYSCTL_STRUCT(_hw_hifn, OID_AUTO, stats, CTLFLAG_RD, &hifnstats,
180             hifn_stats, "driver statistics");
181 static  int hifn_maxbatch = 1;
182 SYSCTL_INT(_hw_hifn, OID_AUTO, maxbatch, CTLFLAG_RW, &hifn_maxbatch,
183             0, "max ops to batch w/o interrupt");
184
185 /*
186  * Probe for a supported device.  The PCI vendor and device
187  * IDs are used to detect devices we know how to handle.
188  */
189 static int
190 hifn_probe(device_t dev)
191 {
192         if (pci_get_vendor(dev) == PCI_VENDOR_INVERTEX &&
193             pci_get_device(dev) == PCI_PRODUCT_INVERTEX_AEON)
194                 return (0);
195         if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
196             (pci_get_device(dev) == PCI_PRODUCT_HIFN_7751 ||
197              pci_get_device(dev) == PCI_PRODUCT_HIFN_7951 ||
198              pci_get_device(dev) == PCI_PRODUCT_HIFN_7811))
199                 return (0);
200         if (pci_get_vendor(dev) == PCI_VENDOR_NETSEC &&
201             pci_get_device(dev) == PCI_PRODUCT_NETSEC_7751)
202                 return (0);
203         return (ENXIO);
204 }
205
206 static void
207 hifn_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
208 {
209         bus_addr_t *paddr = (bus_addr_t*) arg;
210         *paddr = segs->ds_addr;
211 }
212
213 static const char*
214 hifn_partname(struct hifn_softc *sc)
215 {
216         /* XXX sprintf numbers when not decoded */
217         switch (pci_get_vendor(sc->sc_dev)) {
218         case PCI_VENDOR_HIFN:
219                 switch (pci_get_device(sc->sc_dev)) {
220                 case PCI_PRODUCT_HIFN_6500:     return "Hifn 6500";
221                 case PCI_PRODUCT_HIFN_7751:     return "Hifn 7751";
222                 case PCI_PRODUCT_HIFN_7811:     return "Hifn 7811";
223                 case PCI_PRODUCT_HIFN_7951:     return "Hifn 7951";
224                 }
225                 return "Hifn unknown-part";
226         case PCI_VENDOR_INVERTEX:
227                 switch (pci_get_device(sc->sc_dev)) {
228                 case PCI_PRODUCT_INVERTEX_AEON: return "Invertex AEON";
229                 }
230                 return "Invertex unknown-part";
231         case PCI_VENDOR_NETSEC:
232                 switch (pci_get_device(sc->sc_dev)) {
233                 case PCI_PRODUCT_NETSEC_7751:   return "NetSec 7751";
234                 }
235                 return "NetSec unknown-part";
236         }
237         return "Unknown-vendor unknown-part";
238 }
239
240 static void
241 default_harvest(struct rndtest_state *rsp, void *buf, u_int count)
242 {
243         u_int32_t *p = (u_int32_t *)buf;
244         for (count /= sizeof (u_int32_t); count; count--)
245                 add_true_randomness(*p++);
246 }
247
248 /*
249  * Attach an interface that successfully probed.
250  */
251 static int 
252 hifn_attach(device_t dev)
253 {
254         struct hifn_softc *sc = device_get_softc(dev);
255         u_int32_t cmd;
256         caddr_t kva;
257         int rseg, rid;
258         char rbase;
259         u_int16_t ena, rev;
260
261         KASSERT(sc != NULL, ("hifn_attach: null software carrier!"));
262         bzero(sc, sizeof (*sc));
263         sc->sc_dev = dev;
264
265         /* XXX handle power management */
266
267         /*
268          * The 7951 has a random number generator and
269          * public key support; note this.
270          */
271         if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
272             pci_get_device(dev) == PCI_PRODUCT_HIFN_7951)
273                 sc->sc_flags = HIFN_HAS_RNG | HIFN_HAS_PUBLIC;
274         /*
275          * The 7811 has a random number generator and
276          * we also note its identity because of some quirks.
277          */
278         if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
279             pci_get_device(dev) == PCI_PRODUCT_HIFN_7811)
280                 sc->sc_flags |= HIFN_IS_7811 | HIFN_HAS_RNG;
281
282         /*
283          * Configure support for memory-mapped access to
284          * registers and for DMA operations.
285          */
286 #define PCIM_ENA        (PCIM_CMD_MEMEN|PCIM_CMD_BUSMASTEREN)
287         cmd = pci_read_config(dev, PCIR_COMMAND, 4);
288         cmd |= PCIM_ENA;
289         pci_write_config(dev, PCIR_COMMAND, cmd, 4);
290         cmd = pci_read_config(dev, PCIR_COMMAND, 4);
291         if ((cmd & PCIM_ENA) != PCIM_ENA) {
292                 device_printf(dev, "failed to enable %s\n",
293                         (cmd & PCIM_ENA) == 0 ?
294                                 "memory mapping & bus mastering" :
295                         (cmd & PCIM_CMD_MEMEN) == 0 ?
296                                 "memory mapping" : "bus mastering");
297                 goto fail_pci;
298         }
299 #undef PCIM_ENA
300
301         /*
302          * Setup PCI resources. Note that we record the bus
303          * tag and handle for each register mapping; this is
304          * used by the READ_REG_0, WRITE_REG_0, READ_REG_1,
305          * and WRITE_REG_1 macros throughout the driver.
306          */
307         rid = HIFN_BAR0;
308         sc->sc_bar0res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid,
309                                             0, ~0, 1, RF_ACTIVE);
310         if (sc->sc_bar0res == NULL) {
311                 device_printf(dev, "cannot map bar%d register space\n", 0);
312                 goto fail_pci;
313         }
314         sc->sc_st0 = rman_get_bustag(sc->sc_bar0res);
315         sc->sc_sh0 = rman_get_bushandle(sc->sc_bar0res);
316         sc->sc_bar0_lastreg = (bus_size_t) -1;
317
318         rid = HIFN_BAR1;
319         sc->sc_bar1res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid,
320                                             0, ~0, 1, RF_ACTIVE);
321         if (sc->sc_bar1res == NULL) {
322                 device_printf(dev, "cannot map bar%d register space\n", 1);
323                 goto fail_io0;
324         }
325         sc->sc_st1 = rman_get_bustag(sc->sc_bar1res);
326         sc->sc_sh1 = rman_get_bushandle(sc->sc_bar1res);
327         sc->sc_bar1_lastreg = (bus_size_t) -1;
328
329         hifn_set_retry(sc);
330
331         /*
332          * Set up the area where the Hifn DMA descriptors
333          * and associated data structures will live.
334          */
335         if (bus_dma_tag_create(NULL,                    /* parent */
336                                1, 0,                    /* alignment,boundary */
337                                BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
338                                BUS_SPACE_MAXADDR,       /* highaddr */
339                                NULL, NULL,              /* filter, filterarg */
340                                HIFN_MAX_DMALEN,         /* maxsize */
341                                MAX_SCATTER,             /* nsegments */
342                                HIFN_MAX_SEGLEN,         /* maxsegsize */
343                                BUS_DMA_ALLOCNOW,        /* flags */
344                                &sc->sc_dmat)) {
345                 device_printf(dev, "cannot allocate DMA tag\n");
346                 goto fail_io1;
347         }
348         if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, &sc->sc_dmamap)) {
349                 device_printf(dev, "cannot create dma map\n");
350                 bus_dma_tag_destroy(sc->sc_dmat);
351                 goto fail_io1;
352         }
353         if (bus_dmamem_alloc(sc->sc_dmat, (void**) &kva, BUS_DMA_NOWAIT, &sc->sc_dmamap)) {
354                 device_printf(dev, "cannot alloc dma buffer\n");
355                 bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap);
356                 bus_dma_tag_destroy(sc->sc_dmat);
357                 goto fail_io1;
358         }
359         if (bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap, kva,
360                              sizeof (*sc->sc_dma),
361                              hifn_dmamap_cb, &sc->sc_dma_physaddr,
362                              BUS_DMA_NOWAIT)) {
363                 device_printf(dev, "cannot load dma map\n");
364                 bus_dmamem_free(sc->sc_dmat, kva, sc->sc_dmamap);
365                 bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap);
366                 bus_dma_tag_destroy(sc->sc_dmat);
367                 goto fail_io1;
368         }
369         sc->sc_dma = (struct hifn_dma *)kva;
370         bzero(sc->sc_dma, sizeof(*sc->sc_dma));
371
372         KASSERT(sc->sc_st0 != NULL, ("hifn_attach: null bar0 tag!"));
373         KASSERT(sc->sc_sh0 != NULL, ("hifn_attach: null bar0 handle!"));
374         KASSERT(sc->sc_st1 != NULL, ("hifn_attach: null bar1 tag!"));
375         KASSERT(sc->sc_sh1 != NULL, ("hifn_attach: null bar1 handle!"));
376
377         /*
378          * Reset the board and do the ``secret handshake''
379          * to enable the crypto support.  Then complete the
380          * initialization procedure by setting up the interrupt
381          * and hooking in to the system crypto support so we'll
382          * get used for system services like the crypto device,
383          * IPsec, RNG device, etc.
384          */
385         hifn_reset_board(sc, 0);
386
387         if (hifn_enable_crypto(sc) != 0) {
388                 device_printf(dev, "crypto enabling failed\n");
389                 goto fail_mem;
390         }
391         hifn_reset_puc(sc);
392
393         hifn_init_dma(sc);
394         hifn_init_pci_registers(sc);
395
396         if (hifn_ramtype(sc))
397                 goto fail_mem;
398
399         if (sc->sc_drammodel == 0)
400                 hifn_sramsize(sc);
401         else
402                 hifn_dramsize(sc);
403
404         /*
405          * Workaround for NetSec 7751 rev A: half ram size because two
406          * of the address lines were left floating
407          */
408         if (pci_get_vendor(dev) == PCI_VENDOR_NETSEC &&
409             pci_get_device(dev) == PCI_PRODUCT_NETSEC_7751 &&
410             pci_get_revid(dev) == 0x61) /*XXX???*/
411                 sc->sc_ramsize >>= 1;
412
413         /*
414          * Arrange the interrupt line.
415          */
416         rid = 0;
417         sc->sc_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid,
418                                         0, ~0, 1, RF_SHAREABLE|RF_ACTIVE);
419         if (sc->sc_irq == NULL) {
420                 device_printf(dev, "could not map interrupt\n");
421                 goto fail_mem;
422         }
423         /*
424          * NB: Network code assumes we are blocked with splimp()
425          *     so make sure the IRQ is marked appropriately.
426          */
427         if (bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET,
428                            hifn_intr, sc, &sc->sc_intrhand)) {
429                 device_printf(dev, "could not setup interrupt\n");
430                 goto fail_intr2;
431         }
432
433         hifn_sessions(sc);
434
435         /*
436          * NB: Keep only the low 16 bits; this masks the chip id
437          *     from the 7951.
438          */
439         rev = READ_REG_1(sc, HIFN_1_REVID) & 0xffff;
440
441         rseg = sc->sc_ramsize / 1024;
442         rbase = 'K';
443         if (sc->sc_ramsize >= (1024 * 1024)) {
444                 rbase = 'M';
445                 rseg /= 1024;
446         }
447         device_printf(sc->sc_dev, "%s, rev %u, %d%cB %cram, %u sessions\n",
448                 hifn_partname(sc), rev,
449                 rseg, rbase, sc->sc_drammodel ? 'd' : 's',
450                 sc->sc_maxses);
451
452         sc->sc_cid = crypto_get_driverid(0);
453         if (sc->sc_cid < 0) {
454                 device_printf(dev, "could not get crypto driver id\n");
455                 goto fail_intr;
456         }
457
458         WRITE_REG_0(sc, HIFN_0_PUCNFG,
459             READ_REG_0(sc, HIFN_0_PUCNFG) | HIFN_PUCNFG_CHIPID);
460         ena = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;
461
462         switch (ena) {
463         case HIFN_PUSTAT_ENA_2:
464                 crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0,
465                     hifn_newsession, hifn_freesession, hifn_process, sc);
466                 crypto_register(sc->sc_cid, CRYPTO_ARC4, 0, 0,
467                     hifn_newsession, hifn_freesession, hifn_process, sc);
468                 /*FALLTHROUGH*/
469         case HIFN_PUSTAT_ENA_1:
470                 crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0,
471                     hifn_newsession, hifn_freesession, hifn_process, sc);
472                 crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0,
473                     hifn_newsession, hifn_freesession, hifn_process, sc);
474                 crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0,
475                     hifn_newsession, hifn_freesession, hifn_process, sc);
476                 crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0,
477                     hifn_newsession, hifn_freesession, hifn_process, sc);
478                 crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0,
479                     hifn_newsession, hifn_freesession, hifn_process, sc);
480                 break;
481         }
482
483         bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
484             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
485
486         if (sc->sc_flags & (HIFN_HAS_PUBLIC | HIFN_HAS_RNG))
487                 hifn_init_pubrng(sc);
488
489         /* NB: 1 means the callout runs w/o Giant locked */
490         callout_init(&sc->sc_tickto);
491         callout_reset(&sc->sc_tickto, hz, hifn_tick, sc);
492
493         return (0);
494
495 fail_intr:
496         bus_teardown_intr(dev, sc->sc_irq, sc->sc_intrhand);
497 fail_intr2:
498         /* XXX don't store rid */
499         bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq);
500 fail_mem:
501         bus_dmamap_unload(sc->sc_dmat, sc->sc_dmamap);
502         bus_dmamem_free(sc->sc_dmat, sc->sc_dma, sc->sc_dmamap);
503         bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap);
504         bus_dma_tag_destroy(sc->sc_dmat);
505
506         /* Turn off DMA polling */
507         WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
508             HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
509 fail_io1:
510         bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR1, sc->sc_bar1res);
511 fail_io0:
512         bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR0, sc->sc_bar0res);
513 fail_pci:
514         return (ENXIO);
515 }
516
517 /*
518  * Detach an interface that successfully probed.
519  */
520 static int 
521 hifn_detach(device_t dev)
522 {
523         struct hifn_softc *sc = device_get_softc(dev);
524         int s;
525
526         KASSERT(sc != NULL, ("hifn_detach: null software carrier!"));
527
528         s = splimp();
529
530         /*XXX other resources */
531         callout_stop(&sc->sc_tickto);
532         callout_stop(&sc->sc_rngto);
533 #ifdef HIFN_RNDTEST
534         if (sc->sc_rndtest)
535                 rndtest_detach(sc->sc_rndtest);
536 #endif
537
538         /* Turn off DMA polling */
539         WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
540             HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
541
542         crypto_unregister_all(sc->sc_cid);
543
544         bus_generic_detach(dev);        /*XXX should be no children, right? */
545
546         bus_teardown_intr(dev, sc->sc_irq, sc->sc_intrhand);
547         /* XXX don't store rid */
548         bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq);
549
550         bus_dmamap_unload(sc->sc_dmat, sc->sc_dmamap);
551         bus_dmamem_free(sc->sc_dmat, sc->sc_dma, sc->sc_dmamap);
552         bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap);
553         bus_dma_tag_destroy(sc->sc_dmat);
554
555         bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR1, sc->sc_bar1res);
556         bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR0, sc->sc_bar0res);
557
558         splx(s);
559
560         return (0);
561 }
562
563 /*
564  * Stop all chip I/O so that the kernel's probe routines don't
565  * get confused by errant DMAs when rebooting.
566  */
567 static void
568 hifn_shutdown(device_t dev)
569 {
570 #ifdef notyet
571         hifn_stop(device_get_softc(dev));
572 #endif
573 }
574
575 /*
576  * Device suspend routine.  Stop the interface and save some PCI
577  * settings in case the BIOS doesn't restore them properly on
578  * resume.
579  */
580 static int
581 hifn_suspend(device_t dev)
582 {
583         struct hifn_softc *sc = device_get_softc(dev);
584 #ifdef notyet
585         int i;
586
587         hifn_stop(sc);
588         for (i = 0; i < 5; i++)
589                 sc->saved_maps[i] = pci_read_config(dev, PCIR_MAPS + i * 4, 4);
590         sc->saved_biosaddr = pci_read_config(dev, PCIR_BIOS, 4);
591         sc->saved_intline = pci_read_config(dev, PCIR_INTLINE, 1);
592         sc->saved_cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1);
593         sc->saved_lattimer = pci_read_config(dev, PCIR_LATTIMER, 1);
594 #endif
595         sc->sc_suspended = 1;
596
597         return (0);
598 }
599
600 /*
601  * Device resume routine.  Restore some PCI settings in case the BIOS
602  * doesn't, re-enable busmastering, and restart the interface if
603  * appropriate.
604  */
605 static int
606 hifn_resume(device_t dev)
607 {
608         struct hifn_softc *sc = device_get_softc(dev);
609 #ifdef notyet
610         int i;
611
612         /* better way to do this? */
613         for (i = 0; i < 5; i++)
614                 pci_write_config(dev, PCIR_MAPS + i * 4, sc->saved_maps[i], 4);
615         pci_write_config(dev, PCIR_BIOS, sc->saved_biosaddr, 4);
616         pci_write_config(dev, PCIR_INTLINE, sc->saved_intline, 1);
617         pci_write_config(dev, PCIR_CACHELNSZ, sc->saved_cachelnsz, 1);
618         pci_write_config(dev, PCIR_LATTIMER, sc->saved_lattimer, 1);
619
620         /* reenable busmastering */
621         pci_enable_busmaster(dev);
622         pci_enable_io(dev, HIFN_RES);
623
624         /* reinitialize interface if necessary */
625         if (ifp->if_flags & IFF_UP)
626                 rl_init(sc);
627 #endif
628         sc->sc_suspended = 0;
629
630         return (0);
631 }
632
633 static int
634 hifn_init_pubrng(struct hifn_softc *sc)
635 {
636         u_int32_t r;
637         int i;
638
639 #ifdef HIFN_RNDTEST
640         sc->sc_rndtest = rndtest_attach(sc->sc_dev);
641         if (sc->sc_rndtest)
642                 sc->sc_harvest = rndtest_harvest;
643         else
644                 sc->sc_harvest = default_harvest;
645 #else
646         sc->sc_harvest = default_harvest;
647 #endif
648         if ((sc->sc_flags & HIFN_IS_7811) == 0) {
649                 /* Reset 7951 public key/rng engine */
650                 WRITE_REG_1(sc, HIFN_1_PUB_RESET,
651                     READ_REG_1(sc, HIFN_1_PUB_RESET) | HIFN_PUBRST_RESET);
652
653                 for (i = 0; i < 100; i++) {
654                         DELAY(1000);
655                         if ((READ_REG_1(sc, HIFN_1_PUB_RESET) &
656                             HIFN_PUBRST_RESET) == 0)
657                                 break;
658                 }
659
660                 if (i == 100) {
661                         device_printf(sc->sc_dev, "public key init failed\n");
662                         return (1);
663                 }
664         }
665
666 #ifndef HIFN_NO_RNG
667         /* Enable the rng, if available */
668         if (sc->sc_flags & HIFN_HAS_RNG) {
669                 if (sc->sc_flags & HIFN_IS_7811) {
670                         r = READ_REG_1(sc, HIFN_1_7811_RNGENA);
671                         if (r & HIFN_7811_RNGENA_ENA) {
672                                 r &= ~HIFN_7811_RNGENA_ENA;
673                                 WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
674                         }
675                         WRITE_REG_1(sc, HIFN_1_7811_RNGCFG,
676                             HIFN_7811_RNGCFG_DEFL);
677                         r |= HIFN_7811_RNGENA_ENA;
678                         WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
679                 } else
680                         WRITE_REG_1(sc, HIFN_1_RNG_CONFIG,
681                             READ_REG_1(sc, HIFN_1_RNG_CONFIG) |
682                             HIFN_RNGCFG_ENA);
683
684                 sc->sc_rngfirst = 1;
685                 if (hz >= 100)
686                         sc->sc_rnghz = hz / 100;
687                 else
688                         sc->sc_rnghz = 1;
689                 /* NB: 1 means the callout runs w/o Giant locked */
690                 callout_init(&sc->sc_rngto);
691                 callout_reset(&sc->sc_rngto, sc->sc_rnghz, hifn_rng, sc);
692         }
693 #endif
694
695         /* Enable public key engine, if available */
696         if (sc->sc_flags & HIFN_HAS_PUBLIC) {
697                 WRITE_REG_1(sc, HIFN_1_PUB_IEN, HIFN_PUBIEN_DONE);
698                 sc->sc_dmaier |= HIFN_DMAIER_PUBDONE;
699                 WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
700         }
701
702         return (0);
703 }
704
705 #ifndef HIFN_NO_RNG
706 static void
707 hifn_rng(void *vsc)
708 {
709 #define RANDOM_BITS(n)  (n)*sizeof (u_int32_t), (n)*sizeof (u_int32_t)*NBBY, 0
710         struct hifn_softc *sc = vsc;
711         u_int32_t sts, num[2];
712         int i;
713
714         if (sc->sc_flags & HIFN_IS_7811) {
715                 for (i = 0; i < 5; i++) {
716                         sts = READ_REG_1(sc, HIFN_1_7811_RNGSTS);
717                         if (sts & HIFN_7811_RNGSTS_UFL) {
718                                 device_printf(sc->sc_dev,
719                                               "RNG underflow: disabling\n");
720                                 return;
721                         }
722                         if ((sts & HIFN_7811_RNGSTS_RDY) == 0)
723                                 break;
724
725                         /*
726                          * There are at least two words in the RNG FIFO
727                          * at this point.
728                          */
729                         num[0] = READ_REG_1(sc, HIFN_1_7811_RNGDAT);
730                         num[1] = READ_REG_1(sc, HIFN_1_7811_RNGDAT);
731                         /* NB: discard first data read */
732                         if (sc->sc_rngfirst)
733                                 sc->sc_rngfirst = 0;
734                         else
735                                 (*sc->sc_harvest)(sc->sc_rndtest,
736                                         num, sizeof (num));
737                 }
738         } else {
739                 num[0] = READ_REG_1(sc, HIFN_1_RNG_DATA);
740
741                 /* NB: discard first data read */
742                 if (sc->sc_rngfirst)
743                         sc->sc_rngfirst = 0;
744                 else
745                         (*sc->sc_harvest)(sc->sc_rndtest,
746                                 num, sizeof (num[0]));
747         }
748
749         callout_reset(&sc->sc_rngto, sc->sc_rnghz, hifn_rng, sc);
750 #undef RANDOM_BITS
751 }
752 #endif
753
754 static void
755 hifn_puc_wait(struct hifn_softc *sc)
756 {
757         int i;
758
759         for (i = 5000; i > 0; i--) {
760                 DELAY(1);
761                 if (!(READ_REG_0(sc, HIFN_0_PUCTRL) & HIFN_PUCTRL_RESET))
762                         break;
763         }
764         if (!i)
765                 device_printf(sc->sc_dev, "proc unit did not reset\n");
766 }
767
768 /*
769  * Reset the processing unit.
770  */
771 static void
772 hifn_reset_puc(struct hifn_softc *sc)
773 {
774         /* Reset processing unit */
775         WRITE_REG_0(sc, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA);
776         hifn_puc_wait(sc);
777 }
778
779 /*
780  * Set the Retry and TRDY registers; note that we set them to
781  * zero because the 7811 locks up when forced to retry (section
782  * 3.6 of "Specification Update SU-0014-04").  Not clear if we
783  * should do this for all Hifn parts, but it doesn't seem to hurt.
784  */
785 static void
786 hifn_set_retry(struct hifn_softc *sc)
787 {
788         /* NB: RETRY only responds to 8-bit reads/writes */
789         pci_write_config(sc->sc_dev, HIFN_RETRY_TIMEOUT, 0, 1);
790         pci_write_config(sc->sc_dev, HIFN_TRDY_TIMEOUT, 0, 4);
791 }
792
793 /*
794  * Resets the board.  Values in the registers are left as is
795  * from the reset (i.e. initial values are assigned elsewhere).
796  */
797 static void
798 hifn_reset_board(struct hifn_softc *sc, int full)
799 {
800         u_int32_t reg;
801
802         /*
803          * Set polling in the DMA configuration register to zero.  0x7 avoids
804          * resetting the board and zeros out the other fields.
805          */
806         WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
807             HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
808
809         /*
810          * Now that polling has been disabled, we have to wait 1 ms
811          * before resetting the board.
812          */
813         DELAY(1000);
814
815         /* Reset the DMA unit */
816         if (full) {
817                 WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MODE);
818                 DELAY(1000);
819         } else {
820                 WRITE_REG_1(sc, HIFN_1_DMA_CNFG,
821                     HIFN_DMACNFG_MODE | HIFN_DMACNFG_MSTRESET);
822                 hifn_reset_puc(sc);
823         }
824
825         KASSERT(sc->sc_dma != NULL, ("hifn_reset_board: null DMA tag!"));
826         bzero(sc->sc_dma, sizeof(*sc->sc_dma));
827
828         /* Bring dma unit out of reset */
829         WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
830             HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
831
832         hifn_puc_wait(sc);
833         hifn_set_retry(sc);
834
835         if (sc->sc_flags & HIFN_IS_7811) {
836                 for (reg = 0; reg < 1000; reg++) {
837                         if (READ_REG_1(sc, HIFN_1_7811_MIPSRST) &
838                             HIFN_MIPSRST_CRAMINIT)
839                                 break;
840                         DELAY(1000);
841                 }
842                 if (reg == 1000)
843                         printf(": cram init timeout\n");
844         }
845 }
846
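/*
 * Step the unlock signature: "a" behaves as a 32-bit LFSR whose feedback
 * taps are the bits set in 0x80080125.  Each iteration takes the parity
 * of (a & 0x80080125) and shifts it into the low bit while the state is
 * shifted left by one.  hifn_enable_crypto() below advances the signature
 * by (offtbl[i] + 0x101) steps for each byte of the card_id table.
 */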
847 static u_int32_t
848 hifn_next_signature(u_int32_t a, u_int cnt)
849 {
850         int i;
851         u_int32_t v;
852
853         for (i = 0; i < cnt; i++) {
854
855                 /* get the parity */
856                 v = a & 0x80080125;
857                 v ^= v >> 16;
858                 v ^= v >> 8;
859                 v ^= v >> 4;
860                 v ^= v >> 2;
861                 v ^= v >> 1;
862
863                 a = (v & 1) ^ (a << 1);
864         }
865
866         return a;
867 }
868
869 struct pci2id {
870         u_short         pci_vendor;
871         u_short         pci_prod;
872         char            card_id[13];
873 };
874 static struct pci2id pci2id[] = {
875         {
876                 PCI_VENDOR_HIFN,
877                 PCI_PRODUCT_HIFN_7951,
878                 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
879                   0x00, 0x00, 0x00, 0x00, 0x00 }
880         }, {
881                 PCI_VENDOR_NETSEC,
882                 PCI_PRODUCT_NETSEC_7751,
883                 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
884                   0x00, 0x00, 0x00, 0x00, 0x00 }
885         }, {
886                 PCI_VENDOR_INVERTEX,
887                 PCI_PRODUCT_INVERTEX_AEON,
888                 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
889                   0x00, 0x00, 0x00, 0x00, 0x00 }
890         }, {
891                 PCI_VENDOR_HIFN,
892                 PCI_PRODUCT_HIFN_7811,
893                 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
894                   0x00, 0x00, 0x00, 0x00, 0x00 }
895         }, {
896                 /*
897                  * Other vendors share this PCI ID as well, such as
898                  * http://www.powercrypt.com, and obviously they also
899                  * use the same key.
900                  */
901                 PCI_VENDOR_HIFN,
902                 PCI_PRODUCT_HIFN_7751,
903                 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
904                   0x00, 0x00, 0x00, 0x00, 0x00 }
905         },
906 };
907
908 /*
909  * Checks to see if crypto is already enabled.  If crypto isn't enabled,
910  * "hifn_enable_crypto" is called to enable it.  The check is important,
911  * as enabling crypto twice will lock the board.
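 * NB: the unlock handshake below reads HIFN_UNLOCK_SECRET1 once, writes a
 * zero to HIFN_UNLOCK_SECRET2, and then writes 13 signature values, one per
 * byte of the matching card_id[] entry, each derived via hifn_next_signature().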
912  */
913 static int 
914 hifn_enable_crypto(struct hifn_softc *sc)
915 {
916         u_int32_t dmacfg, ramcfg, encl, addr, i;
917         char *offtbl = NULL;
918
919         for (i = 0; i < sizeof(pci2id)/sizeof(pci2id[0]); i++) {
920                 if (pci2id[i].pci_vendor == pci_get_vendor(sc->sc_dev) &&
921                     pci2id[i].pci_prod == pci_get_device(sc->sc_dev)) {
922                         offtbl = pci2id[i].card_id;
923                         break;
924                 }
925         }
926         if (offtbl == NULL) {
927                 device_printf(sc->sc_dev, "Unknown card!\n");
928                 return (1);
929         }
930
931         ramcfg = READ_REG_0(sc, HIFN_0_PUCNFG);
932         dmacfg = READ_REG_1(sc, HIFN_1_DMA_CNFG);
933
934         /*
935          * The RAM config register's encrypt level bit needs to be set before
936          * every read performed on the encryption level register.
937          */
938         WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);
939
940         encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;
941
942         /*
943          * Make sure we don't re-unlock.  Two unlocks kill the chip until the
944          * next reboot.
945          */
946         if (encl == HIFN_PUSTAT_ENA_1 || encl == HIFN_PUSTAT_ENA_2) {
947 #ifdef HIFN_DEBUG
948                 if (hifn_debug)
949                         device_printf(sc->sc_dev,
950                             "Strong crypto already enabled!\n");
951 #endif
952                 goto report;
953         }
954
955         if (encl != 0 && encl != HIFN_PUSTAT_ENA_0) {
956 #ifdef HIFN_DEBUG
957                 if (hifn_debug)
958                         device_printf(sc->sc_dev,
959                               "Unknown encryption level 0x%x\n", encl);
960 #endif
961                 return 1;
962         }
963
964         WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_UNLOCK |
965             HIFN_DMACNFG_MSTRESET | HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
966         DELAY(1000);
967         addr = READ_REG_1(sc, HIFN_UNLOCK_SECRET1);
968         DELAY(1000);
969         WRITE_REG_1(sc, HIFN_UNLOCK_SECRET2, 0);
970         DELAY(1000);
971
972         for (i = 0; i <= 12; i++) {
973                 addr = hifn_next_signature(addr, offtbl[i] + 0x101);
974                 WRITE_REG_1(sc, HIFN_UNLOCK_SECRET2, addr);
975
976                 DELAY(1000);
977         }
978
979         WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);
980         encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;
981
982 #ifdef HIFN_DEBUG
983         if (hifn_debug) {
984                 if (encl != HIFN_PUSTAT_ENA_1 && encl != HIFN_PUSTAT_ENA_2)
985                         device_printf(sc->sc_dev, "Engine is permanently "
986                                 "locked until next system reset!\n");
987                 else
988                         device_printf(sc->sc_dev, "Engine enabled "
989                                 "successfully!\n");
990         }
991 #endif
992
993 report:
994         WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg);
995         WRITE_REG_1(sc, HIFN_1_DMA_CNFG, dmacfg);
996
997         switch (encl) {
998         case HIFN_PUSTAT_ENA_1:
999         case HIFN_PUSTAT_ENA_2:
1000                 break;
1001         case HIFN_PUSTAT_ENA_0:
1002         default:
1003                 device_printf(sc->sc_dev, "disabled");
1004                 break;
1005         }
1006
1007         return 0;
1008 }
1009
1010 /*
1011  * Give initial values to the registers listed in the "Register Space"
1012  * section of the HIFN Software Development reference manual.
1013  */
1014 static void 
1015 hifn_init_pci_registers(struct hifn_softc *sc)
1016 {
1017         /* write fixed values needed by the Initialization registers */
1018         WRITE_REG_0(sc, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA);
1019         WRITE_REG_0(sc, HIFN_0_FIFOCNFG, HIFN_FIFOCNFG_THRESHOLD);
1020         WRITE_REG_0(sc, HIFN_0_PUIER, HIFN_PUIER_DSTOVER);
1021
1022         /* write all 4 ring address registers */
1023         WRITE_REG_1(sc, HIFN_1_DMA_CRAR, sc->sc_dma_physaddr +
1024             offsetof(struct hifn_dma, cmdr[0]));
1025         WRITE_REG_1(sc, HIFN_1_DMA_SRAR, sc->sc_dma_physaddr +
1026             offsetof(struct hifn_dma, srcr[0]));
1027         WRITE_REG_1(sc, HIFN_1_DMA_DRAR, sc->sc_dma_physaddr +
1028             offsetof(struct hifn_dma, dstr[0]));
1029         WRITE_REG_1(sc, HIFN_1_DMA_RRAR, sc->sc_dma_physaddr +
1030             offsetof(struct hifn_dma, resr[0]));
1031
1032         DELAY(2000);
1033
1034         /* write status register */
1035         WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1036             HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS |
1037             HIFN_DMACSR_S_CTRL_DIS | HIFN_DMACSR_C_CTRL_DIS |
1038             HIFN_DMACSR_D_ABORT | HIFN_DMACSR_D_DONE | HIFN_DMACSR_D_LAST |
1039             HIFN_DMACSR_D_WAIT | HIFN_DMACSR_D_OVER |
1040             HIFN_DMACSR_R_ABORT | HIFN_DMACSR_R_DONE | HIFN_DMACSR_R_LAST |
1041             HIFN_DMACSR_R_WAIT | HIFN_DMACSR_R_OVER |
1042             HIFN_DMACSR_S_ABORT | HIFN_DMACSR_S_DONE | HIFN_DMACSR_S_LAST |
1043             HIFN_DMACSR_S_WAIT |
1044             HIFN_DMACSR_C_ABORT | HIFN_DMACSR_C_DONE | HIFN_DMACSR_C_LAST |
1045             HIFN_DMACSR_C_WAIT |
1046             HIFN_DMACSR_ENGINE |
1047             ((sc->sc_flags & HIFN_HAS_PUBLIC) ?
1048                 HIFN_DMACSR_PUBDONE : 0) |
1049             ((sc->sc_flags & HIFN_IS_7811) ?
1050                 HIFN_DMACSR_ILLW | HIFN_DMACSR_ILLR : 0));
1051
1052         sc->sc_d_busy = sc->sc_r_busy = sc->sc_s_busy = sc->sc_c_busy = 0;
1053         sc->sc_dmaier |= HIFN_DMAIER_R_DONE | HIFN_DMAIER_C_ABORT |
1054             HIFN_DMAIER_D_OVER | HIFN_DMAIER_R_OVER |
1055             HIFN_DMAIER_S_ABORT | HIFN_DMAIER_D_ABORT | HIFN_DMAIER_R_ABORT |
1056             ((sc->sc_flags & HIFN_IS_7811) ?
1057                 HIFN_DMAIER_ILLW | HIFN_DMAIER_ILLR : 0);
1058         sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
1059         WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
1060
1061         WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
1062             HIFN_PUCNFG_DRFR_128 | HIFN_PUCNFG_TCALLPHASES |
1063             HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32 |
1064             (sc->sc_drammodel ? HIFN_PUCNFG_DRAM : HIFN_PUCNFG_SRAM));
1065
1066         WRITE_REG_0(sc, HIFN_0_PUISR, HIFN_PUISR_DSTOVER);
1067         WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
1068             HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE | HIFN_DMACNFG_LAST |
1069             ((HIFN_POLL_FREQUENCY << 16 ) & HIFN_DMACNFG_POLLFREQ) |
1070             ((HIFN_POLL_SCALAR << 8) & HIFN_DMACNFG_POLLINVAL));
1071 }
1072
1073 /*
1074  * The maximum number of sessions supported by the card
1075  * is dependent on the amount of context ram, which
1076  * encryption algorithms are enabled, and how compression
1077  * is configured.  This should be configured before this
1078  * routine is called.
1079  */
1080 static void
1081 hifn_sessions(struct hifn_softc *sc)
1082 {
1083         u_int32_t pucnfg;
1084         int ctxsize;
1085
1086         pucnfg = READ_REG_0(sc, HIFN_0_PUCNFG);
1087
1088         if (pucnfg & HIFN_PUCNFG_COMPSING) {
1089                 if (pucnfg & HIFN_PUCNFG_ENCCNFG)
1090                         ctxsize = 128;
1091                 else
1092                         ctxsize = 512;
1093                 sc->sc_maxses = 1 +
1094                     ((sc->sc_ramsize - 32768) / ctxsize);
1095         } else
1096                 sc->sc_maxses = sc->sc_ramsize / 16384;
1097
1098         if (sc->sc_maxses > 2048)
1099                 sc->sc_maxses = 2048;
1100 }
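/*
 * For example, with HIFN_PUCNFG_COMPSING set and HIFN_PUCNFG_ENCCNFG clear
 * (ctxsize 512), a hypothetical board with 512KB of context RAM yields
 * 1 + (524288 - 32768) / 512 = 961 sessions; with ENCCNFG set (ctxsize 128)
 * the same board computes 3841 and is clamped to the 2048 cap above.
 */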
1101
1102 /*
1103  * Determine ram type (sram or dram).  Board should be just out of a reset
1104  * state when this is called.
1105  */
1106 static int
1107 hifn_ramtype(struct hifn_softc *sc)
1108 {
1109         u_int8_t data[8], dataexpect[8];
1110         int i;
1111
1112         for (i = 0; i < sizeof(data); i++)
1113                 data[i] = dataexpect[i] = 0x55;
1114         if (hifn_writeramaddr(sc, 0, data))
1115                 return (-1);
1116         if (hifn_readramaddr(sc, 0, data))
1117                 return (-1);
1118         if (bcmp(data, dataexpect, sizeof(data)) != 0) {
1119                 sc->sc_drammodel = 1;
1120                 return (0);
1121         }
1122
1123         for (i = 0; i < sizeof(data); i++)
1124                 data[i] = dataexpect[i] = 0xaa;
1125         if (hifn_writeramaddr(sc, 0, data))
1126                 return (-1);
1127         if (hifn_readramaddr(sc, 0, data))
1128                 return (-1);
1129         if (bcmp(data, dataexpect, sizeof(data)) != 0) {
1130                 sc->sc_drammodel = 1;
1131                 return (0);
1132         }
1133
1134         return (0);
1135 }
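/*
 * NB: the 0x55/0xaa write-then-read test above marks the board as DRAM
 * (sc_drammodel = 1) as soon as either pattern fails to read back intact,
 * presumably because DRAM does not hold the data before the refresh setup
 * is in place; if both patterns survive, the SRAM default stands.
 */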
1136
1137 #define HIFN_SRAM_MAX           (32 << 20)
1138 #define HIFN_SRAM_STEP_SIZE     16384
1139 #define HIFN_SRAM_GRANULARITY   (HIFN_SRAM_MAX / HIFN_SRAM_STEP_SIZE)
1140
1141 static int
1142 hifn_sramsize(struct hifn_softc *sc)
1143 {
1144         u_int32_t a;
1145         u_int8_t data[8];
1146         u_int8_t dataexpect[sizeof(data)];
1147         int32_t i;
1148
1149         for (i = 0; i < sizeof(data); i++)
1150                 data[i] = dataexpect[i] = i ^ 0x5a;
1151
1152         for (i = HIFN_SRAM_GRANULARITY - 1; i >= 0; i--) {
1153                 a = i * HIFN_SRAM_STEP_SIZE;
1154                 bcopy(&i, data, sizeof(i));
1155                 hifn_writeramaddr(sc, a, data);
1156         }
1157
1158         for (i = 0; i < HIFN_SRAM_GRANULARITY; i++) {
1159                 a = i * HIFN_SRAM_STEP_SIZE;
1160                 bcopy(&i, dataexpect, sizeof(i));
1161                 if (hifn_readramaddr(sc, a, data) < 0)
1162                         return (0);
1163                 if (bcmp(data, dataexpect, sizeof(data)) != 0)
1164                         return (0);
1165                 sc->sc_ramsize = a + HIFN_SRAM_STEP_SIZE;
1166         }
1167
1168         return (0);
1169 }
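/*
 * NB: the probe above stores each 16KB step's index from the top of the
 * 32MB window downward, then verifies from the bottom up.  On a smaller
 * part the high addresses alias onto lower ones and are overwritten by the
 * later (lower) writes, so the first index that fails to read back stops
 * the scan and sc_ramsize is left at the last step that verified.
 */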
1170
1171 /*
1172  * XXX For dram boards, one should really try all of the
1173  * HIFN_PUCNFG_DSZ_*'s.  This just assumes that PUCNFG
1174  * is already set up correctly.
1175  */
1176 static int
1177 hifn_dramsize(struct hifn_softc *sc)
1178 {
1179         u_int32_t cnfg;
1180
1181         cnfg = READ_REG_0(sc, HIFN_0_PUCNFG) &
1182             HIFN_PUCNFG_DRAMMASK;
1183         sc->sc_ramsize = 1 << ((cnfg >> 13) + 18);
1184         return (0);
1185 }
1186
1187 static void
1188 hifn_alloc_slot(struct hifn_softc *sc, int *cmdp, int *srcp, int *dstp, int *resp)
1189 {
1190         struct hifn_dma *dma = sc->sc_dma;
1191
1192         if (dma->cmdi == HIFN_D_CMD_RSIZE) {
1193                 dma->cmdi = 0;
1194                 dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID |
1195                     HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
1196                 HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
1197                     BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1198         }
1199         *cmdp = dma->cmdi++;
1200         dma->cmdk = dma->cmdi;
1201
1202         if (dma->srci == HIFN_D_SRC_RSIZE) {
1203                 dma->srci = 0;
1204                 dma->srcr[HIFN_D_SRC_RSIZE].l = htole32(HIFN_D_VALID |
1205                     HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
1206                 HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE,
1207                     BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1208         }
1209         *srcp = dma->srci++;
1210         dma->srck = dma->srci;
1211
1212         if (dma->dsti == HIFN_D_DST_RSIZE) {
1213                 dma->dsti = 0;
1214                 dma->dstr[HIFN_D_DST_RSIZE].l = htole32(HIFN_D_VALID |
1215                     HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
1216                 HIFN_DSTR_SYNC(sc, HIFN_D_DST_RSIZE,
1217                     BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1218         }
1219         *dstp = dma->dsti++;
1220         dma->dstk = dma->dsti;
1221
1222         if (dma->resi == HIFN_D_RES_RSIZE) {
1223                 dma->resi = 0;
1224                 dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID |
1225                     HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
1226                 HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
1227                     BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1228         }
1229         *resp = dma->resi++;
1230         dma->resk = dma->resi;
1231 }
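/*
 * NB: each ring has one extra descriptor at index *_RSIZE which carries
 * HIFN_D_JUMP and (see hifn_init_dma()) points back to the start of the
 * ring; when an index reaches *_RSIZE the code above re-arms that jump
 * descriptor and wraps the index to 0.
 */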
1232
1233 static int
1234 hifn_writeramaddr(struct hifn_softc *sc, int addr, u_int8_t *data)
1235 {
1236         struct hifn_dma *dma = sc->sc_dma;
1237         hifn_base_command_t wc;
1238         const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ;
1239         int r, cmdi, resi, srci, dsti;
1240
1241         wc.masks = htole16(3 << 13);
1242         wc.session_num = htole16(addr >> 14);
1243         wc.total_source_count = htole16(8);
1244         wc.total_dest_count = htole16(addr & 0x3fff);
1245
1246         hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi);
1247
1248         WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1249             HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
1250             HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA);
1251
1252         /* build write command */
1253         bzero(dma->command_bufs[cmdi], HIFN_MAX_COMMAND);
1254         *(hifn_base_command_t *)dma->command_bufs[cmdi] = wc;
1255         bcopy(data, &dma->test_src, sizeof(dma->test_src));
1256
1257         dma->srcr[srci].p = htole32(sc->sc_dma_physaddr
1258             + offsetof(struct hifn_dma, test_src));
1259         dma->dstr[dsti].p = htole32(sc->sc_dma_physaddr
1260             + offsetof(struct hifn_dma, test_dst));
1261
1262         dma->cmdr[cmdi].l = htole32(16 | masks);
1263         dma->srcr[srci].l = htole32(8 | masks);
1264         dma->dstr[dsti].l = htole32(4 | masks);
1265         dma->resr[resi].l = htole32(4 | masks);
1266
1267         bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1268             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1269
1270         for (r = 10000; r >= 0; r--) {
1271                 DELAY(10);
1272                 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1273                     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1274                 if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0)
1275                         break;
1276                 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1277                     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1278         }
1279         if (r < 0) {
1280                 device_printf(sc->sc_dev, "writeramaddr -- "
1281                     "result[%d](addr %d) still valid\n", resi, addr);
1282                 r = -1;
1283                 return (-1);
1284         } else
1285                 r = 0;
1286
1287         WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1288             HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS |
1289             HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS);
1290
1291         return (r);
1292 }
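/*
 * NB: for these RAM access commands the target address is packed into the
 * base command itself: session_num carries addr >> 14 (16KB pages) and the
 * count field carries addr & 0x3fff.  The write path above uses masks of
 * (3 << 13) while the read path in hifn_readramaddr() uses (2 << 13).
 */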
1293
1294 static int
1295 hifn_readramaddr(struct hifn_softc *sc, int addr, u_int8_t *data)
1296 {
1297         struct hifn_dma *dma = sc->sc_dma;
1298         hifn_base_command_t rc;
1299         const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ;
1300         int r, cmdi, srci, dsti, resi;
1301
1302         rc.masks = htole16(2 << 13);
1303         rc.session_num = htole16(addr >> 14);
1304         rc.total_source_count = htole16(addr & 0x3fff);
1305         rc.total_dest_count = htole16(8);
1306
1307         hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi);
1308
1309         WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1310             HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
1311             HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA);
1312
1313         bzero(dma->command_bufs[cmdi], HIFN_MAX_COMMAND);
1314         *(hifn_base_command_t *)dma->command_bufs[cmdi] = rc;
1315
1316         dma->srcr[srci].p = htole32(sc->sc_dma_physaddr +
1317             offsetof(struct hifn_dma, test_src));
1318         dma->test_src = 0;
1319         dma->dstr[dsti].p =  htole32(sc->sc_dma_physaddr +
1320             offsetof(struct hifn_dma, test_dst));
1321         dma->test_dst = 0;
1322         dma->cmdr[cmdi].l = htole32(8 | masks);
1323         dma->srcr[srci].l = htole32(8 | masks);
1324         dma->dstr[dsti].l = htole32(8 | masks);
1325         dma->resr[resi].l = htole32(HIFN_MAX_RESULT | masks);
1326
1327         bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1328             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1329
1330         for (r = 10000; r >= 0; r--) {
1331                 DELAY(10);
1332                 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1333                     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1334                 if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0)
1335                         break;
1336                 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1337                     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1338         }
1339         if (r < 0) {
1340                 device_printf(sc->sc_dev, "readramaddr -- "
1341                     "result[%d](addr %d) still valid\n", resi, addr);
1342                 r = -1;
1343         } else {
1344                 r = 0;
1345                 bcopy(&dma->test_dst, data, sizeof(dma->test_dst));
1346         }
1347
1348         WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1349             HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS |
1350             HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS);
1351
1352         return (r);
1353 }
1354
1355 /*
1356  * Initialize the descriptor rings.
1357  */
1358 static void 
1359 hifn_init_dma(struct hifn_softc *sc)
1360 {
1361         struct hifn_dma *dma = sc->sc_dma;
1362         int i;
1363
1364         hifn_set_retry(sc);
1365
1366         /* initialize static pointer values */
1367         for (i = 0; i < HIFN_D_CMD_RSIZE; i++)
1368                 dma->cmdr[i].p = htole32(sc->sc_dma_physaddr +
1369                     offsetof(struct hifn_dma, command_bufs[i][0]));
1370         for (i = 0; i < HIFN_D_RES_RSIZE; i++)
1371                 dma->resr[i].p = htole32(sc->sc_dma_physaddr +
1372                     offsetof(struct hifn_dma, result_bufs[i][0]));
1373
1374         dma->cmdr[HIFN_D_CMD_RSIZE].p =
1375             htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, cmdr[0]));
1376         dma->srcr[HIFN_D_SRC_RSIZE].p =
1377             htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, srcr[0]));
1378         dma->dstr[HIFN_D_DST_RSIZE].p =
1379             htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, dstr[0]));
1380         dma->resr[HIFN_D_RES_RSIZE].p =
1381             htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, resr[0]));
1382
1383         dma->cmdu = dma->srcu = dma->dstu = dma->resu = 0;
1384         dma->cmdi = dma->srci = dma->dsti = dma->resi = 0;
1385         dma->cmdk = dma->srck = dma->dstk = dma->resk = 0;
1386 }
1387
1388 /*
1389  * Writes out the raw command buffer space.  Returns the
1390  * command buffer size.
1391  */
1392 static u_int
1393 hifn_write_command(struct hifn_command *cmd, u_int8_t *buf)
1394 {
1395         u_int8_t *buf_pos;
1396         hifn_base_command_t *base_cmd;
1397         hifn_mac_command_t *mac_cmd;
1398         hifn_crypt_command_t *cry_cmd;
1399         int using_mac, using_crypt, len;
1400         u_int32_t dlen, slen;
1401
1402         buf_pos = buf;
1403         using_mac = cmd->base_masks & HIFN_BASE_CMD_MAC;
1404         using_crypt = cmd->base_masks & HIFN_BASE_CMD_CRYPT;
1405
1406         base_cmd = (hifn_base_command_t *)buf_pos;
1407         base_cmd->masks = htole16(cmd->base_masks);
1408         slen = cmd->src_mapsize;
1409         if (cmd->sloplen)
1410                 dlen = cmd->dst_mapsize - cmd->sloplen + sizeof(u_int32_t);
1411         else
1412                 dlen = cmd->dst_mapsize;
1413         base_cmd->total_source_count = htole16(slen & HIFN_BASE_CMD_LENMASK_LO);
1414         base_cmd->total_dest_count = htole16(dlen & HIFN_BASE_CMD_LENMASK_LO);
1415         dlen >>= 16;
1416         slen >>= 16;
1417         base_cmd->session_num = htole16(cmd->session_num |
1418             ((slen << HIFN_BASE_CMD_SRCLEN_S) & HIFN_BASE_CMD_SRCLEN_M) |
1419             ((dlen << HIFN_BASE_CMD_DSTLEN_S) & HIFN_BASE_CMD_DSTLEN_M));
1420         buf_pos += sizeof(hifn_base_command_t);
1421
1422         if (using_mac) {
1423                 mac_cmd = (hifn_mac_command_t *)buf_pos;
1424                 dlen = cmd->maccrd->crd_len;
1425                 mac_cmd->source_count = htole16(dlen & 0xffff);
1426                 dlen >>= 16;
1427                 mac_cmd->masks = htole16(cmd->mac_masks |
1428                     ((dlen << HIFN_MAC_CMD_SRCLEN_S) & HIFN_MAC_CMD_SRCLEN_M));
1429                 mac_cmd->header_skip = htole16(cmd->maccrd->crd_skip);
1430                 mac_cmd->reserved = 0;
1431                 buf_pos += sizeof(hifn_mac_command_t);
1432         }
1433
1434         if (using_crypt) {
1435                 cry_cmd = (hifn_crypt_command_t *)buf_pos;
1436                 dlen = cmd->enccrd->crd_len;
1437                 cry_cmd->source_count = htole16(dlen & 0xffff);
1438                 dlen >>= 16;
1439                 cry_cmd->masks = htole16(cmd->cry_masks |
1440                     ((dlen << HIFN_CRYPT_CMD_SRCLEN_S) & HIFN_CRYPT_CMD_SRCLEN_M));
1441                 cry_cmd->header_skip = htole16(cmd->enccrd->crd_skip);
1442                 cry_cmd->reserved = 0;
1443                 buf_pos += sizeof(hifn_crypt_command_t);
1444         }
1445
1446         if (using_mac && cmd->mac_masks & HIFN_MAC_CMD_NEW_KEY) {
1447                 bcopy(cmd->mac, buf_pos, HIFN_MAC_KEY_LENGTH);
1448                 buf_pos += HIFN_MAC_KEY_LENGTH;
1449         }
1450
1451         if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_KEY) {
1452                 switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) {
1453                 case HIFN_CRYPT_CMD_ALG_3DES:
1454                         bcopy(cmd->ck, buf_pos, HIFN_3DES_KEY_LENGTH);
1455                         buf_pos += HIFN_3DES_KEY_LENGTH;
1456                         break;
1457                 case HIFN_CRYPT_CMD_ALG_DES:
1458                         bcopy(cmd->ck, buf_pos, HIFN_DES_KEY_LENGTH);
1459                         buf_pos += HIFN_DES_KEY_LENGTH;
1460                         break;
1461                 case HIFN_CRYPT_CMD_ALG_RC4:
1462                         len = 256;
1463                         do {
1464                                 int clen;
1465
1466                                 clen = MIN(cmd->cklen, len);
1467                                 bcopy(cmd->ck, buf_pos, clen);
1468                                 len -= clen;
1469                                 buf_pos += clen;
1470                         } while (len > 0);
1471                         bzero(buf_pos, 4);
1472                         buf_pos += 4;
1473                         break;
1474                 }
1475         }
1476
1477         if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_IV) {
1478                 bcopy(cmd->iv, buf_pos, HIFN_IV_LENGTH);
1479                 buf_pos += HIFN_IV_LENGTH;
1480         }
1481
1482         if ((cmd->base_masks & (HIFN_BASE_CMD_MAC|HIFN_BASE_CMD_CRYPT)) == 0) {
1483                 bzero(buf_pos, 8);
1484                 buf_pos += 8;
1485         }
1486
1487         return (buf_pos - buf);
1488 #undef  MIN
1489 }
1490
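/*
 * Check that a mapped operand satisfies the chip's alignment rules:
 * every segment must start on a 32-bit boundary, and all but the
 * last segment must also be a multiple of 4 bytes in length.
 */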
1491 static int
1492 hifn_dmamap_aligned(struct hifn_operand *op)
1493 {
1494         int i;
1495
1496         for (i = 0; i < op->nsegs; i++) {
1497                 if (op->segs[i].ds_addr & 3)
1498                         return (0);
1499                 if ((i != (op->nsegs - 1)) && (op->segs[i].ds_len & 3))
1500                         return (0);
1501         }
1502         return (1);
1503 }
1504
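/*
 * Load the destination descriptor ring from the operand's segment
 * list, inserting a jump descriptor whenever the ring wraps.  If the
 * transfer length is not a multiple of 4, the trailing (slop) bytes
 * are directed into the shared slop buffer rather than the caller's
 * final segment.
 */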
1505 static int
1506 hifn_dmamap_load_dst(struct hifn_softc *sc, struct hifn_command *cmd)
1507 {
1508         struct hifn_dma *dma = sc->sc_dma;
1509         struct hifn_operand *dst = &cmd->dst;
1510         u_int32_t p, l;
1511         int idx, used = 0, i;
1512
1513         idx = dma->dsti;
1514         for (i = 0; i < dst->nsegs - 1; i++) {
1515                 dma->dstr[idx].p = htole32(dst->segs[i].ds_addr);
1516                 dma->dstr[idx].l = htole32(HIFN_D_VALID |
1517                     HIFN_D_MASKDONEIRQ | dst->segs[i].ds_len);
1518                 HIFN_DSTR_SYNC(sc, idx,
1519                     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1520                 used++;
1521
1522                 if (++idx == HIFN_D_DST_RSIZE) {
1523                         dma->dstr[idx].l = htole32(HIFN_D_VALID |
1524                             HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
1525                         HIFN_DSTR_SYNC(sc, idx,
1526                             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1527                         idx = 0;
1528                 }
1529         }
1530
1531         if (cmd->sloplen == 0) {
1532                 p = dst->segs[i].ds_addr;
1533                 l = HIFN_D_VALID | HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
1534                     dst->segs[i].ds_len;
1535         } else {
1536                 p = sc->sc_dma_physaddr +
1537                     offsetof(struct hifn_dma, slop[cmd->slopidx]);
1538                 l = HIFN_D_VALID | HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
1539                     sizeof(u_int32_t);
1540
1541                 if ((dst->segs[i].ds_len - cmd->sloplen) != 0) {
1542                         dma->dstr[idx].p = htole32(dst->segs[i].ds_addr);
1543                         dma->dstr[idx].l = htole32(HIFN_D_VALID |
1544                             HIFN_D_MASKDONEIRQ |
1545                             (dst->segs[i].ds_len - cmd->sloplen));
1546                         HIFN_DSTR_SYNC(sc, idx,
1547                             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1548                         used++;
1549
1550                         if (++idx == HIFN_D_DST_RSIZE) {
1551                                 dma->dstr[idx].l = htole32(HIFN_D_VALID |
1552                                     HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
1553                                 HIFN_DSTR_SYNC(sc, idx,
1554                                     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1555                                 idx = 0;
1556                         }
1557                 }
1558         }
1559         dma->dstr[idx].p = htole32(p);
1560         dma->dstr[idx].l = htole32(l);
1561         HIFN_DSTR_SYNC(sc, idx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1562         used++;
1563
1564         if (++idx == HIFN_D_DST_RSIZE) {
1565                 dma->dstr[idx].l = htole32(HIFN_D_VALID | HIFN_D_JUMP |
1566                     HIFN_D_MASKDONEIRQ);
1567                 HIFN_DSTR_SYNC(sc, idx,
1568                     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1569                 idx = 0;
1570         }
1571
1572         dma->dsti = idx;
1573         dma->dstu += used;
1574         return (idx);
1575 }
1576
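/*
 * Load the source descriptor ring from the operand's segment list,
 * marking the final segment HIFN_D_LAST and inserting a jump
 * descriptor when the ring wraps.
 */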
1577 static int
1578 hifn_dmamap_load_src(struct hifn_softc *sc, struct hifn_command *cmd)
1579 {
1580         struct hifn_dma *dma = sc->sc_dma;
1581         struct hifn_operand *src = &cmd->src;
1582         int idx, i;
1583         u_int32_t last = 0;
1584
1585         idx = dma->srci;
1586         for (i = 0; i < src->nsegs; i++) {
1587                 if (i == src->nsegs - 1)
1588                         last = HIFN_D_LAST;
1589
1590                 dma->srcr[idx].p = htole32(src->segs[i].ds_addr);
1591                 dma->srcr[idx].l = htole32(src->segs[i].ds_len |
1592                     HIFN_D_VALID | HIFN_D_MASKDONEIRQ | last);
1593                 HIFN_SRCR_SYNC(sc, idx,
1594                     BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1595
1596                 if (++idx == HIFN_D_SRC_RSIZE) {
1597                         dma->srcr[idx].l = htole32(HIFN_D_VALID |
1598                             HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
1599                         HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE,
1600                             BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1601                         idx = 0;
1602                 }
1603         }
1604         dma->srci = idx;
1605         dma->srcu += src->nsegs;
1606         return (idx);
1607 }
1608
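/*
 * bus_dma callback: record the mapped size and segment list of a
 * source/destination operand for the ring-loading routines above.
 */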
1609 static void
1610 hifn_op_cb(void* arg, bus_dma_segment_t *seg, int nsegs, bus_size_t mapsize, int error)
1611 {
1612         struct hifn_operand *op = arg;
1613
1614         KASSERT(nsegs <= MAX_SCATTER,
1615                 ("hifn_op_cb: too many DMA segments (%u > %u) "
1616                  "returned when mapping operand", nsegs, MAX_SCATTER));
1617         op->mapsize = mapsize;
1618         op->nsegs = nsegs;
1619         bcopy(seg, op->segs, nsegs * sizeof (seg[0]));
1620 }
1621
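/*
 * Queue a single operation on the chip: write the command block,
 * load the source and destination rings, reserve a result slot and
 * kick the DMA engines.  Returns ERESTART when descriptor space is
 * exhausted so the request can be requeued, ENOMEM when a DMA map
 * cannot be created or loaded, and EINVAL for unsupported buffer
 * layouts.
 */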
1622 static int 
1623 hifn_crypto(
1624         struct hifn_softc *sc,
1625         struct hifn_command *cmd,
1626         struct cryptop *crp,
1627         int hint)
1628 {
1629         struct  hifn_dma *dma = sc->sc_dma;
1630         u_int32_t cmdlen;
1631         int cmdi, resi, err = 0;
1632
1633         /*
1634          * need 1 cmd, and 1 res
1635          *
1636          * NB: check this first since it's easy.
1637          */
1638         if ((dma->cmdu + 1) > HIFN_D_CMD_RSIZE ||
1639             (dma->resu + 1) > HIFN_D_RES_RSIZE) {
1640 #ifdef HIFN_DEBUG
1641                 if (hifn_debug) {
1642                         device_printf(sc->sc_dev,
1643                                 "cmd/result exhaustion, cmdu %u resu %u\n",
1644                                 dma->cmdu, dma->resu);
1645                 }
1646 #endif
1647                 hifnstats.hst_nomem_cr++;
1648                 return (ERESTART);
1649         }
1650
1651         if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, &cmd->src_map)) {
1652                 hifnstats.hst_nomem_map++;
1653                 return (ENOMEM);
1654         }
1655
1656         if (crp->crp_flags & CRYPTO_F_IMBUF) {
1657                 if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->src_map,
1658                     cmd->src_m, hifn_op_cb, &cmd->src, BUS_DMA_NOWAIT)) {
1659                         hifnstats.hst_nomem_load++;
1660                         err = ENOMEM;
1661                         goto err_srcmap1;
1662                 }
1663         } else if (crp->crp_flags & CRYPTO_F_IOV) {
1664                 if (bus_dmamap_load_uio(sc->sc_dmat, cmd->src_map,
1665                     cmd->src_io, hifn_op_cb, &cmd->src, BUS_DMA_NOWAIT)) {
1666                         hifnstats.hst_nomem_load++;
1667                         err = ENOMEM;
1668                         goto err_srcmap1;
1669                 }
1670         } else {
1671                 err = EINVAL;
1672                 goto err_srcmap1;
1673         }
1674
1675         if (hifn_dmamap_aligned(&cmd->src)) {
1676                 cmd->sloplen = cmd->src_mapsize & 3;
1677                 cmd->dst = cmd->src;
1678         } else {
1679                 if (crp->crp_flags & CRYPTO_F_IOV) {
1680                         err = EINVAL;
1681                         goto err_srcmap;
1682                 } else if (crp->crp_flags & CRYPTO_F_IMBUF) {
1683                         int totlen, len;
1684                         struct mbuf *m, *m0, *mlast;
1685
1686                         KASSERT(cmd->dst_m == cmd->src_m,
1687                                 ("hifn_crypto: dst_m initialized improperly"));
1688                         hifnstats.hst_unaligned++;
1689                         /*
1690                          * Source is not aligned on a longword boundary.
1691                          * Copy the data to insure alignment.  If we fail
1692                          * Copy the data to ensure alignment.  If we fail
1693                          * we return ERESTART so the operation is requeued
1694                          * at the crypto later, but only if there are
1695                          * at the crypto layer, but only if there are
1696                          * have no guarantee that we'll be re-entered.
1697                          */
1698                         totlen = cmd->src_mapsize;
1699                         if (cmd->src_m->m_flags & M_PKTHDR) {
1700                                 len = MHLEN;
1701                                 MGETHDR(m0, M_DONTWAIT, MT_DATA);
1702                                 if (m0 && !m_dup_pkthdr(m0, cmd->src_m, M_DONTWAIT)) {
1703                                         m_free(m0);
1704                                         m0 = NULL;
1705                                 }
1706                         } else {
1707                                 len = MLEN;
1708                                 MGET(m0, M_DONTWAIT, MT_DATA);
1709                         }
1710                         if (m0 == NULL) {
1711                                 hifnstats.hst_nomem_mbuf++;
1712                                 err = dma->cmdu ? ERESTART : ENOMEM;
1713                                 goto err_srcmap;
1714                         }
1715                         if (totlen >= MINCLSIZE) {
1716                                 MCLGET(m0, M_DONTWAIT);
1717                                 if ((m0->m_flags & M_EXT) == 0) {
1718                                         hifnstats.hst_nomem_mcl++;
1719                                         err = dma->cmdu ? ERESTART : ENOMEM;
1720                                         m_freem(m0);
1721                                         goto err_srcmap;
1722                                 }
1723                                 len = MCLBYTES;
1724                         }
1725                         totlen -= len;
1726                         m0->m_pkthdr.len = m0->m_len = len;
1727                         mlast = m0;
1728
1729                         while (totlen > 0) {
1730                                 MGET(m, M_DONTWAIT, MT_DATA);
1731                                 if (m == NULL) {
1732                                         hifnstats.hst_nomem_mbuf++;
1733                                         err = dma->cmdu ? ERESTART : ENOMEM;
1734                                         m_freem(m0);
1735                                         goto err_srcmap;
1736                                 }
1737                                 len = MLEN;
1738                                 if (totlen >= MINCLSIZE) {
1739                                         MCLGET(m, M_DONTWAIT);
1740                                         if ((m->m_flags & M_EXT) == 0) {
1741                                                 hifnstats.hst_nomem_mcl++;
1742                                                 err = dma->cmdu ? ERESTART : ENOMEM;
1743                                                 mlast->m_next = m;
1744                                                 m_freem(m0);
1745                                                 goto err_srcmap;
1746                                         }
1747                                         len = MCLBYTES;
1748                                 }
1749
1750                                 m->m_len = len;
1751                                 m0->m_pkthdr.len += len;
1752                                 totlen -= len;
1753
1754                                 mlast->m_next = m;
1755                                 mlast = m;
1756                         }
1757                         cmd->dst_m = m0;
1758                 }
1759         }
1760
1761         if (cmd->dst_map == NULL) {
1762                 if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, &cmd->dst_map)) {
1763                         hifnstats.hst_nomem_map++;
1764                         err = ENOMEM;
1765                         goto err_srcmap;
1766                 }
1767                 if (crp->crp_flags & CRYPTO_F_IMBUF) {
1768                         if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->dst_map,
1769                             cmd->dst_m, hifn_op_cb, &cmd->dst, BUS_DMA_NOWAIT)) {
1770                                 hifnstats.hst_nomem_map++;
1771                                 err = ENOMEM;
1772                                 goto err_dstmap1;
1773                         }
1774                 } else if (crp->crp_flags & CRYPTO_F_IOV) {
1775                         if (bus_dmamap_load_uio(sc->sc_dmat, cmd->dst_map,
1776                             cmd->dst_io, hifn_op_cb, &cmd->dst, BUS_DMA_NOWAIT)) {
1777                                 hifnstats.hst_nomem_load++;
1778                                 err = ENOMEM;
1779                                 goto err_dstmap1;
1780                         }
1781                 }
1782         }
1783
1784 #ifdef HIFN_DEBUG
1785         if (hifn_debug) {
1786                 device_printf(sc->sc_dev,
1787                     "Entering cmd: stat %8x ien %8x u %d/%d/%d/%d n %d/%d\n",
1788                     READ_REG_1(sc, HIFN_1_DMA_CSR),
1789                     READ_REG_1(sc, HIFN_1_DMA_IER),
1790                     dma->cmdu, dma->srcu, dma->dstu, dma->resu,
1791                     cmd->src_nsegs, cmd->dst_nsegs);
1792         }
1793 #endif
1794
1795         if (cmd->src_map == cmd->dst_map) {
1796                 bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
1797                     BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
1798         } else {
1799                 bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
1800                     BUS_DMASYNC_PREWRITE);
1801                 bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
1802                     BUS_DMASYNC_PREREAD);
1803         }
1804
1805         /*
1806          * need N src, and N dst
1807          */
1808         if ((dma->srcu + cmd->src_nsegs) > HIFN_D_SRC_RSIZE ||
1809             (dma->dstu + cmd->dst_nsegs + 1) > HIFN_D_DST_RSIZE) {
1810 #ifdef HIFN_DEBUG
1811                 if (hifn_debug) {
1812                         device_printf(sc->sc_dev,
1813                                 "src/dst exhaustion, srcu %u+%u dstu %u+%u\n",
1814                                 dma->srcu, cmd->src_nsegs,
1815                                 dma->dstu, cmd->dst_nsegs);
1816                 }
1817 #endif
1818                 hifnstats.hst_nomem_sd++;
1819                 err = ERESTART;
1820                 goto err_dstmap;
1821         }
1822
1823         if (dma->cmdi == HIFN_D_CMD_RSIZE) {
1824                 dma->cmdi = 0;
1825                 dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID |
1826                     HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
1827                 HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
1828                     BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1829         }
1830         cmdi = dma->cmdi++;
1831         cmdlen = hifn_write_command(cmd, dma->command_bufs[cmdi]);
1832         HIFN_CMD_SYNC(sc, cmdi, BUS_DMASYNC_PREWRITE);
1833
1834         /* .p for command/result already set */
1835         dma->cmdr[cmdi].l = htole32(cmdlen | HIFN_D_VALID | HIFN_D_LAST |
1836             HIFN_D_MASKDONEIRQ);
1837         HIFN_CMDR_SYNC(sc, cmdi,
1838             BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1839         dma->cmdu++;
1840         if (sc->sc_c_busy == 0) {
1841                 WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_C_CTRL_ENA);
1842                 sc->sc_c_busy = 1;
1843         }
1844
1845         /*
1846          * We don't worry about missing an interrupt (which a "command wait"
1847          * interrupt salvages us from), unless there is more than one command
1848          * in the queue.
1849          */
1850         if (dma->cmdu > 1) {
1851                 sc->sc_dmaier |= HIFN_DMAIER_C_WAIT;
1852                 WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
1853         }
1854
1855         hifnstats.hst_ipackets++;
1856         hifnstats.hst_ibytes += cmd->src_mapsize;
1857
1858         hifn_dmamap_load_src(sc, cmd);
1859         if (sc->sc_s_busy == 0) {
1860                 WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_S_CTRL_ENA);
1861                 sc->sc_s_busy = 1;
1862         }
1863
1864         /*
1865          * Unlike other descriptors, we don't mask the done interrupt from
1866          * the result descriptor.
1867          */
1868 #ifdef HIFN_DEBUG
1869         if (hifn_debug)
1870                 printf("load res\n");
1871 #endif
1872         if (dma->resi == HIFN_D_RES_RSIZE) {
1873                 dma->resi = 0;
1874                 dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID |
1875                     HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
1876                 HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
1877                     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1878         }
1879         resi = dma->resi++;
1880         KASSERT(dma->hifn_commands[resi] == NULL,
1881                 ("hifn_crypto: command slot %u busy", resi));
1882         dma->hifn_commands[resi] = cmd;
1883         HIFN_RES_SYNC(sc, resi, BUS_DMASYNC_PREREAD);
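        /*
         * When the caller hints that more requests are imminent, mask the
         * done interrupt on this result descriptor so completions can be
         * batched (up to hifn_maxbatch at a time).
         */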
1884         if ((hint & CRYPTO_HINT_MORE) && sc->sc_curbatch < hifn_maxbatch) {
1885                 dma->resr[resi].l = htole32(HIFN_MAX_RESULT |
1886                     HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ);
1887                 sc->sc_curbatch++;
1888                 if (sc->sc_curbatch > hifnstats.hst_maxbatch)
1889                         hifnstats.hst_maxbatch = sc->sc_curbatch;
1890                 hifnstats.hst_totbatch++;
1891         } else {
1892                 dma->resr[resi].l = htole32(HIFN_MAX_RESULT |
1893                     HIFN_D_VALID | HIFN_D_LAST);
1894                 sc->sc_curbatch = 0;
1895         }
1896         HIFN_RESR_SYNC(sc, resi,
1897             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1898         dma->resu++;
1899         if (sc->sc_r_busy == 0) {
1900                 WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_R_CTRL_ENA);
1901                 sc->sc_r_busy = 1;
1902         }
1903
1904         if (cmd->sloplen)
1905                 cmd->slopidx = resi;
1906
1907         hifn_dmamap_load_dst(sc, cmd);
1908
1909         if (sc->sc_d_busy == 0) {
1910                 WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_D_CTRL_ENA);
1911                 sc->sc_d_busy = 1;
1912         }
1913
1914 #ifdef HIFN_DEBUG
1915         if (hifn_debug) {
1916                 device_printf(sc->sc_dev, "command: stat %8x ier %8x\n",
1917                     READ_REG_1(sc, HIFN_1_DMA_CSR),
1918                     READ_REG_1(sc, HIFN_1_DMA_IER));
1919         }
1920 #endif
1921
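        /* Re-arm the idle countdown that hifn_tick() uses to stop the DMA engines. */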
1922         sc->sc_active = 5;
1923         KASSERT(err == 0, ("hifn_crypto: success with error %u", err));
1924         return (err);           /* success */
1925
1926 err_dstmap:
1927         if (cmd->src_map != cmd->dst_map)
1928                 bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
1929 err_dstmap1:
1930         if (cmd->src_map != cmd->dst_map)
1931                 bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
1932 err_srcmap:
1933         if (crp->crp_flags & CRYPTO_F_IMBUF) {
1934                 if (cmd->src_m != cmd->dst_m)
1935                         m_freem(cmd->dst_m);
1936         }
1937         bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
1938 err_srcmap1:
1939         bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
1940         return (err);
1941 }
1942
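/*
 * Once-a-second timer: after the device has been idle for several
 * ticks, disable any DMA engines whose rings have drained.
 */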
1943 static void
1944 hifn_tick(void* vsc)
1945 {
1946         struct hifn_softc *sc = vsc;
1947         int s;
1948
1949         s = splimp();
1950         if (sc->sc_active == 0) {
1951                 struct hifn_dma *dma = sc->sc_dma;
1952                 u_int32_t r = 0;
1953
1954                 if (dma->cmdu == 0 && sc->sc_c_busy) {
1955                         sc->sc_c_busy = 0;
1956                         r |= HIFN_DMACSR_C_CTRL_DIS;
1957                 }
1958                 if (dma->srcu == 0 && sc->sc_s_busy) {
1959                         sc->sc_s_busy = 0;
1960                         r |= HIFN_DMACSR_S_CTRL_DIS;
1961                 }
1962                 if (dma->dstu == 0 && sc->sc_d_busy) {
1963                         sc->sc_d_busy = 0;
1964                         r |= HIFN_DMACSR_D_CTRL_DIS;
1965                 }
1966                 if (dma->resu == 0 && sc->sc_r_busy) {
1967                         sc->sc_r_busy = 0;
1968                         r |= HIFN_DMACSR_R_CTRL_DIS;
1969                 }
1970                 if (r)
1971                         WRITE_REG_1(sc, HIFN_1_DMA_CSR, r);
1972         } else
1973                 sc->sc_active--;
1974         splx(s);
1975         callout_reset(&sc->sc_tickto, hz, hifn_tick, sc);
1976 }
1977
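/*
 * Interrupt handler: acknowledge the DMA status bits, handle abort
 * and overrun conditions, reap completed result/source/command
 * descriptors, and hand finished requests back to the crypto layer.
 */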
1978 static void 
1979 hifn_intr(void *arg)
1980 {
1981         struct hifn_softc *sc = arg;
1982         struct hifn_dma *dma;
1983         u_int32_t dmacsr, restart;
1984         int i, u;
1985
1986         dma = sc->sc_dma;
1987
1988         dmacsr = READ_REG_1(sc, HIFN_1_DMA_CSR);
1989
1990 #ifdef HIFN_DEBUG
1991         if (hifn_debug) {
1992                 device_printf(sc->sc_dev,
1993                     "irq: stat %08x ien %08x damier %08x i %d/%d/%d/%d k %d/%d/%d/%d u %d/%d/%d/%d\n",
1994                     dmacsr, READ_REG_1(sc, HIFN_1_DMA_IER), sc->sc_dmaier,
1995                     dma->cmdi, dma->srci, dma->dsti, dma->resi,
1996                     dma->cmdk, dma->srck, dma->dstk, dma->resk,
1997                     dma->cmdu, dma->srcu, dma->dstu, dma->resu);
1998         }
1999 #endif
2000
2001         /* Nothing in the DMA unit interrupted */
2002         if ((dmacsr & sc->sc_dmaier) == 0) {
2003                 hifnstats.hst_noirq++;
2004                 return;
2005         }
2006
2007         WRITE_REG_1(sc, HIFN_1_DMA_CSR, dmacsr & sc->sc_dmaier);
2008
2009         if ((sc->sc_flags & HIFN_HAS_PUBLIC) &&
2010             (dmacsr & HIFN_DMACSR_PUBDONE))
2011                 WRITE_REG_1(sc, HIFN_1_PUB_STATUS,
2012                     READ_REG_1(sc, HIFN_1_PUB_STATUS) | HIFN_PUBSTS_DONE);
2013
2014         restart = dmacsr & (HIFN_DMACSR_D_OVER | HIFN_DMACSR_R_OVER);
2015         if (restart)
2016                 device_printf(sc->sc_dev, "overrun %x\n", dmacsr);
2017
2018         if (sc->sc_flags & HIFN_IS_7811) {
2019                 if (dmacsr & HIFN_DMACSR_ILLR)
2020                         device_printf(sc->sc_dev, "illegal read\n");
2021                 if (dmacsr & HIFN_DMACSR_ILLW)
2022                         device_printf(sc->sc_dev, "illegal write\n");
2023         }
2024
2025         restart = dmacsr & (HIFN_DMACSR_C_ABORT | HIFN_DMACSR_S_ABORT |
2026             HIFN_DMACSR_D_ABORT | HIFN_DMACSR_R_ABORT);
2027         if (restart) {
2028                 device_printf(sc->sc_dev, "abort, resetting.\n");
2029                 hifnstats.hst_abort++;
2030                 hifn_abort(sc);
2031                 return;
2032         }
2033
2034         if ((dmacsr & HIFN_DMACSR_C_WAIT) && (dma->cmdu == 0)) {
2035                 /*
2036                  * If no slots to process and we receive a "waiting on
2037                  * command" interrupt, we disable the "waiting on command"
2038                  * interrupt (by clearing it).
2039                  */
2040                 sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
2041                 WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
2042         }
2043
2044         /* clear the rings */
2045         i = dma->resk; u = dma->resu;
2046         while (u != 0) {
2047                 HIFN_RESR_SYNC(sc, i,
2048                     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2049                 if (dma->resr[i].l & htole32(HIFN_D_VALID)) {
2050                         HIFN_RESR_SYNC(sc, i,
2051                             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2052                         break;
2053                 }
2054
2055                 if (i != HIFN_D_RES_RSIZE) {
2056                         struct hifn_command *cmd;
2057                         u_int8_t *macbuf = NULL;
2058
2059                         HIFN_RES_SYNC(sc, i, BUS_DMASYNC_POSTREAD);
2060                         cmd = dma->hifn_commands[i];
2061                         KASSERT(cmd != NULL,
2062                                 ("hifn_intr: null command slot %u", i));
2063                         dma->hifn_commands[i] = NULL;
2064
2065                         if (cmd->base_masks & HIFN_BASE_CMD_MAC) {
2066                                 macbuf = dma->result_bufs[i];
2067                                 macbuf += 12;
2068                         }
2069
2070                         hifn_callback(sc, cmd, macbuf);
2071                         hifnstats.hst_opackets++;
2072                         u--;
2073                 }
2074
2075                 if (++i == (HIFN_D_RES_RSIZE + 1))
2076                         i = 0;
2077         }
2078         dma->resk = i; dma->resu = u;
2079
2080         i = dma->srck; u = dma->srcu;
2081         while (u != 0) {
2082                 if (i == HIFN_D_SRC_RSIZE)
2083                         i = 0;
2084                 HIFN_SRCR_SYNC(sc, i,
2085                     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2086                 if (dma->srcr[i].l & htole32(HIFN_D_VALID)) {
2087                         HIFN_SRCR_SYNC(sc, i,
2088                             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2089                         break;
2090                 }
2091                 i++, u--;
2092         }
2093         dma->srck = i; dma->srcu = u;
2094
2095         i = dma->cmdk; u = dma->cmdu;
2096         while (u != 0) {
2097                 HIFN_CMDR_SYNC(sc, i,
2098                     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2099                 if (dma->cmdr[i].l & htole32(HIFN_D_VALID)) {
2100                         HIFN_CMDR_SYNC(sc, i,
2101                             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2102                         break;
2103                 }
2104                 if (i != HIFN_D_CMD_RSIZE) {
2105                         u--;
2106                         HIFN_CMD_SYNC(sc, i, BUS_DMASYNC_POSTWRITE);
2107                 }
2108                 if (++i == (HIFN_D_CMD_RSIZE + 1))
2109                         i = 0;
2110         }
2111         dma->cmdk = i; dma->cmdu = u;
2112
2113         if (sc->sc_needwakeup) {                /* XXX check high watermark */
2114                 int wakeup = sc->sc_needwakeup & (CRYPTO_SYMQ|CRYPTO_ASYMQ);
2115 #ifdef HIFN_DEBUG
2116                 if (hifn_debug)
2117                         device_printf(sc->sc_dev,
2118                                 "wakeup crypto (%x) u %d/%d/%d/%d\n",
2119                                 sc->sc_needwakeup,
2120                                 dma->cmdu, dma->srcu, dma->dstu, dma->resu);
2121 #endif
2122                 sc->sc_needwakeup &= ~wakeup;
2123                 crypto_unblock(sc->sc_cid, wakeup);
2124         }
2125 }
2126
2127 /*
2128  * Allocate a new 'session' and return an encoded session id.  'sidp'
2129  * contains our registration id, and should contain an encoded session
2130  * id on successful allocation.
2131  */
2132 static int
2133 hifn_newsession(void *arg, u_int32_t *sidp, struct cryptoini *cri)
2134 {
2135         struct cryptoini *c;
2136         struct hifn_softc *sc = arg;
2137         int i, mac = 0, cry = 0;
2138
2139         KASSERT(sc != NULL, ("hifn_newsession: null softc"));
2140         if (sidp == NULL || cri == NULL || sc == NULL)
2141                 return (EINVAL);
2142
2143         for (i = 0; i < sc->sc_maxses; i++)
2144                 if (sc->sc_sessions[i].hs_state == HS_STATE_FREE)
2145                         break;
2146         if (i == sc->sc_maxses)
2147                 return (ENOMEM);
2148
2149         for (c = cri; c != NULL; c = c->cri_next) {
2150                 switch (c->cri_alg) {
2151                 case CRYPTO_MD5:
2152                 case CRYPTO_SHA1:
2153                 case CRYPTO_MD5_HMAC:
2154                 case CRYPTO_SHA1_HMAC:
2155                         if (mac)
2156                                 return (EINVAL);
2157                         mac = 1;
2158                         break;
2159                 case CRYPTO_DES_CBC:
2160                 case CRYPTO_3DES_CBC:
2161                         /* XXX this may read fewer, does it matter? */
2162                         read_random(sc->sc_sessions[i].hs_iv, HIFN_IV_LENGTH);
2163                         /*FALLTHROUGH*/
2164                 case CRYPTO_ARC4:
2165                         if (cry)
2166                                 return (EINVAL);
2167                         cry = 1;
2168                         break;
2169                 default:
2170                         return (EINVAL);
2171                 }
2172         }
2173         if (mac == 0 && cry == 0)
2174                 return (EINVAL);
2175
2176         *sidp = HIFN_SID(device_get_unit(sc->sc_dev), i);
2177         sc->sc_sessions[i].hs_state = HS_STATE_USED;
2178
2179         return (0);
2180 }
2181
2182 /*
2183  * Deallocate a session.
2184  * XXX this routine should run a zero'd mac/encrypt key into context ram.
2185  * XXX to blow away any keys already stored there.
2186  */
2187 static int
2188 hifn_freesession(void *arg, u_int64_t tid)
2189 {
2190         struct hifn_softc *sc = arg;
2191         int session;
2192         u_int32_t sid = ((u_int32_t) tid) & 0xffffffff;
2193
2194         KASSERT(sc != NULL, ("hifn_freesession: null softc"));
2195         if (sc == NULL)
2196                 return (EINVAL);
2197
2198         session = HIFN_SESSION(sid);
2199         if (session >= sc->sc_maxses)
2200                 return (EINVAL);
2201
2202         bzero(&sc->sc_sessions[session], sizeof(sc->sc_sessions[session]));
2203         return (0);
2204 }
2205
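/*
 * Dispatch a request from the crypto framework: validate the
 * descriptor chain, fill in a hifn_command (cipher/MAC masks, IV
 * and key handling) and pass it to hifn_crypto().
 */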
2206 static int
2207 hifn_process(void *arg, struct cryptop *crp, int hint)
2208 {
2209         struct hifn_softc *sc = arg;
2210         struct hifn_command *cmd = NULL;
2211         int session, err;
2212         struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
2213
2214         if (crp == NULL || crp->crp_callback == NULL) {
2215                 hifnstats.hst_invalid++;
2216                 return (EINVAL);
2217         }
2218         session = HIFN_SESSION(crp->crp_sid);
2219
2220         if (sc == NULL || session >= sc->sc_maxses) {
2221                 err = EINVAL;
2222                 goto errout;
2223         }
2224
2225         cmd = malloc(sizeof(struct hifn_command), M_DEVBUF, M_NOWAIT | M_ZERO);
2226         if (cmd == NULL) {
2227                 hifnstats.hst_nomem++;
2228                 err = ENOMEM;
2229                 goto errout;
2230         }
2231
2232         if (crp->crp_flags & CRYPTO_F_IMBUF) {
2233                 cmd->src_m = (struct mbuf *)crp->crp_buf;
2234                 cmd->dst_m = (struct mbuf *)crp->crp_buf;
2235         } else if (crp->crp_flags & CRYPTO_F_IOV) {
2236                 cmd->src_io = (struct uio *)crp->crp_buf;
2237                 cmd->dst_io = (struct uio *)crp->crp_buf;
2238         } else {
2239                 err = EINVAL;
2240                 goto errout;    /* XXX we don't handle contiguous buffers! */
2241         }
2242
2243         crd1 = crp->crp_desc;
2244         if (crd1 == NULL) {
2245                 err = EINVAL;
2246                 goto errout;
2247         }
2248         crd2 = crd1->crd_next;
2249
2250         if (crd2 == NULL) {
2251                 if (crd1->crd_alg == CRYPTO_MD5_HMAC ||
2252                     crd1->crd_alg == CRYPTO_SHA1_HMAC ||
2253                     crd1->crd_alg == CRYPTO_SHA1 ||
2254                     crd1->crd_alg == CRYPTO_MD5) {
2255                         maccrd = crd1;
2256                         enccrd = NULL;
2257                 } else if (crd1->crd_alg == CRYPTO_DES_CBC ||
2258                     crd1->crd_alg == CRYPTO_3DES_CBC ||
2259                     crd1->crd_alg == CRYPTO_ARC4) {
2260                         if ((crd1->crd_flags & CRD_F_ENCRYPT) == 0)
2261                                 cmd->base_masks |= HIFN_BASE_CMD_DECODE;
2262                         maccrd = NULL;
2263                         enccrd = crd1;
2264                 } else {
2265                         err = EINVAL;
2266                         goto errout;
2267                 }
2268         } else {
2269                 if ((crd1->crd_alg == CRYPTO_MD5_HMAC ||
2270                      crd1->crd_alg == CRYPTO_SHA1_HMAC ||
2271                      crd1->crd_alg == CRYPTO_MD5 ||
2272                      crd1->crd_alg == CRYPTO_SHA1) &&
2273                     (crd2->crd_alg == CRYPTO_DES_CBC ||
2274                      crd2->crd_alg == CRYPTO_3DES_CBC ||
2275                      crd2->crd_alg == CRYPTO_ARC4) &&
2276                     ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) {
2277                         cmd->base_masks = HIFN_BASE_CMD_DECODE;
2278                         maccrd = crd1;
2279                         enccrd = crd2;
2280                 } else if ((crd1->crd_alg == CRYPTO_DES_CBC ||
2281                      crd1->crd_alg == CRYPTO_ARC4 ||
2282                      crd1->crd_alg == CRYPTO_3DES_CBC) &&
2283                     (crd2->crd_alg == CRYPTO_MD5_HMAC ||
2284                      crd2->crd_alg == CRYPTO_SHA1_HMAC ||
2285                      crd2->crd_alg == CRYPTO_MD5 ||
2286                      crd2->crd_alg == CRYPTO_SHA1) &&
2287                     (crd1->crd_flags & CRD_F_ENCRYPT)) {
2288                         enccrd = crd1;
2289                         maccrd = crd2;
2290                 } else {
2291                         /*
2292                          * We cannot order the 7751 as requested
2293                          */
2294                         err = EINVAL;
2295                         goto errout;
2296                 }
2297         }
2298
2299         if (enccrd) {
2300                 cmd->enccrd = enccrd;
2301                 cmd->base_masks |= HIFN_BASE_CMD_CRYPT;
2302                 switch (enccrd->crd_alg) {
2303                 case CRYPTO_ARC4:
2304                         cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_RC4;
2305                         if ((enccrd->crd_flags & CRD_F_ENCRYPT)
2306                             != sc->sc_sessions[session].hs_prev_op)
2307                                 sc->sc_sessions[session].hs_state =
2308                                     HS_STATE_USED;
2309                         break;
2310                 case CRYPTO_DES_CBC:
2311                         cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_DES |
2312                             HIFN_CRYPT_CMD_MODE_CBC |
2313                             HIFN_CRYPT_CMD_NEW_IV;
2314                         break;
2315                 case CRYPTO_3DES_CBC:
2316                         cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_3DES |
2317                             HIFN_CRYPT_CMD_MODE_CBC |
2318                             HIFN_CRYPT_CMD_NEW_IV;
2319                         break;
2320                 default:
2321                         err = EINVAL;
2322                         goto errout;
2323                 }
2324                 if (enccrd->crd_alg != CRYPTO_ARC4) {
2325                         if (enccrd->crd_flags & CRD_F_ENCRYPT) {
2326                                 if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
2327                                         bcopy(enccrd->crd_iv, cmd->iv,
2328                                             HIFN_IV_LENGTH);
2329                                 else
2330                                         bcopy(sc->sc_sessions[session].hs_iv,
2331                                             cmd->iv, HIFN_IV_LENGTH);
2332
2333                                 if ((enccrd->crd_flags & CRD_F_IV_PRESENT)
2334                                     == 0) {
2335                                         if (crp->crp_flags & CRYPTO_F_IMBUF)
2336                                                 m_copyback(cmd->src_m,
2337                                                     enccrd->crd_inject,
2338                                                     HIFN_IV_LENGTH, cmd->iv);
2339                                         else if (crp->crp_flags & CRYPTO_F_IOV)
2340                                                 cuio_copyback(cmd->src_io,
2341                                                     enccrd->crd_inject,
2342                                                     HIFN_IV_LENGTH, cmd->iv);
2343                                 }
2344                         } else {
2345                                 if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
2346                                         bcopy(enccrd->crd_iv, cmd->iv,
2347                                             HIFN_IV_LENGTH);
2348                                 else if (crp->crp_flags & CRYPTO_F_IMBUF)
2349                                         m_copydata(cmd->src_m,
2350                                             enccrd->crd_inject,
2351                                             HIFN_IV_LENGTH, cmd->iv);
2352                                 else if (crp->crp_flags & CRYPTO_F_IOV)
2353                                         cuio_copydata(cmd->src_io,
2354                                             enccrd->crd_inject,
2355                                             HIFN_IV_LENGTH, cmd->iv);
2356                         }
2357                 }
2358
2359                 cmd->ck = enccrd->crd_key;
2360                 cmd->cklen = enccrd->crd_klen >> 3;
2361
2362                 if (sc->sc_sessions[session].hs_state == HS_STATE_USED)
2363                         cmd->cry_masks |= HIFN_CRYPT_CMD_NEW_KEY;
2364         }
2365
2366         if (maccrd) {
2367                 cmd->maccrd = maccrd;
2368                 cmd->base_masks |= HIFN_BASE_CMD_MAC;
2369
2370                 switch (maccrd->crd_alg) {
2371                 case CRYPTO_MD5:
2372                         cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 |
2373                             HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH |
2374                             HIFN_MAC_CMD_POS_IPSEC;
2375                        break;
2376                         break;
2377                         cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 |
2378                             HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC |
2379                             HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC;
2380                         break;
2381                 case CRYPTO_SHA1:
2382                         cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 |
2383                             HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH |
2384                             HIFN_MAC_CMD_POS_IPSEC;
2385                         break;
2386                 case CRYPTO_SHA1_HMAC:
2387                         cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 |
2388                             HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC |
2389                             HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC;
2390                         break;
2391                 }
2392
2393                 if ((maccrd->crd_alg == CRYPTO_SHA1_HMAC ||
2394                      maccrd->crd_alg == CRYPTO_MD5_HMAC) &&
2395                     sc->sc_sessions[session].hs_state == HS_STATE_USED) {
2396                         cmd->mac_masks |= HIFN_MAC_CMD_NEW_KEY;
2397                         bcopy(maccrd->crd_key, cmd->mac, maccrd->crd_klen >> 3);
2398                         bzero(cmd->mac + (maccrd->crd_klen >> 3),
2399                             HIFN_MAC_KEY_LENGTH - (maccrd->crd_klen >> 3));
2400                 }
2401         }
2402
2403         cmd->crp = crp;
2404         cmd->session_num = session;
2405         cmd->softc = sc;
2406
2407         err = hifn_crypto(sc, cmd, crp, hint);
2408         if (!err) {
2409                 if (enccrd)
2410                         sc->sc_sessions[session].hs_prev_op =
2411                                 enccrd->crd_flags & CRD_F_ENCRYPT;
2412                 if (sc->sc_sessions[session].hs_state == HS_STATE_USED)
2413                         sc->sc_sessions[session].hs_state = HS_STATE_KEY;
2414                 return 0;
2415         } else if (err == ERESTART) {
2416                 /*
2417                  * There weren't enough resources to dispatch the request
2418                  * to the part.  Notify the caller so they'll requeue this
2419                  * request and resubmit it again soon.
2420                  */
2421 #ifdef HIFN_DEBUG
2422                 if (hifn_debug)
2423                         device_printf(sc->sc_dev, "requeue request\n");
2424 #endif
2425                 free(cmd, M_DEVBUF);
2426                 sc->sc_needwakeup |= CRYPTO_SYMQ;
2427                 return (err);
2428         }
2429
2430 errout:
2431         if (cmd != NULL)
2432                 free(cmd, M_DEVBUF);
2433         if (err == EINVAL)
2434                 hifnstats.hst_invalid++;
2435         else
2436                 hifnstats.hst_nomem++;
2437         crp->crp_etype = err;
2438         crypto_done(crp);
2439         return (err);
2440 }
2441
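/*
 * Recover from a DMA abort: complete whatever the chip already
 * finished, fail the remaining outstanding requests, then reset and
 * reinitialize the board.
 */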
2442 static void
2443 hifn_abort(struct hifn_softc *sc)
2444 {
2445         struct hifn_dma *dma = sc->sc_dma;
2446         struct hifn_command *cmd;
2447         struct cryptop *crp;
2448         int i, u;
2449
2450         i = dma->resk; u = dma->resu;
2451         while (u != 0) {
2452                 cmd = dma->hifn_commands[i];
2453                 KASSERT(cmd != NULL, ("hifn_abort: null command slot %u", i));
2454                 dma->hifn_commands[i] = NULL;
2455                 crp = cmd->crp;
2456
2457                 if ((dma->resr[i].l & htole32(HIFN_D_VALID)) == 0) {
2458                         /* Salvage what we can. */
2459                         u_int8_t *macbuf;
2460
2461                         if (cmd->base_masks & HIFN_BASE_CMD_MAC) {
2462                                 macbuf = dma->result_bufs[i];
2463                                 macbuf += 12;
2464                         } else
2465                                 macbuf = NULL;
2466                         hifnstats.hst_opackets++;
2467                         hifn_callback(sc, cmd, macbuf);
2468                 } else {
2469                         if (cmd->src_map == cmd->dst_map) {
2470                                 bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2471                                     BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2472                         } else {
2473                                 bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2474                                     BUS_DMASYNC_POSTWRITE);
2475                                 bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
2476                                     BUS_DMASYNC_POSTREAD);
2477                         }
2478
2479                         if (cmd->src_m != cmd->dst_m) {
2480                                 m_freem(cmd->src_m);
2481                                 crp->crp_buf = (caddr_t)cmd->dst_m;
2482                         }
2483
2484                         /* non-shared buffers cannot be restarted */
2485                         if (cmd->src_map != cmd->dst_map) {
2486                                 /*
2487                                  * XXX should be EAGAIN, delayed until
2488                                  * after the reset.
2489                                  */
2490                                 crp->crp_etype = ENOMEM;
2491                                 bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
2492                                 bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
2493                         } else
2494                                 crp->crp_etype = ENOMEM;
2495
2496                         bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
2497                         bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
2498
2499                         free(cmd, M_DEVBUF);
2500                         if (crp->crp_etype != EAGAIN)
2501                                 crypto_done(crp);
2502                 }
2503
2504                 if (++i == HIFN_D_RES_RSIZE)
2505                         i = 0;
2506                 u--;
2507         }
2508         dma->resk = i; dma->resu = u;
2509
2510         /* Force upload of key next time */
2511         for (i = 0; i < sc->sc_maxses; i++)
2512                 if (sc->sc_sessions[i].hs_state == HS_STATE_KEY)
2513                         sc->sc_sessions[i].hs_state = HS_STATE_USED;
2514         
2515         hifn_reset_board(sc, 1);
2516         hifn_init_dma(sc);
2517         hifn_init_pci_registers(sc);
2518 }
2519
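/*
 * Finish a completed command: sync and tear down the DMA maps, copy
 * back any slop bytes, save the session's next CBC IV from the tail
 * of the ciphertext, copy out the MAC result and complete the
 * request with crypto_done().
 */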
2520 static void
2521 hifn_callback(struct hifn_softc *sc, struct hifn_command *cmd, u_int8_t *macbuf)
2522 {
2523         struct hifn_dma *dma = sc->sc_dma;
2524         struct cryptop *crp = cmd->crp;
2525         struct cryptodesc *crd;
2526         struct mbuf *m;
2527         int totlen, i, u;
2528
2529         if (cmd->src_map == cmd->dst_map) {
2530                 bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2531                     BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
2532         } else {
2533                 bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2534                     BUS_DMASYNC_POSTWRITE);
2535                 bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
2536                     BUS_DMASYNC_POSTREAD);
2537         }
2538
2539         if (crp->crp_flags & CRYPTO_F_IMBUF) {
2540                 if (cmd->src_m != cmd->dst_m) {
2541                         crp->crp_buf = (caddr_t)cmd->dst_m;
2542                         totlen = cmd->src_mapsize;
2543                         for (m = cmd->dst_m; m != NULL; m = m->m_next) {
2544                                 if (totlen < m->m_len) {
2545                                         m->m_len = totlen;
2546                                         totlen = 0;
2547                                 } else
2548                                         totlen -= m->m_len;
2549                         }
2550                         cmd->dst_m->m_pkthdr.len = cmd->src_m->m_pkthdr.len;
2551                         m_freem(cmd->src_m);
2552                 }
2553         }
2554
2555         if (cmd->sloplen != 0) {
2556                 if (crp->crp_flags & CRYPTO_F_IMBUF)
2557                         m_copyback((struct mbuf *)crp->crp_buf,
2558                             cmd->src_mapsize - cmd->sloplen,
2559                             cmd->sloplen, (caddr_t)&dma->slop[cmd->slopidx]);
2560                 else if (crp->crp_flags & CRYPTO_F_IOV)
2561                         cuio_copyback((struct uio *)crp->crp_buf,
2562                             cmd->src_mapsize - cmd->sloplen,
2563                             cmd->sloplen, (caddr_t)&dma->slop[cmd->slopidx]);
2564         }
2565
2566         i = dma->dstk; u = dma->dstu;
2567         while (u != 0) {
2568                 if (i == HIFN_D_DST_RSIZE)
2569                         i = 0;
2570                 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
2571                     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2572                 if (dma->dstr[i].l & htole32(HIFN_D_VALID)) {
2573                         bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
2574                             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2575                         break;
2576                 }
2577                 i++, u--;
2578         }
2579         dma->dstk = i; dma->dstu = u;
2580
2581         hifnstats.hst_obytes += cmd->dst_mapsize;
2582
2583         if ((cmd->base_masks & (HIFN_BASE_CMD_CRYPT | HIFN_BASE_CMD_DECODE)) ==
2584             HIFN_BASE_CMD_CRYPT) {
2585                 for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
2586                         if (crd->crd_alg != CRYPTO_DES_CBC &&
2587                             crd->crd_alg != CRYPTO_3DES_CBC)
2588                                 continue;
2589                         if (crp->crp_flags & CRYPTO_F_IMBUF)
2590                                 m_copydata((struct mbuf *)crp->crp_buf,
2591                                     crd->crd_skip + crd->crd_len - HIFN_IV_LENGTH,
2592                                     HIFN_IV_LENGTH,
2593                                     cmd->softc->sc_sessions[cmd->session_num].hs_iv);
2594                         else if (crp->crp_flags & CRYPTO_F_IOV) {
2595                                 cuio_copydata((struct uio *)crp->crp_buf,
2596                                     crd->crd_skip + crd->crd_len - HIFN_IV_LENGTH,
2597                                     HIFN_IV_LENGTH,
2598                                     cmd->softc->sc_sessions[cmd->session_num].hs_iv);
2599                         }
2600                         break;
2601                 }
2602         }
2603
2604         if (macbuf != NULL) {
2605                 for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
2606                         int len;
2607
2608                         if (crd->crd_alg == CRYPTO_MD5)
2609                                 len = 16;
2610                         else if (crd->crd_alg == CRYPTO_SHA1)
2611                                 len = 20;
2612                         else if (crd->crd_alg == CRYPTO_MD5_HMAC ||
2613                             crd->crd_alg == CRYPTO_SHA1_HMAC)
2614                                 len = 12;
2615                         else
2616                                 continue;
2617
2618                         if (crp->crp_flags & CRYPTO_F_IMBUF)
2619                                 m_copyback((struct mbuf *)crp->crp_buf,
2620                                    crd->crd_inject, len, macbuf);
2621                         else if ((crp->crp_flags & CRYPTO_F_IOV) && crp->crp_mac)
2622                                 bcopy((caddr_t)macbuf, crp->crp_mac, len);
2623                         break;
2624                 }
2625         }
2626
2627         if (cmd->src_map != cmd->dst_map) {
2628                 bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
2629                 bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
2630         }
2631         bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
2632         bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
2633         free(cmd, M_DEVBUF);
2634         crypto_done(crp);
2635 }
2636
2637 /*
2638  * 7811 PB3 rev/2 parts lock up on burst writes to Group 0
2639  * and Group 1 registers; avoid conditions that could create
2640  * burst writes by doing a read in between the writes.
2641  *
2642  * NB: The read we interpose is always to the same register;
2643  *     we do this because reading from an arbitrary (e.g. last)
2644  *     register may not always work.
2645  */
2646 static void
2647 hifn_write_reg_0(struct hifn_softc *sc, bus_size_t reg, u_int32_t val)
2648 {
2649         if (sc->sc_flags & HIFN_IS_7811) {
2650                 if (sc->sc_bar0_lastreg == reg - 4)
2651                         bus_space_read_4(sc->sc_st0, sc->sc_sh0, HIFN_0_PUCNFG);
2652                 sc->sc_bar0_lastreg = reg;
2653         }
2654         bus_space_write_4(sc->sc_st0, sc->sc_sh0, reg, val);
2655 }
2656
2657 static void
2658 hifn_write_reg_1(struct hifn_softc *sc, bus_size_t reg, u_int32_t val)
2659 {
2660         if (sc->sc_flags & HIFN_IS_7811) {
2661                 if (sc->sc_bar1_lastreg == reg - 4)
2662                         bus_space_read_4(sc->sc_st1, sc->sc_sh1, HIFN_1_REVID);
2663                 sc->sc_bar1_lastreg = reg;
2664         }
2665         bus_space_write_4(sc->sc_st1, sc->sc_sh1, reg, val);
2666 }