bnx: Split RX/TX serializers
[dragonfly.git] / sys / dev / netif / bnx / if_bnx.c
index d0ac885..170fcac 100644 (file)
@@ -34,7 +34,7 @@
  */
 
 #include "opt_bnx.h"
-#include "opt_polling.h"
+#include "opt_ifpoll.h"
 
 #include <sys/param.h>
 #include <sys/bus.h>
@@ -59,6 +59,7 @@
 #include <net/if_arp.h>
 #include <net/if_dl.h>
 #include <net/if_media.h>
+#include <net/if_poll.h>
 #include <net/if_types.h>
 #include <net/ifq_var.h>
 #include <net/vlan/if_vlan_var.h>
@@ -89,6 +90,8 @@ static const struct bnx_type {
 } bnx_devs[] = {
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5717,
                "Broadcom BCM5717 Gigabit Ethernet" },
+       { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5717C,
+               "Broadcom BCM5717C Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5718,
                "Broadcom BCM5718 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5719,
@@ -96,6 +99,13 @@ static const struct bnx_type {
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5720_ALT,
                "Broadcom BCM5720 Gigabit Ethernet" },
 
+       { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5725,
+               "Broadcom BCM5725 Gigabit Ethernet" },
+       { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5727,
+               "Broadcom BCM5727 Gigabit Ethernet" },
+       { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5762,
+               "Broadcom BCM5762 Gigabit Ethernet" },
+
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57761,
                "Broadcom BCM57761 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57762,
@@ -138,8 +148,9 @@ static int  bnx_miibus_readreg(device_t, int, int);
 static int     bnx_miibus_writereg(device_t, int, int, int);
 static void    bnx_miibus_statchg(device_t);
 
-#ifdef DEVICE_POLLING
-static void    bnx_poll(struct ifnet *ifp, enum poll_cmd cmd, int count);
+#ifdef IFPOLL_ENABLE
+static void    bnx_npoll(struct ifnet *, struct ifpoll_info *);
+static void    bnx_npoll_compat(struct ifnet *, void *, int);
 #endif
 static void    bnx_intr_legacy(void *);
 static void    bnx_msi(void *);
@@ -147,10 +158,15 @@ static void       bnx_msi_oneshot(void *);
 static void    bnx_intr(struct bnx_softc *);
 static void    bnx_enable_intr(struct bnx_softc *);
 static void    bnx_disable_intr(struct bnx_softc *);
-static void    bnx_txeof(struct bnx_softc *, uint16_t);
-static void    bnx_rxeof(struct bnx_softc *, uint16_t);
-
-static void    bnx_start(struct ifnet *);
+static void    bnx_txeof(struct bnx_tx_ring *, uint16_t);
+static void    bnx_rxeof(struct bnx_rx_ret_ring *, uint16_t, int);
+static int     bnx_alloc_intr(struct bnx_softc *);
+static int     bnx_setup_intr(struct bnx_softc *);
+static void    bnx_free_intr(struct bnx_softc *);
+static void    bnx_teardown_intr(struct bnx_softc *, int);
+static void    bnx_check_intr(void *);
+
+static void    bnx_start(struct ifnet *, struct ifaltq_subque *);
 static int     bnx_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
 static void    bnx_init(void *);
 static void    bnx_stop(struct bnx_softc *);
@@ -158,6 +174,13 @@ static void        bnx_watchdog(struct ifnet *);
 static int     bnx_ifmedia_upd(struct ifnet *);
 static void    bnx_ifmedia_sts(struct ifnet *, struct ifmediareq *);
 static void    bnx_tick(void *);
+static void    bnx_serialize(struct ifnet *, enum ifnet_serialize);
+static void    bnx_deserialize(struct ifnet *, enum ifnet_serialize);
+static int     bnx_tryserialize(struct ifnet *, enum ifnet_serialize);
+#ifdef INVARIANTS
+static void    bnx_serialize_assert(struct ifnet *, enum ifnet_serialize,
+                   boolean_t);
+#endif
 
 static int     bnx_alloc_jumbo_mem(struct bnx_softc *);
 static void    bnx_free_jumbo_mem(struct bnx_softc *);
@@ -165,26 +188,32 @@ static struct bnx_jslot
                *bnx_jalloc(struct bnx_softc *);
 static void    bnx_jfree(void *);
 static void    bnx_jref(void *);
-static int     bnx_newbuf_std(struct bnx_softc *, int, int);
+static int     bnx_newbuf_std(struct bnx_rx_ret_ring *, int, int);
 static int     bnx_newbuf_jumbo(struct bnx_softc *, int, int);
-static void    bnx_setup_rxdesc_std(struct bnx_softc *, int);
+static void    bnx_setup_rxdesc_std(struct bnx_rx_std_ring *, int);
 static void    bnx_setup_rxdesc_jumbo(struct bnx_softc *, int);
-static int     bnx_init_rx_ring_std(struct bnx_softc *);
-static void    bnx_free_rx_ring_std(struct bnx_softc *);
+static int     bnx_init_rx_ring_std(struct bnx_rx_std_ring *);
+static void    bnx_free_rx_ring_std(struct bnx_rx_std_ring *);
 static int     bnx_init_rx_ring_jumbo(struct bnx_softc *);
 static void    bnx_free_rx_ring_jumbo(struct bnx_softc *);
-static void    bnx_free_tx_ring(struct bnx_softc *);
-static int     bnx_init_tx_ring(struct bnx_softc *);
-static int     bnx_dma_alloc(struct bnx_softc *);
+static void    bnx_free_tx_ring(struct bnx_tx_ring *);
+static int     bnx_init_tx_ring(struct bnx_tx_ring *);
+static int     bnx_create_tx_ring(struct bnx_tx_ring *);
+static void    bnx_destroy_tx_ring(struct bnx_tx_ring *);
+static int     bnx_create_rx_ret_ring(struct bnx_rx_ret_ring *);
+static void    bnx_destroy_rx_ret_ring(struct bnx_rx_ret_ring *);
+static int     bnx_dma_alloc(device_t);
 static void    bnx_dma_free(struct bnx_softc *);
 static int     bnx_dma_block_alloc(struct bnx_softc *, bus_size_t,
                    bus_dma_tag_t *, bus_dmamap_t *, void **, bus_addr_t *);
 static void    bnx_dma_block_free(bus_dma_tag_t, bus_dmamap_t, void *);
 static struct mbuf *
                bnx_defrag_shortdma(struct mbuf *);
-static int     bnx_encap(struct bnx_softc *, struct mbuf **, uint32_t *);
-static int     bnx_setup_tso(struct bnx_softc *, struct mbuf **,
+static int     bnx_encap(struct bnx_tx_ring *, struct mbuf **,
+                   uint32_t *, int *);
+static int     bnx_setup_tso(struct bnx_tx_ring *, struct mbuf **,
                    uint16_t *, uint16_t *);
+static void    bnx_setup_serialize(struct bnx_softc *);
 
 static void    bnx_reset(struct bnx_softc *);
 static int     bnx_chipinit(struct bnx_softc *);
@@ -201,10 +230,8 @@ static void        bnx_writemem_ind(struct bnx_softc *, uint32_t, uint32_t);
 #ifdef notdef
 static uint32_t        bnx_readreg_ind(struct bnx_softc *, uint32_t);
 #endif
-static void    bnx_writereg_ind(struct bnx_softc *, uint32_t, uint32_t);
 static void    bnx_writemem_direct(struct bnx_softc *, uint32_t, uint32_t);
 static void    bnx_writembx(struct bnx_softc *, int, int);
-static uint8_t bnx_nvram_getbyte(struct bnx_softc *, int, uint8_t *);
 static int     bnx_read_nvram(struct bnx_softc *, caddr_t, int, int);
 static uint8_t bnx_eeprom_getbyte(struct bnx_softc *, uint32_t, uint8_t *);
 static int     bnx_read_eeprom(struct bnx_softc *, caddr_t, uint32_t, size_t);
@@ -220,6 +247,8 @@ static int  bnx_get_eaddr_eeprom(struct bnx_softc *, uint8_t[]);
 static int     bnx_get_eaddr(struct bnx_softc *, uint8_t[]);
 
 static void    bnx_coal_change(struct bnx_softc *);
+static int     bnx_sysctl_force_defrag(SYSCTL_HANDLER_ARGS);
+static int     bnx_sysctl_tx_wreg(SYSCTL_HANDLER_ARGS);
 static int     bnx_sysctl_rx_coal_ticks(SYSCTL_HANDLER_ARGS);
 static int     bnx_sysctl_tx_coal_ticks(SYSCTL_HANDLER_ARGS);
 static int     bnx_sysctl_rx_coal_bds(SYSCTL_HANDLER_ARGS);
@@ -250,7 +279,7 @@ static device_method_t bnx_methods[] = {
        DEVMETHOD(miibus_writereg,      bnx_miibus_writereg),
        DEVMETHOD(miibus_statchg,       bnx_miibus_statchg),
 
-       { 0, 0 }
+       DEVMETHOD_END
 };
 
 static DEFINE_CLASS_0(bnx, bnx_driver, bnx_methods, sizeof(struct bnx_softc));
@@ -266,10 +295,6 @@ bnx_readmem_ind(struct bnx_softc *sc, uint32_t off)
        device_t dev = sc->bnx_dev;
        uint32_t val;
 
-       if (sc->bnx_asicrev == BGE_ASICREV_BCM5906 &&
-           off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4)
-               return 0;
-
        pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
        val = pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4);
        pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
@@ -281,35 +306,11 @@ bnx_writemem_ind(struct bnx_softc *sc, uint32_t off, uint32_t val)
 {
        device_t dev = sc->bnx_dev;
 
-       if (sc->bnx_asicrev == BGE_ASICREV_BCM5906 &&
-           off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4)
-               return;
-
        pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
        pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
        pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
 }
 
-#ifdef notdef
-static uint32_t
-bnx_readreg_ind(struct bnx_softc *sc, uin32_t off)
-{
-       device_t dev = sc->bnx_dev;
-
-       pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
-       return(pci_read_config(dev, BGE_PCI_REG_DATA, 4));
-}
-#endif
-
-static void
-bnx_writereg_ind(struct bnx_softc *sc, uint32_t off, uint32_t val)
-{
-       device_t dev = sc->bnx_dev;
-
-       pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
-       pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);
-}
-
 static void
 bnx_writemem_direct(struct bnx_softc *sc, uint32_t off, uint32_t val)
 {
@@ -319,82 +320,16 @@ bnx_writemem_direct(struct bnx_softc *sc, uint32_t off, uint32_t val)
 static void
 bnx_writembx(struct bnx_softc *sc, int off, int val)
 {
-       if (sc->bnx_asicrev == BGE_ASICREV_BCM5906)
-               off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI;
-
        CSR_WRITE_4(sc, off, val);
 }
 
-static uint8_t
-bnx_nvram_getbyte(struct bnx_softc *sc, int addr, uint8_t *dest)
-{
-       uint32_t access, byte = 0;
-       int i;
-
-       /* Lock. */
-       CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
-       for (i = 0; i < 8000; i++) {
-               if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1)
-                       break;
-               DELAY(20);
-       }
-       if (i == 8000)
-               return (1);
-
-       /* Enable access. */
-       access = CSR_READ_4(sc, BGE_NVRAM_ACCESS);
-       CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access | BGE_NVRAMACC_ENABLE);
-
-       CSR_WRITE_4(sc, BGE_NVRAM_ADDR, addr & 0xfffffffc);
-       CSR_WRITE_4(sc, BGE_NVRAM_CMD, BGE_NVRAM_READCMD);
-       for (i = 0; i < BNX_TIMEOUT * 10; i++) {
-               DELAY(10);
-               if (CSR_READ_4(sc, BGE_NVRAM_CMD) & BGE_NVRAMCMD_DONE) {
-                       DELAY(10);
-                       break;
-               }
-       }
-
-       if (i == BNX_TIMEOUT * 10) {
-               if_printf(&sc->arpcom.ac_if, "nvram read timed out\n");
-               return (1);
-       }
-
-       /* Get result. */
-       byte = CSR_READ_4(sc, BGE_NVRAM_RDDATA);
-
-       *dest = (bswap32(byte) >> ((addr % 4) * 8)) & 0xFF;
-
-       /* Disable access. */
-       CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access);
-
-       /* Unlock. */
-       CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_CLR1);
-       CSR_READ_4(sc, BGE_NVRAM_SWARB);
-
-       return (0);
-}
-
 /*
  * Read a sequence of bytes from NVRAM.
  */
 static int
 bnx_read_nvram(struct bnx_softc *sc, caddr_t dest, int off, int cnt)
 {
-       int err = 0, i;
-       uint8_t byte = 0;
-
-       if (sc->bnx_asicrev != BGE_ASICREV_BCM5906)
-               return (1);
-
-       for (i = 0; i < cnt; i++) {
-               err = bnx_nvram_getbyte(sc, off + i, &byte);
-               if (err)
-                       break;
-               *(dest + i) = byte;
-       }
-
-       return (err ? 1 : 0);
+       return (1);
 }
 
 /*
@@ -520,10 +455,6 @@ bnx_miibus_writereg(device_t dev, int phy, int reg, int val)
        KASSERT(phy == sc->bnx_phyno,
            ("invalid phyno %d, should be %d", phy, sc->bnx_phyno));
 
-       if (sc->bnx_asicrev == BGE_ASICREV_BCM5906 &&
-           (reg == BRGPHY_MII_1000CTL || reg == BRGPHY_MII_AUXCTL))
-              return 0;
-
        /* Clear the autopoll bit if set, otherwise may trigger PCI errors. */
        if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) {
                CSR_WRITE_4(sc, BGE_MI_MODE,
@@ -575,10 +506,7 @@ bnx_miibus_statchg(device_t dev)
                case IFM_1000_T:
                case IFM_1000_SX:
                case IFM_2500_SX:
-                       if (sc->bnx_asicrev != BGE_ASICREV_BCM5906)
-                               sc->bnx_link = 1;
-                       else
-                               sc->bnx_link = 0;
+                       sc->bnx_link = 1;
                        break;
                default:
                        sc->bnx_link = 0;
@@ -777,12 +705,13 @@ bnx_jfree(void *arg)
  * Initialize a standard receive ring descriptor.
  */
 static int
-bnx_newbuf_std(struct bnx_softc *sc, int i, int init)
+bnx_newbuf_std(struct bnx_rx_ret_ring *ret, int i, int init)
 {
        struct mbuf *m_new = NULL;
        bus_dma_segment_t seg;
        bus_dmamap_t map;
        int error, nsegs;
+       struct bnx_rx_buf *rb;
 
        m_new = m_getcl(init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
        if (m_new == NULL)
@@ -790,45 +719,44 @@ bnx_newbuf_std(struct bnx_softc *sc, int i, int init)
        m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
        m_adj(m_new, ETHER_ALIGN);
 
-       error = bus_dmamap_load_mbuf_segment(sc->bnx_cdata.bnx_rx_mtag,
-                       sc->bnx_cdata.bnx_rx_tmpmap, m_new,
-                       &seg, 1, &nsegs, BUS_DMA_NOWAIT);
+       error = bus_dmamap_load_mbuf_segment(ret->bnx_rx_mtag,
+           ret->bnx_rx_tmpmap, m_new, &seg, 1, &nsegs, BUS_DMA_NOWAIT);
        if (error) {
                m_freem(m_new);
                return error;
        }
 
+       rb = &ret->bnx_std->bnx_rx_std_buf[i];
+
        if (!init) {
-               bus_dmamap_sync(sc->bnx_cdata.bnx_rx_mtag,
-                               sc->bnx_cdata.bnx_rx_std_dmamap[i],
-                               BUS_DMASYNC_POSTREAD);
-               bus_dmamap_unload(sc->bnx_cdata.bnx_rx_mtag,
-                       sc->bnx_cdata.bnx_rx_std_dmamap[i]);
+               bus_dmamap_sync(ret->bnx_rx_mtag, rb->bnx_rx_dmamap,
+                   BUS_DMASYNC_POSTREAD);
+               bus_dmamap_unload(ret->bnx_rx_mtag, rb->bnx_rx_dmamap);
        }
 
-       map = sc->bnx_cdata.bnx_rx_tmpmap;
-       sc->bnx_cdata.bnx_rx_tmpmap = sc->bnx_cdata.bnx_rx_std_dmamap[i];
-       sc->bnx_cdata.bnx_rx_std_dmamap[i] = map;
+       map = ret->bnx_rx_tmpmap;
+       ret->bnx_rx_tmpmap = rb->bnx_rx_dmamap;
+       rb->bnx_rx_dmamap = map;
 
-       sc->bnx_cdata.bnx_rx_std_chain[i].bnx_mbuf = m_new;
-       sc->bnx_cdata.bnx_rx_std_chain[i].bnx_paddr = seg.ds_addr;
+       rb->bnx_rx_mbuf = m_new;
+       rb->bnx_rx_paddr = seg.ds_addr;
 
-       bnx_setup_rxdesc_std(sc, i);
+       bnx_setup_rxdesc_std(ret->bnx_std, i);
        return 0;
 }
 
 static void
-bnx_setup_rxdesc_std(struct bnx_softc *sc, int i)
+bnx_setup_rxdesc_std(struct bnx_rx_std_ring *std, int i)
 {
-       struct bnx_rxchain *rc;
+       const struct bnx_rx_buf *rb;
        struct bge_rx_bd *r;
 
-       rc = &sc->bnx_cdata.bnx_rx_std_chain[i];
-       r = &sc->bnx_ldata.bnx_rx_std_ring[i];
+       rb = &std->bnx_rx_std_buf[i];
+       r = &std->bnx_rx_std_ring[i];
 
-       r->bge_addr.bge_addr_lo = BGE_ADDR_LO(rc->bnx_paddr);
-       r->bge_addr.bge_addr_hi = BGE_ADDR_HI(rc->bnx_paddr);
-       r->bge_len = rc->bnx_mbuf->m_len;
+       r->bge_addr.bge_addr_lo = BGE_ADDR_LO(rb->bnx_rx_paddr);
+       r->bge_addr.bge_addr_hi = BGE_ADDR_HI(rb->bnx_rx_paddr);
+       r->bge_len = rb->bnx_rx_mbuf->m_len;
        r->bge_idx = i;
        r->bge_flags = BGE_RXBDFLAG_END;
 }
@@ -873,8 +801,8 @@ bnx_newbuf_jumbo(struct bnx_softc *sc, int i, int init)
        paddr += ETHER_ALIGN;
 
        /* Save necessary information */
-       sc->bnx_cdata.bnx_rx_jumbo_chain[i].bnx_mbuf = m_new;
-       sc->bnx_cdata.bnx_rx_jumbo_chain[i].bnx_paddr = paddr;
+       sc->bnx_cdata.bnx_rx_jumbo_chain[i].bnx_rx_mbuf = m_new;
+       sc->bnx_cdata.bnx_rx_jumbo_chain[i].bnx_rx_paddr = paddr;
 
        /* Set up the descriptor. */
        bnx_setup_rxdesc_jumbo(sc, i);
@@ -885,51 +813,50 @@ static void
 bnx_setup_rxdesc_jumbo(struct bnx_softc *sc, int i)
 {
        struct bge_rx_bd *r;
-       struct bnx_rxchain *rc;
+       struct bnx_rx_buf *rc;
 
        r = &sc->bnx_ldata.bnx_rx_jumbo_ring[i];
        rc = &sc->bnx_cdata.bnx_rx_jumbo_chain[i];
 
-       r->bge_addr.bge_addr_lo = BGE_ADDR_LO(rc->bnx_paddr);
-       r->bge_addr.bge_addr_hi = BGE_ADDR_HI(rc->bnx_paddr);
-       r->bge_len = rc->bnx_mbuf->m_len;
+       r->bge_addr.bge_addr_lo = BGE_ADDR_LO(rc->bnx_rx_paddr);
+       r->bge_addr.bge_addr_hi = BGE_ADDR_HI(rc->bnx_rx_paddr);
+       r->bge_len = rc->bnx_rx_mbuf->m_len;
        r->bge_idx = i;
        r->bge_flags = BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING;
 }
 
 static int
-bnx_init_rx_ring_std(struct bnx_softc *sc)
+bnx_init_rx_ring_std(struct bnx_rx_std_ring *std)
 {
        int i, error;
 
        for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
-               error = bnx_newbuf_std(sc, i, 1);
+               /* Use the first RX return ring's tmp RX mbuf DMA map */
+               error = bnx_newbuf_std(&std->bnx_sc->bnx_rx_ret_ring[0], i, 1);
                if (error)
                        return error;
-       };
+       }
 
-       sc->bnx_std = BGE_STD_RX_RING_CNT - 1;
-       bnx_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bnx_std);
+       std->bnx_rx_std = BGE_STD_RX_RING_CNT - 1;
+       bnx_writembx(std->bnx_sc, BGE_MBX_RX_STD_PROD_LO, std->bnx_rx_std);
 
        return(0);
 }
 
 static void
-bnx_free_rx_ring_std(struct bnx_softc *sc)
+bnx_free_rx_ring_std(struct bnx_rx_std_ring *std)
 {
        int i;
 
        for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
-               struct bnx_rxchain *rc = &sc->bnx_cdata.bnx_rx_std_chain[i];
+               struct bnx_rx_buf *rb = &std->bnx_rx_std_buf[i];
 
-               if (rc->bnx_mbuf != NULL) {
-                       bus_dmamap_unload(sc->bnx_cdata.bnx_rx_mtag,
-                                         sc->bnx_cdata.bnx_rx_std_dmamap[i]);
-                       m_freem(rc->bnx_mbuf);
-                       rc->bnx_mbuf = NULL;
+               if (rb->bnx_rx_mbuf != NULL) {
+                       bus_dmamap_unload(std->bnx_rx_mtag, rb->bnx_rx_dmamap);
+                       m_freem(rb->bnx_rx_mbuf);
+                       rb->bnx_rx_mbuf = NULL;
                }
-               bzero(&sc->bnx_ldata.bnx_rx_std_ring[i],
-                   sizeof(struct bge_rx_bd));
+               bzero(&std->bnx_rx_std_ring[i], sizeof(struct bge_rx_bd));
        }
 }
 
@@ -943,7 +870,7 @@ bnx_init_rx_ring_jumbo(struct bnx_softc *sc)
                error = bnx_newbuf_jumbo(sc, i, 1);
                if (error)
                        return error;
-       };
+       }
 
        sc->bnx_jumbo = BGE_JUMBO_RX_RING_CNT - 1;
 
@@ -962,11 +889,11 @@ bnx_free_rx_ring_jumbo(struct bnx_softc *sc)
        int i;
 
        for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
-               struct bnx_rxchain *rc = &sc->bnx_cdata.bnx_rx_jumbo_chain[i];
+               struct bnx_rx_buf *rc = &sc->bnx_cdata.bnx_rx_jumbo_chain[i];
 
-               if (rc->bnx_mbuf != NULL) {
-                       m_freem(rc->bnx_mbuf);
-                       rc->bnx_mbuf = NULL;
+               if (rc->bnx_rx_mbuf != NULL) {
+                       m_freem(rc->bnx_rx_mbuf);
+                       rc->bnx_rx_mbuf = NULL;
                }
                bzero(&sc->bnx_ldata.bnx_rx_jumbo_ring[i],
                    sizeof(struct bge_rx_bd));
@@ -974,32 +901,33 @@ bnx_free_rx_ring_jumbo(struct bnx_softc *sc)
 }
 
 static void
-bnx_free_tx_ring(struct bnx_softc *sc)
+bnx_free_tx_ring(struct bnx_tx_ring *txr)
 {
        int i;
 
        for (i = 0; i < BGE_TX_RING_CNT; i++) {
-               if (sc->bnx_cdata.bnx_tx_chain[i] != NULL) {
-                       bus_dmamap_unload(sc->bnx_cdata.bnx_tx_mtag,
-                                         sc->bnx_cdata.bnx_tx_dmamap[i]);
-                       m_freem(sc->bnx_cdata.bnx_tx_chain[i]);
-                       sc->bnx_cdata.bnx_tx_chain[i] = NULL;
+               struct bnx_tx_buf *buf = &txr->bnx_tx_buf[i];
+
+               if (buf->bnx_tx_mbuf != NULL) {
+                       bus_dmamap_unload(txr->bnx_tx_mtag,
+                           buf->bnx_tx_dmamap);
+                       m_freem(buf->bnx_tx_mbuf);
+                       buf->bnx_tx_mbuf = NULL;
                }
-               bzero(&sc->bnx_ldata.bnx_tx_ring[i],
-                   sizeof(struct bge_tx_bd));
+               bzero(&txr->bnx_tx_ring[i], sizeof(struct bge_tx_bd));
        }
+       txr->bnx_tx_saved_considx = BNX_TXCONS_UNSET;
 }
 
 static int
-bnx_init_tx_ring(struct bnx_softc *sc)
+bnx_init_tx_ring(struct bnx_tx_ring *txr)
 {
-       sc->bnx_txcnt = 0;
-       sc->bnx_tx_saved_considx = 0;
-       sc->bnx_tx_prodidx = 0;
+       txr->bnx_tx_cnt = 0;
+       txr->bnx_tx_saved_considx = 0;
+       txr->bnx_tx_prodidx = 0;
 
        /* Initialize transmit producer index for host-memory send ring. */
-       bnx_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bnx_tx_prodidx);
-       bnx_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
+       bnx_writembx(txr->bnx_sc, txr->bnx_tx_mbx, txr->bnx_tx_prodidx);
 
        return(0);
 }
@@ -1085,6 +1013,11 @@ bnx_chipinit(struct bnx_softc *sc)
                        CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl);
                }
                if (sc->bnx_chiprev != BGE_CHIPREV_57765_AX) {
+                       /* Fix transmit hangs */
+                       val = CSR_READ_4(sc, BGE_CPMU_PADRNG_CTL);
+                       val |= BGE_CPMU_PADRNG_CTL_RDIV2;
+                       CSR_WRITE_4(sc, BGE_CPMU_PADRNG_CTL, val);
+
                        mode_ctl = CSR_READ_4(sc, BGE_MODE_CTL);
                        val = mode_ctl & ~BGE_MODECTL_PCIE_PORTS;
 
@@ -1126,6 +1059,7 @@ bnx_chipinit(struct bnx_softc *sc)
         * disabled.
         */
        if (sc->bnx_asicrev != BGE_ASICREV_BCM5717 &&
+           sc->bnx_asicrev != BGE_ASICREV_BCM5762 &&
            !BNX_IS_57765_FAMILY(sc))
                dma_rw_ctl |= BGE_PCIDMARWCTL_TAGGED_STATUS_WA;
        if (bootverbose) {
@@ -1152,21 +1086,14 @@ bnx_chipinit(struct bnx_softc *sc)
        /* Set the timer prescaler (always 66Mhz) */
        CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/);
 
-       if (sc->bnx_asicrev == BGE_ASICREV_BCM5906) {
-               DELAY(40);      /* XXX */
-
-               /* Put PHY into ready state */
-               BNX_CLRBIT(sc, BGE_MISC_CFG, BGE_MISCCFG_EPHY_IDDQ);
-               CSR_READ_4(sc, BGE_MISC_CFG); /* Flush */
-               DELAY(40);
-       }
-
        return(0);
 }
 
 static int
 bnx_blockinit(struct bnx_softc *sc)
 {
+       struct bnx_tx_ring *txr = &sc->bnx_tx_ring[0];
+       struct bnx_rx_ret_ring *ret = &sc->bnx_rx_ret_ring[0];
        struct bge_rcb *rcb;
        bus_size_t vrcb;
        bge_hostaddr taddr;
@@ -1191,10 +1118,6 @@ bnx_blockinit(struct bnx_softc *sc)
                        CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x2a);
                        CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xa0);
                }
-       } else if (sc->bnx_asicrev == BGE_ASICREV_BCM5906) {
-               CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
-               CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x04);
-               CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x10);
        } else {
                CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
                CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
@@ -1285,9 +1208,9 @@ bnx_blockinit(struct bnx_softc *sc)
        /* Initialize the standard receive producer ring control block. */
        rcb = &sc->bnx_ldata.bnx_info.bnx_std_rx_rcb;
        rcb->bge_hostaddr.bge_addr_lo =
-           BGE_ADDR_LO(sc->bnx_ldata.bnx_rx_std_ring_paddr);
+           BGE_ADDR_LO(sc->bnx_rx_std_ring.bnx_rx_std_ring_paddr);
        rcb->bge_hostaddr.bge_addr_hi =
-           BGE_ADDR_HI(sc->bnx_ldata.bnx_rx_std_ring_paddr);
+           BGE_ADDR_HI(sc->bnx_rx_std_ring.bnx_rx_std_ring_paddr);
        if (BNX_IS_57765_PLUS(sc)) {
                /*
                 * Bits 31-16: Programmable ring size (2048, 1024, 512, .., 32)
@@ -1351,15 +1274,6 @@ bnx_blockinit(struct bnx_softc *sc)
                bnx_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
        }
 
-       /* Choose de-pipeline mode for BCM5906 A0, A1 and A2. */
-       if (sc->bnx_asicrev == BGE_ASICREV_BCM5906 &&
-           (sc->bnx_chipid == BGE_CHIPID_BCM5906_A0 ||
-            sc->bnx_chipid == BGE_CHIPID_BCM5906_A1 ||
-            sc->bnx_chipid == BGE_CHIPID_BCM5906_A2)) {
-               CSR_WRITE_4(sc, BGE_ISO_PKT_TX,
-                   (CSR_READ_4(sc, BGE_ISO_PKT_TX) & ~3) | 2);
-       }
-
        /*
         * The BD ring replenish thresholds control how often the
         * hardware fetches new BD's from the producer rings in host
@@ -1388,7 +1302,8 @@ bnx_blockinit(struct bnx_softc *sc)
         */
        if (BNX_IS_5717_PLUS(sc))
                limit = 4;
-       else if (BNX_IS_57765_FAMILY(sc))
+       else if (BNX_IS_57765_FAMILY(sc) ||
+           sc->bnx_asicrev == BGE_ASICREV_BCM5762)
                limit = 2;
        else
                limit = 1;
@@ -1402,7 +1317,7 @@ bnx_blockinit(struct bnx_softc *sc)
 
        /* Configure send ring RCB 0 (we use only the first ring) */
        vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
-       BGE_HOSTADDR(taddr, sc->bnx_ldata.bnx_tx_ring_paddr);
+       BGE_HOSTADDR(taddr, txr->bnx_tx_ring_paddr);
        RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
        RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
        if (BNX_IS_5717_PLUS(sc)) {
@@ -1422,7 +1337,8 @@ bnx_blockinit(struct bnx_softc *sc)
        if (BNX_IS_5717_PLUS(sc)) {
                /* Should be 17, use 16 until we get an SRAM map. */
                limit = 16;
-       } else if (BNX_IS_57765_FAMILY(sc)) {
+       } else if (BNX_IS_57765_FAMILY(sc) ||
+           sc->bnx_asicrev == BGE_ASICREV_BCM5762) {
                limit = 4;
        } else {
                limit = 1;
@@ -1446,12 +1362,12 @@ bnx_blockinit(struct bnx_softc *sc)
         * within the host, so the nicaddr field in the RCB isn't used.
         */
        vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
-       BGE_HOSTADDR(taddr, sc->bnx_ldata.bnx_rx_return_ring_paddr);
+       BGE_HOSTADDR(taddr, ret->bnx_rx_ret_ring_paddr);
        RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
        RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
        RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
        RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
-           BGE_RCB_MAXLEN_FLAGS(sc->bnx_return_ring_cnt, 0));
+           BGE_RCB_MAXLEN_FLAGS(BNX_RETURN_RING_CNT, 0));
 
        /* Set random backoff seed for TX */
        CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
@@ -1462,7 +1378,8 @@ bnx_blockinit(struct bnx_softc *sc)
 
        /* Set inter-packet gap */
        val = 0x2620;
-       if (sc->bnx_asicrev == BGE_ASICREV_BCM5720) {
+       if (sc->bnx_asicrev == BGE_ASICREV_BCM5720 ||
+           sc->bnx_asicrev == BGE_ASICREV_BCM5762) {
                val |= CSR_READ_4(sc, BGE_TX_LENGTHS) &
                    (BGE_TXLEN_JMB_FRM_LEN_MSK | BGE_TXLEN_CNT_DN_VAL_MSK);
        }
@@ -1573,15 +1490,21 @@ bnx_blockinit(struct bnx_softc *sc)
        DELAY(40);
 
        if (BNX_IS_57765_PLUS(sc)) {
-               uint32_t dmactl;
+               uint32_t dmactl, dmactl_reg;
+
+               if (sc->bnx_asicrev == BGE_ASICREV_BCM5762)
+                       dmactl_reg = BGE_RDMA_RSRVCTRL2;
+               else
+                       dmactl_reg = BGE_RDMA_RSRVCTRL;
 
-               dmactl = CSR_READ_4(sc, BGE_RDMA_RSRVCTRL);
+               dmactl = CSR_READ_4(sc, dmactl_reg);
                /*
                 * Adjust tx margin to prevent TX data corruption and
                 * fix internal FIFO overflow.
                 */
                if (sc->bnx_asicrev == BGE_ASICREV_BCM5719 ||
-                   sc->bnx_asicrev == BGE_ASICREV_BCM5720) {
+                   sc->bnx_asicrev == BGE_ASICREV_BCM5720 ||
+                   sc->bnx_asicrev == BGE_ASICREV_BCM5762) {
                        dmactl &= ~(BGE_RDMA_RSRVCTRL_FIFO_LWM_MASK |
                            BGE_RDMA_RSRVCTRL_FIFO_HWM_MASK |
                            BGE_RDMA_RSRVCTRL_TXMRGN_MASK);
@@ -1594,7 +1517,7 @@ bnx_blockinit(struct bnx_softc *sc)
                 * The fix is to limit the number of RX BDs
                 * the hardware would fetch at a time.
                 */
-               CSR_WRITE_4(sc, BGE_RDMA_RSRVCTRL,
+               CSR_WRITE_4(sc, dmactl_reg,
                    dmactl | BGE_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
        }
 
@@ -1603,13 +1526,21 @@ bnx_blockinit(struct bnx_softc *sc)
                    CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) |
                    BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_4K |
                    BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K);
-       } else if (sc->bnx_asicrev == BGE_ASICREV_BCM5720) {
+       } else if (sc->bnx_asicrev == BGE_ASICREV_BCM5720 ||
+           sc->bnx_asicrev == BGE_ASICREV_BCM5762) {
+               uint32_t ctrl_reg;
+
+               if (sc->bnx_asicrev == BGE_ASICREV_BCM5762)
+                       ctrl_reg = BGE_RDMA_LSO_CRPTEN_CTRL2;
+               else
+                       ctrl_reg = BGE_RDMA_LSO_CRPTEN_CTRL;
+
                /*
                 * Allow 4KB burst length reads for non-LSO frames.
                 * Enable 512B burst length reads for buffer descriptors.
                 */
-               CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL,
-                   CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) |
+               CSR_WRITE_4(sc, ctrl_reg,
+                   CSR_READ_4(sc, ctrl_reg) |
                    BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_512 |
                    BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K);
        }
@@ -1625,7 +1556,8 @@ bnx_blockinit(struct bnx_softc *sc)
                    BGE_RDMAMODE_MBUF_RBD_CRPT_ATTN |
                    BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN;
        }
-       if (sc->bnx_asicrev == BGE_ASICREV_BCM5720) {
+       if (sc->bnx_asicrev == BGE_ASICREV_BCM5720 ||
+           sc->bnx_asicrev == BGE_ASICREV_BCM5762) {
                val |= CSR_READ_4(sc, BGE_RDMA_MODE) &
                    BGE_RDMAMODE_H2BNC_VLAN_DET;
                /*
@@ -1634,6 +1566,8 @@ bnx_blockinit(struct bnx_softc *sc)
                 */
                val &= ~BGE_RDMAMODE_MULT_DMA_RD_DIS;
        }
+       if (sc->bnx_asicrev == BGE_ASICREV_BCM57766)
+               val |= BGE_RDMAMODE_JMB_2K_MMRR;
        if (sc->bnx_flags & BNX_FLAG_TSO)
                val |= BGE_RDMAMODE_TSO4_ENABLE;
        val |= BGE_RDMAMODE_FIFO_LONG_BURST;
@@ -1748,26 +1682,22 @@ bnx_attach(device_t dev)
 {
        struct ifnet *ifp;
        struct bnx_softc *sc;
-       uint32_t hwcfg = 0, misccfg;
-       int error = 0, rid, capmask;
+       uint32_t hwcfg = 0;
+       int error = 0, rid, capmask, i;
        uint8_t ether_addr[ETHER_ADDR_LEN];
-       uint16_t product, vendor;
-       driver_intr_t *intr_func;
+       uint16_t product;
        uintptr_t mii_priv = 0;
-       u_int intr_flags;
 #ifdef BNX_TSO_DEBUG
        char desc[32];
-       int i;
 #endif
 
        sc = device_get_softc(dev);
        sc->bnx_dev = dev;
        callout_init_mp(&sc->bnx_stat_timer);
-       callout_init_mp(&sc->bnx_intr_timer);
        lwkt_serialize_init(&sc->bnx_jslot_serializer);
+       lwkt_serialize_init(&sc->bnx_main_serialize);
 
        product = pci_get_device(dev);
-       vendor = pci_get_vendor(dev);
 
 #ifndef BURN_BRIDGES
        if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
@@ -1813,9 +1743,13 @@ bnx_attach(device_t dev)
 
                switch (product) {
                case PCI_PRODUCT_BROADCOM_BCM5717:
+               case PCI_PRODUCT_BROADCOM_BCM5717C:
                case PCI_PRODUCT_BROADCOM_BCM5718:
                case PCI_PRODUCT_BROADCOM_BCM5719:
                case PCI_PRODUCT_BROADCOM_BCM5720_ALT:
+               case PCI_PRODUCT_BROADCOM_BCM5725:
+               case PCI_PRODUCT_BROADCOM_BCM5727:
+               case PCI_PRODUCT_BROADCOM_BCM5762:
                        sc->bnx_chipid = pci_read_config(dev,
                            BGE_PCI_GEN2_PRODID_ASICREV, 4);
                        break;
@@ -1840,6 +1774,9 @@ bnx_attach(device_t dev)
                        break;
                }
        }
+       if (sc->bnx_chipid == BGE_CHIPID_BCM5717_C0)
+               sc->bnx_chipid = BGE_CHIPID_BCM5720_A0;
+
        sc->bnx_asicrev = BGE_ASICREV(sc->bnx_chipid);
        sc->bnx_chiprev = BGE_CHIPREV(sc->bnx_chipid);
 
@@ -1850,12 +1787,15 @@ bnx_attach(device_t dev)
                sc->bnx_flags |= BNX_FLAG_5717_PLUS | BNX_FLAG_57765_PLUS;
                break;
 
+       case BGE_ASICREV_BCM5762:
+               sc->bnx_flags |= BNX_FLAG_57765_PLUS;
+               break;
+
        case BGE_ASICREV_BCM57765:
        case BGE_ASICREV_BCM57766:
                sc->bnx_flags |= BNX_FLAG_57765_FAMILY | BNX_FLAG_57765_PLUS;
                break;
        }
-       sc->bnx_flags |= BNX_FLAG_SHORTDMA;
 
        sc->bnx_flags |= BNX_FLAG_TSO;
        if (sc->bnx_asicrev == BGE_ASICREV_BCM5719 &&
@@ -1877,13 +1817,11 @@ bnx_attach(device_t dev)
                 * For the rest of the chips in these two families, we will
                 * have to poll the status block at high rate (10ms currently)
                 * to check whether the interrupt is hosed or not.
-                * See bnx_intr_check() for details.
+                * See bnx_check_intr() for details.
                 */
                sc->bnx_flags |= BNX_FLAG_STATUSTAG_BUG;
        }
 
-       misccfg = CSR_READ_4(sc, BGE_MISC_CFG) & BGE_MISCCFG_BOARD_ID_MASK;
-
        sc->bnx_pciecap = pci_get_pciecap_ptr(sc->bnx_dev);
        if (sc->bnx_asicrev == BGE_ASICREV_BCM5719 ||
            sc->bnx_asicrev == BGE_ASICREV_BCM5720)
@@ -1906,25 +1844,8 @@ bnx_attach(device_t dev)
        }
 
        mii_priv |= BRGPHY_FLAG_WIRESPEED;
-
-       /*
-        * Allocate interrupt
-        */
-       sc->bnx_irq_type = pci_alloc_1intr(dev, bnx_msi_enable, &sc->bnx_irq_rid,
-           &intr_flags);
-
-       sc->bnx_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->bnx_irq_rid,
-           intr_flags);
-       if (sc->bnx_irq == NULL) {
-               device_printf(dev, "couldn't map interrupt\n");
-               error = ENXIO;
-               goto fail;
-       }
-
-       if (sc->bnx_irq_type == PCI_INTR_TYPE_MSI) {
-               sc->bnx_flags |= BNX_FLAG_ONESHOT_MSI;
-               bnx_enable_msi(sc);
-       }
+       if (sc->bnx_chipid == BGE_CHIPID_BCM5762_A0)
+               mii_priv |= BRGPHY_FLAG_5762_A0;
 
        /* Initialize if_name earlier, so if_printf could be used */
        ifp = &sc->arpcom.ac_if;
@@ -1948,17 +1869,24 @@ bnx_attach(device_t dev)
                goto fail;
        }
 
-       if (BNX_IS_57765_PLUS(sc)) {
-               sc->bnx_return_ring_cnt = BGE_RETURN_RING_CNT;
-       } else {
-               /* 5705/5750 limits RX return ring to 512 entries. */
-               sc->bnx_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
-       }
+       /* XXX */
+       sc->bnx_tx_ringcnt = 1;
+       sc->bnx_rx_retcnt = 1;
+
+       error = bnx_dma_alloc(dev);
+       if (error)
+               goto fail;
 
-       error = bnx_dma_alloc(sc);
+       /*
+        * Allocate interrupt
+        */
+       error = bnx_alloc_intr(sc);
        if (error)
                goto fail;
 
+       /* Setup serializers */
+       bnx_setup_serialize(sc);
+
        /* Set default tuneable values. */
        sc->bnx_rx_coal_ticks = BNX_RX_COAL_TICKS_DEF;
        sc->bnx_tx_coal_ticks = BNX_TX_COAL_TICKS_DEF;
@@ -1972,15 +1900,19 @@ bnx_attach(device_t dev)
        ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
        ifp->if_ioctl = bnx_ioctl;
        ifp->if_start = bnx_start;
-#ifdef DEVICE_POLLING
-       ifp->if_poll = bnx_poll;
+#ifdef IFPOLL_ENABLE
+       ifp->if_npoll = bnx_npoll;
 #endif
        ifp->if_watchdog = bnx_watchdog;
        ifp->if_init = bnx_init;
+       ifp->if_serialize = bnx_serialize;
+       ifp->if_deserialize = bnx_deserialize;
+       ifp->if_tryserialize = bnx_tryserialize;
+#ifdef INVARIANTS
+       ifp->if_serialize_assert = bnx_serialize_assert;
+#endif
        ifp->if_mtu = ETHERMTU;
        ifp->if_capabilities = IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
-       ifq_set_maxlen(&ifp->if_snd, BGE_TX_RING_CNT - 1);
-       ifq_set_ready(&ifp->if_snd);
 
        ifp->if_capabilities |= IFCAP_HWCSUM;
        ifp->if_hwassist = BNX_CSUM_FEATURES;
@@ -1990,6 +1922,10 @@ bnx_attach(device_t dev)
        }
        ifp->if_capenable = ifp->if_capabilities;
 
+       ifq_set_maxlen(&ifp->if_snd, BGE_TX_RING_CNT - 1);
+       ifq_set_ready(&ifp->if_snd);
+       ifq_set_subq_cnt(&ifp->if_snd, sc->bnx_tx_ringcnt);
+
        /*
         * Figure out what sort of media we have by checking the
         * hardware config word in the first 32k of NIC internal memory,
@@ -2150,11 +2086,18 @@ bnx_attach(device_t dev)
         * consumes a lot of CPU cycles, so leave it off by
         * default.
         */
-       SYSCTL_ADD_INT(&sc->bnx_sysctl_ctx,
+       SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
            SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
-           "force_defrag", CTLFLAG_RW, &sc->bnx_force_defrag, 0,
+           "force_defrag", CTLTYPE_INT | CTLFLAG_RW,
+           sc, 0, bnx_sysctl_force_defrag, "I",
            "Force defragment on TX path");
 
+       SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
+           SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
+           "tx_wreg", CTLTYPE_INT | CTLFLAG_RW,
+           sc, 0, bnx_sysctl_tx_wreg, "I",
+           "# of segments before writing to hardware register");
+
        SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
            SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
            "rx_coal_bds_int", CTLTYPE_INT | CTLFLAG_RW,
@@ -2180,30 +2123,34 @@ bnx_attach(device_t dev)
         */
        ether_ifattach(ifp, ether_addr, NULL);
 
-       if (sc->bnx_irq_type == PCI_INTR_TYPE_MSI) {
-               if (sc->bnx_flags & BNX_FLAG_ONESHOT_MSI) {
-                       intr_func = bnx_msi_oneshot;
-                       if (bootverbose)
-                               device_printf(dev, "oneshot MSI\n");
-               } else {
-                       intr_func = bnx_msi;
-               }
-       } else {
-               intr_func = bnx_intr_legacy;
+       /* Setup TX rings and subqueues */
+       for (i = 0; i < sc->bnx_tx_ringcnt; ++i) {
+               struct ifaltq_subque *ifsq = ifq_get_subq(&ifp->if_snd, i);
+               struct bnx_tx_ring *txr = &sc->bnx_tx_ring[i];
+
+               ifsq_set_cpuid(ifsq, txr->bnx_tx_cpuid);
+               ifsq_set_hw_serialize(ifsq, &txr->bnx_tx_serialize);
+#ifdef notyet
+               ifsq_set_priv(ifsq, txr);
+               txr->ifsq = ifsq;
+
+               ifsq_watchdog_init(&txr->tx_watchdog, ifsq, bce_watchdog);
+#endif
        }
-       error = bus_setup_intr(dev, sc->bnx_irq, INTR_MPSAFE, intr_func, sc,
-           &sc->bnx_intrhand, ifp->if_serializer);
+
+#ifdef IFPOLL_ENABLE
+       ifpoll_compat_setup(&sc->bnx_npoll,
+           &sc->bnx_sysctl_ctx, sc->bnx_sysctl_tree,
+           device_get_unit(dev), &sc->bnx_main_serialize);
+#endif
+
+       error = bnx_setup_intr(sc);
        if (error) {
                ether_ifdetach(ifp);
-               device_printf(dev, "couldn't set up irq\n");
                goto fail;
        }
 
-       ifp->if_cpuid = rman_get_cpuid(sc->bnx_irq);
-       KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus);
-
-       sc->bnx_stat_cpuid = ifp->if_cpuid;
-       sc->bnx_intr_cpuid = ifp->if_cpuid;
+       sc->bnx_stat_cpuid = sc->bnx_intr_data[0].bnx_intr_cpuid;
 
        return(0);
 fail:
@@ -2219,11 +2166,11 @@ bnx_detach(device_t dev)
        if (device_is_attached(dev)) {
                struct ifnet *ifp = &sc->arpcom.ac_if;
 
-               lwkt_serialize_enter(ifp->if_serializer);
+               ifnet_serialize_all(ifp);
                bnx_stop(sc);
                bnx_reset(sc);
-               bus_teardown_intr(dev, sc->bnx_irq, sc->bnx_intrhand);
-               lwkt_serialize_exit(ifp->if_serializer);
+               bnx_teardown_intr(sc, sc->bnx_intr_cnt);
+               ifnet_deserialize_all(ifp);
 
                ether_ifdetach(ifp);
        }
@@ -2234,12 +2181,7 @@ bnx_detach(device_t dev)
                device_delete_child(dev, sc->bnx_miibus);
        bus_generic_detach(dev);
 
-       if (sc->bnx_irq != NULL) {
-               bus_release_resource(dev, SYS_RES_IRQ, sc->bnx_irq_rid,
-                   sc->bnx_irq);
-       }
-       if (sc->bnx_irq_type == PCI_INTR_TYPE_MSI)
-               pci_release_msi(dev);
+       bnx_free_intr(sc);
 
        if (sc->bnx_res != NULL) {
                bus_release_resource(dev, SYS_RES_MEMORY,
@@ -2251,6 +2193,9 @@ bnx_detach(device_t dev)
 
        bnx_dma_free(sc);
 
+       if (sc->bnx_serialize != NULL)
+               kfree(sc->bnx_serialize, M_DEVBUF);
+
        return 0;
 }
 
@@ -2265,10 +2210,7 @@ bnx_reset(struct bnx_softc *sc)
 
        dev = sc->bnx_dev;
 
-       if (sc->bnx_asicrev != BGE_ASICREV_BCM5906)
-               write_op = bnx_writemem_direct;
-       else
-               write_op = bnx_writereg_ind;
+       write_op = bnx_writemem_direct;
 
        /* Save some important PCI state. */
        cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
@@ -2319,17 +2261,6 @@ bnx_reset(struct bnx_softc *sc)
        /* Issue global reset */
        write_op(sc, BGE_MISC_CFG, reset);
 
-       if (sc->bnx_asicrev == BGE_ASICREV_BCM5906) {
-               uint32_t status, ctrl;
-
-               status = CSR_READ_4(sc, BGE_VCPU_STATUS);
-               CSR_WRITE_4(sc, BGE_VCPU_STATUS,
-                   status | BGE_VCPU_STATUS_DRV_RESET);
-               ctrl = CSR_READ_4(sc, BGE_VCPU_EXT_CTRL);
-               CSR_WRITE_4(sc, BGE_VCPU_EXT_CTRL,
-                   ctrl & ~BGE_VCPU_EXT_CTRL_HALT_CPU);
-       }
-
        DELAY(1000);
 
        /* XXX: Broadcom Linux driver. */
@@ -2374,38 +2305,24 @@ bnx_reset(struct bnx_softc *sc)
        /* Enable memory arbiter */
        CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
 
-       if (sc->bnx_asicrev == BGE_ASICREV_BCM5906) {
-               for (i = 0; i < BNX_TIMEOUT; i++) {
-                       val = CSR_READ_4(sc, BGE_VCPU_STATUS);
-                       if (val & BGE_VCPU_STATUS_INIT_DONE)
-                               break;
-                       DELAY(100);
-               }
-               if (i == BNX_TIMEOUT) {
-                       if_printf(&sc->arpcom.ac_if, "reset timed out\n");
-                       return;
-               }
-       } else {
-               /*
-                * Poll until we see the 1's complement of the magic number.
-                * This indicates that the firmware initialization
-                * is complete.
-                */
-               for (i = 0; i < BNX_FIRMWARE_TIMEOUT; i++) {
-                       val = bnx_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
-                       if (val == ~BGE_MAGIC_NUMBER)
-                               break;
-                       DELAY(10);
-               }
-               if (i == BNX_FIRMWARE_TIMEOUT) {
-                       if_printf(&sc->arpcom.ac_if, "firmware handshake "
-                                 "timed out, found 0x%08x\n", val);
-               }
-
-               /* BCM57765 A0 needs additional time before accessing. */
-               if (sc->bnx_chipid == BGE_CHIPID_BCM57765_A0)
-                       DELAY(10 * 1000);
+       /*
+        * Poll until we see the 1's complement of the magic number.
+        * This indicates that the firmware initialization is complete.
+        */
+       for (i = 0; i < BNX_FIRMWARE_TIMEOUT; i++) {
+               val = bnx_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
+               if (val == ~BGE_MAGIC_NUMBER)
+                       break;
+               DELAY(10);
        }
+       if (i == BNX_FIRMWARE_TIMEOUT) {
+               if_printf(&sc->arpcom.ac_if, "firmware handshake "
+                         "timed out, found 0x%08x\n", val);
+       }
+
+       /* BCM57765 A0 needs additional time before accessing. */
+       if (sc->bnx_chipid == BGE_CHIPID_BCM57765_A0)
+               DELAY(10 * 1000);
 
        /*
         * XXX Wait for the value of the PCISTATE register to
@@ -2440,6 +2357,10 @@ bnx_reset(struct bnx_softc *sc)
                CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg);
        }
 
+       CSR_WRITE_4(sc, BGE_MI_MODE,
+           sc->bnx_mi_mode & ~BGE_MIMODE_AUTOPOLL);
+       DELAY(80);
+
        /* XXX: Broadcom Linux driver. */
        if (!BNX_IS_57765_PLUS(sc)) {
                uint32_t v;
@@ -2467,25 +2388,26 @@ bnx_reset(struct bnx_softc *sc)
  */
 
 static void
-bnx_rxeof(struct bnx_softc *sc, uint16_t rx_prod)
+bnx_rxeof(struct bnx_rx_ret_ring *ret, uint16_t rx_prod, int count)
 {
-       struct ifnet *ifp;
+       struct bnx_softc *sc = ret->bnx_sc;
+       struct bnx_rx_std_ring *std = ret->bnx_std;
+       struct ifnet *ifp = &sc->arpcom.ac_if;
        int stdcnt = 0, jumbocnt = 0;
 
-       ifp = &sc->arpcom.ac_if;
-
-       while (sc->bnx_rx_saved_considx != rx_prod) {
+       while (ret->bnx_rx_saved_considx != rx_prod && count != 0) {
                struct bge_rx_bd        *cur_rx;
                uint32_t                rxidx;
                struct mbuf             *m = NULL;
                uint16_t                vlan_tag = 0;
                int                     have_tag = 0;
 
-               cur_rx =
-           &sc->bnx_ldata.bnx_rx_return_ring[sc->bnx_rx_saved_considx];
+               --count;
+
+               cur_rx = &ret->bnx_rx_ret_ring[ret->bnx_rx_saved_considx];
 
                rxidx = cur_rx->bge_idx;
-               BNX_INC(sc->bnx_rx_saved_considx, sc->bnx_return_ring_cnt);
+               BNX_INC(ret->bnx_rx_saved_considx, BNX_RETURN_RING_CNT);
 
                if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
                        have_tag = 1;
@@ -2497,7 +2419,7 @@ bnx_rxeof(struct bnx_softc *sc, uint16_t rx_prod)
                        jumbocnt++;
 
                        if (rxidx != sc->bnx_jumbo) {
-                               ifp->if_ierrors++;
+                               IFNET_STAT_INC(ifp, ierrors, 1);
                                if_printf(ifp, "sw jumbo index(%d) "
                                    "and hw jumbo index(%d) mismatch, drop!\n",
                                    sc->bnx_jumbo, rxidx);
@@ -2505,44 +2427,44 @@ bnx_rxeof(struct bnx_softc *sc, uint16_t rx_prod)
                                continue;
                        }
 
-                       m = sc->bnx_cdata.bnx_rx_jumbo_chain[rxidx].bnx_mbuf;
+                       m = sc->bnx_cdata.bnx_rx_jumbo_chain[rxidx].bnx_rx_mbuf;
                        if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
-                               ifp->if_ierrors++;
+                               IFNET_STAT_INC(ifp, ierrors, 1);
                                bnx_setup_rxdesc_jumbo(sc, sc->bnx_jumbo);
                                continue;
                        }
                        if (bnx_newbuf_jumbo(sc, sc->bnx_jumbo, 0)) {
-                               ifp->if_ierrors++;
+                               IFNET_STAT_INC(ifp, ierrors, 1);
                                bnx_setup_rxdesc_jumbo(sc, sc->bnx_jumbo);
                                continue;
                        }
                } else {
-                       BNX_INC(sc->bnx_std, BGE_STD_RX_RING_CNT);
+                       BNX_INC(std->bnx_rx_std, BGE_STD_RX_RING_CNT);
                        stdcnt++;
 
-                       if (rxidx != sc->bnx_std) {
-                               ifp->if_ierrors++;
+                       if (rxidx != std->bnx_rx_std) {
+                               IFNET_STAT_INC(ifp, ierrors, 1);
                                if_printf(ifp, "sw std index(%d) "
                                    "and hw std index(%d) mismatch, drop!\n",
-                                   sc->bnx_std, rxidx);
-                               bnx_setup_rxdesc_std(sc, rxidx);
+                                   std->bnx_rx_std, rxidx);
+                               bnx_setup_rxdesc_std(std, rxidx);
                                continue;
                        }
 
-                       m = sc->bnx_cdata.bnx_rx_std_chain[rxidx].bnx_mbuf;
+                       m = std->bnx_rx_std_buf[rxidx].bnx_rx_mbuf;
                        if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
-                               ifp->if_ierrors++;
-                               bnx_setup_rxdesc_std(sc, sc->bnx_std);
+                               IFNET_STAT_INC(ifp, ierrors, 1);
+                               bnx_setup_rxdesc_std(std, std->bnx_rx_std);
                                continue;
                        }
-                       if (bnx_newbuf_std(sc, sc->bnx_std, 0)) {
-                               ifp->if_ierrors++;
-                               bnx_setup_rxdesc_std(sc, sc->bnx_std);
+                       if (bnx_newbuf_std(ret, std->bnx_rx_std, 0)) {
+                               IFNET_STAT_INC(ifp, ierrors, 1);
+                               bnx_setup_rxdesc_std(std, std->bnx_rx_std);
                                continue;
                        }
                }
 
-               ifp->if_ipackets++;
+               IFNET_STAT_INC(ifp, ipackets, 1);
                m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
                m->m_pkthdr.rcvif = ifp;
 
@@ -2569,101 +2491,121 @@ bnx_rxeof(struct bnx_softc *sc, uint16_t rx_prod)
                if (have_tag) {
                        m->m_flags |= M_VLANTAG;
                        m->m_pkthdr.ether_vlantag = vlan_tag;
-                       have_tag = vlan_tag = 0;
                }
                ifp->if_input(ifp, m);
        }
 
-       bnx_writembx(sc, BGE_MBX_RX_CONS0_LO, sc->bnx_rx_saved_considx);
+       bnx_writembx(sc, BGE_MBX_RX_CONS0_LO, ret->bnx_rx_saved_considx);
        if (stdcnt)
-               bnx_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bnx_std);
+               bnx_writembx(sc, BGE_MBX_RX_STD_PROD_LO, std->bnx_rx_std);
        if (jumbocnt)
                bnx_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bnx_jumbo);
 }
 
 static void
-bnx_txeof(struct bnx_softc *sc, uint16_t tx_cons)
+bnx_txeof(struct bnx_tx_ring *txr, uint16_t tx_cons)
 {
-       struct ifnet *ifp;
-
-       ifp = &sc->arpcom.ac_if;
+       struct ifnet *ifp = &txr->bnx_sc->arpcom.ac_if;
 
        /*
         * Go through our tx ring and free mbufs for those
         * frames that have been sent.
         */
-       while (sc->bnx_tx_saved_considx != tx_cons) {
+       while (txr->bnx_tx_saved_considx != tx_cons) {
+               struct bnx_tx_buf *buf;
                uint32_t idx = 0;
 
-               idx = sc->bnx_tx_saved_considx;
-               if (sc->bnx_cdata.bnx_tx_chain[idx] != NULL) {
-                       ifp->if_opackets++;
-                       bus_dmamap_unload(sc->bnx_cdata.bnx_tx_mtag,
-                           sc->bnx_cdata.bnx_tx_dmamap[idx]);
-                       m_freem(sc->bnx_cdata.bnx_tx_chain[idx]);
-                       sc->bnx_cdata.bnx_tx_chain[idx] = NULL;
+               idx = txr->bnx_tx_saved_considx;
+               buf = &txr->bnx_tx_buf[idx];
+               if (buf->bnx_tx_mbuf != NULL) {
+                       IFNET_STAT_INC(ifp, opackets, 1);
+                       bus_dmamap_unload(txr->bnx_tx_mtag,
+                           buf->bnx_tx_dmamap);
+                       m_freem(buf->bnx_tx_mbuf);
+                       buf->bnx_tx_mbuf = NULL;
                }
-               sc->bnx_txcnt--;
-               BNX_INC(sc->bnx_tx_saved_considx, BGE_TX_RING_CNT);
+               txr->bnx_tx_cnt--;
+               BNX_INC(txr->bnx_tx_saved_considx, BGE_TX_RING_CNT);
        }
 
-       if ((BGE_TX_RING_CNT - sc->bnx_txcnt) >=
+       if ((BGE_TX_RING_CNT - txr->bnx_tx_cnt) >=
            (BNX_NSEG_RSVD + BNX_NSEG_SPARE))
-               ifp->if_flags &= ~IFF_OACTIVE;
+               ifq_clr_oactive(&ifp->if_snd);
 
-       if (sc->bnx_txcnt == 0)
+       if (txr->bnx_tx_cnt == 0)
                ifp->if_timer = 0;
 
        if (!ifq_is_empty(&ifp->if_snd))
                if_devstart(ifp);
 }
 
-#ifdef DEVICE_POLLING
+#ifdef IFPOLL_ENABLE
 
 static void
-bnx_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
+bnx_npoll(struct ifnet *ifp, struct ifpoll_info *info)
 {
        struct bnx_softc *sc = ifp->if_softc;
+
+       ASSERT_IFNET_SERIALIZED_ALL(ifp);
+
+       if (info != NULL) {
+               int cpuid = sc->bnx_npoll.ifpc_cpuid;
+
+               info->ifpi_rx[cpuid].poll_func = bnx_npoll_compat;
+               info->ifpi_rx[cpuid].arg = NULL;
+               info->ifpi_rx[cpuid].serializer = &sc->bnx_main_serialize;
+
+               if (ifp->if_flags & IFF_RUNNING)
+                       bnx_disable_intr(sc);
+               ifq_set_cpuid(&ifp->if_snd, cpuid);
+       } else {
+               if (ifp->if_flags & IFF_RUNNING)
+                       bnx_enable_intr(sc);
+               ifq_set_cpuid(&ifp->if_snd, sc->bnx_tx_ring[0].bnx_tx_cpuid);
+       }
+}
+
+static void
+bnx_npoll_compat(struct ifnet *ifp, void *arg __unused, int cycle)
+{
+       struct bnx_softc *sc = ifp->if_softc;
+       struct bnx_tx_ring *txr = &sc->bnx_tx_ring[0]; /* XXX */
+       struct bnx_rx_ret_ring *ret = &sc->bnx_rx_ret_ring[0]; /* XXX */
        struct bge_status_block *sblk = sc->bnx_ldata.bnx_status_block;
        uint16_t rx_prod, tx_cons;
 
-       switch(cmd) {
-       case POLL_REGISTER:
-               bnx_disable_intr(sc);
-               break;
-       case POLL_DEREGISTER:
-               bnx_enable_intr(sc);
-               break;
-       case POLL_AND_CHECK_STATUS:
+       ASSERT_SERIALIZED(&sc->bnx_main_serialize);
+
+       if (sc->bnx_npoll.ifpc_stcount-- == 0) {
+               sc->bnx_npoll.ifpc_stcount = sc->bnx_npoll.ifpc_stfrac;
                /*
                 * Process link state changes.
                 */
                bnx_link_poll(sc);
-               /* Fall through */
-       case POLL_ONLY:
-               sc->bnx_status_tag = sblk->bge_status_tag;
-               /*
-                * Use a load fence to ensure that status_tag
-                * is saved  before rx_prod and tx_cons.
-                */
-               cpu_lfence();
-
-               rx_prod = sblk->bge_idx[0].bge_rx_prod_idx;
-               tx_cons = sblk->bge_idx[0].bge_tx_cons_idx;
-               if (ifp->if_flags & IFF_RUNNING) {
-                       rx_prod = sblk->bge_idx[0].bge_rx_prod_idx;
-                       if (sc->bnx_rx_saved_considx != rx_prod)
-                               bnx_rxeof(sc, rx_prod);
-
-                       tx_cons = sblk->bge_idx[0].bge_tx_cons_idx;
-                       if (sc->bnx_tx_saved_considx != tx_cons)
-                               bnx_txeof(sc, tx_cons);
-               }
-               break;
        }
+
+       sc->bnx_status_tag = sblk->bge_status_tag;
+
+       /*
+        * Use a load fence to ensure that status_tag is saved
+        * before rx_prod and tx_cons.
+        */
+       cpu_lfence();
+
+       lwkt_serialize_enter(&ret->bnx_rx_ret_serialize);
+       rx_prod = *ret->bnx_rx_considx;
+       if (ret->bnx_rx_saved_considx != rx_prod)
+               bnx_rxeof(ret, rx_prod, cycle);
+       lwkt_serialize_exit(&ret->bnx_rx_ret_serialize);
+
+       lwkt_serialize_enter(&txr->bnx_tx_serialize);
+       tx_cons = *txr->bnx_tx_considx;
+       if (txr->bnx_tx_saved_considx != tx_cons)
+               bnx_txeof(txr, tx_cons);
+       lwkt_serialize_exit(&txr->bnx_tx_serialize);
 }
 
-#endif
+#endif /* IFPOLL_ENABLE */
 
 static void
 bnx_intr_legacy(void *xsc)
@@ -2711,9 +2653,10 @@ bnx_intr(struct bnx_softc *sc)
 {
        struct ifnet *ifp = &sc->arpcom.ac_if;
        struct bge_status_block *sblk = sc->bnx_ldata.bnx_status_block;
-       uint16_t rx_prod, tx_cons;
        uint32_t status;
 
+       ASSERT_SERIALIZED(&sc->bnx_main_serialize);
+
        sc->bnx_status_tag = sblk->bge_status_tag;
        /*
         * Use a load fence to ensure that status_tag is saved 
@@ -2721,34 +2664,38 @@ bnx_intr(struct bnx_softc *sc)
         */
        cpu_lfence();
 
-       rx_prod = sblk->bge_idx[0].bge_rx_prod_idx;
-       tx_cons = sblk->bge_idx[0].bge_tx_cons_idx;
        status = sblk->bge_status;
 
        if ((status & BGE_STATFLAG_LINKSTATE_CHANGED) || sc->bnx_link_evt)
                bnx_link_poll(sc);
 
        if (ifp->if_flags & IFF_RUNNING) {
-               if (sc->bnx_rx_saved_considx != rx_prod)
-                       bnx_rxeof(sc, rx_prod);
+               struct bnx_tx_ring *txr = &sc->bnx_tx_ring[0]; /* XXX */
+               struct bnx_rx_ret_ring *ret = &sc->bnx_rx_ret_ring[0]; /* XXX */
+               uint16_t rx_prod, tx_cons;
 
-               if (sc->bnx_tx_saved_considx != tx_cons)
-                       bnx_txeof(sc, tx_cons);
+               lwkt_serialize_enter(&ret->bnx_rx_ret_serialize);
+               rx_prod = *ret->bnx_rx_considx;
+               if (ret->bnx_rx_saved_considx != rx_prod)
+                       bnx_rxeof(ret, rx_prod, -1);
+               lwkt_serialize_exit(&ret->bnx_rx_ret_serialize);
+
+               lwkt_serialize_enter(&txr->bnx_tx_serialize);
+               tx_cons = *txr->bnx_tx_considx;
+               if (txr->bnx_tx_saved_considx != tx_cons)
+                       bnx_txeof(txr, tx_cons);
+               lwkt_serialize_exit(&txr->bnx_tx_serialize);
        }
 
        bnx_writembx(sc, BGE_MBX_IRQ0_LO, sc->bnx_status_tag << 24);
-
-       if (sc->bnx_coal_chg)
-               bnx_coal_change(sc);
 }
 
 static void
 bnx_tick(void *xsc)
 {
        struct bnx_softc *sc = xsc;
-       struct ifnet *ifp = &sc->arpcom.ac_if;
 
-       lwkt_serialize_enter(ifp->if_serializer);
+       lwkt_serialize_enter(&sc->bnx_main_serialize);
 
        KKASSERT(mycpuid == sc->bnx_stat_cpuid);
 
@@ -2768,7 +2715,7 @@ bnx_tick(void *xsc)
 
        callout_reset(&sc->bnx_stat_timer, hz, bnx_tick, sc);
 
-       lwkt_serialize_exit(ifp->if_serializer);
+       lwkt_serialize_exit(&sc->bnx_main_serialize);
 }
 
 static void
@@ -2785,12 +2732,11 @@ bnx_stats_update_regs(struct bnx_softc *sc)
                s++;
        }
 
-       ifp->if_collisions +=
+       IFNET_STAT_SET(ifp, collisions,
           (stats.dot3StatsSingleCollisionFrames +
           stats.dot3StatsMultipleCollisionFrames +
           stats.dot3StatsExcessiveCollisions +
-          stats.dot3StatsLateCollisions) -
-          ifp->if_collisions;
+          stats.dot3StatsLateCollisions));
 }
 
 /*
@@ -2798,7 +2744,8 @@ bnx_stats_update_regs(struct bnx_softc *sc)
  * pointers to descriptors.
  */
 static int
-bnx_encap(struct bnx_softc *sc, struct mbuf **m_head0, uint32_t *txidx)
+bnx_encap(struct bnx_tx_ring *txr, struct mbuf **m_head0, uint32_t *txidx,
+    int *segs_used)
 {
        struct bge_tx_bd *d = NULL;
        uint16_t csum_flags = 0, vlan_tag = 0, mss = 0;
@@ -2812,7 +2759,7 @@ bnx_encap(struct bnx_softc *sc, struct mbuf **m_head0, uint32_t *txidx)
                int tso_nsegs;
 #endif
 
-               error = bnx_setup_tso(sc, m_head0, &mss, &csum_flags);
+               error = bnx_setup_tso(txr, m_head0, &mss, &csum_flags);
                if (error)
                        return error;
                m_head = *m_head0;
@@ -2824,7 +2771,7 @@ bnx_encap(struct bnx_softc *sc, struct mbuf **m_head0, uint32_t *txidx)
                        tso_nsegs = BNX_TSO_NSTATS - 1;
                else if (tso_nsegs < 0)
                        tso_nsegs = 0;
-               sc->bnx_tsosegs[tso_nsegs]++;
+               txr->bnx_sc->bnx_tsosegs[tso_nsegs]++;
 #endif
        } else if (m_head->m_pkthdr.csum_flags & BNX_CSUM_FEATURES) {
                if (m_head->m_pkthdr.csum_flags & CSUM_IP)
@@ -2842,9 +2789,9 @@ bnx_encap(struct bnx_softc *sc, struct mbuf **m_head0, uint32_t *txidx)
        }
 
        idx = *txidx;
-       map = sc->bnx_cdata.bnx_tx_dmamap[idx];
+       map = txr->bnx_tx_buf[idx].bnx_tx_dmamap;
 
-       maxsegs = (BGE_TX_RING_CNT - sc->bnx_txcnt) - BNX_NSEG_RSVD;
+       maxsegs = (BGE_TX_RING_CNT - txr->bnx_tx_cnt) - BNX_NSEG_RSVD;
        KASSERT(maxsegs >= BNX_NSEG_SPARE,
                ("not enough segments %d", maxsegs));
 
@@ -2867,7 +2814,8 @@ bnx_encap(struct bnx_softc *sc, struct mbuf **m_head0, uint32_t *txidx)
                        goto back;
        }
 
-       if ((sc->bnx_flags & BNX_FLAG_SHORTDMA) && m_head->m_next != NULL) {
+       if ((txr->bnx_tx_flags & BNX_TX_FLAG_SHORTDMA) &&
+           m_head->m_next != NULL) {
                m_new = bnx_defrag_shortdma(m_head);
                if (m_new == NULL) {
                        error = ENOBUFS;
@@ -2876,7 +2824,8 @@ bnx_encap(struct bnx_softc *sc, struct mbuf **m_head0, uint32_t *txidx)
                *m_head0 = m_head = m_new;
        }
        if ((m_head->m_pkthdr.csum_flags & CSUM_TSO) == 0 &&
-           sc->bnx_force_defrag && m_head->m_next != NULL) {
+           (txr->bnx_tx_flags & BNX_TX_FLAG_FORCE_DEFRAG) &&
+           m_head->m_next != NULL) {
                /*
                 * Forcefully defragment mbuf chain to overcome hardware
                 * limitation which only support a single outstanding
@@ -2888,16 +2837,17 @@ bnx_encap(struct bnx_softc *sc, struct mbuf **m_head0, uint32_t *txidx)
                        *m_head0 = m_head = m_new;
        }
 
-       error = bus_dmamap_load_mbuf_defrag(sc->bnx_cdata.bnx_tx_mtag, map,
-                       m_head0, segs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
+       error = bus_dmamap_load_mbuf_defrag(txr->bnx_tx_mtag, map,
+           m_head0, segs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
        if (error)
                goto back;
+       *segs_used += nsegs;
 
        m_head = *m_head0;
-       bus_dmamap_sync(sc->bnx_cdata.bnx_tx_mtag, map, BUS_DMASYNC_PREWRITE);
+       bus_dmamap_sync(txr->bnx_tx_mtag, map, BUS_DMASYNC_PREWRITE);
 
        for (i = 0; ; i++) {
-               d = &sc->bnx_ldata.bnx_tx_ring[idx];
+               d = &txr->bnx_tx_ring[idx];
 
                d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr);
                d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr);
@@ -2917,10 +2867,10 @@ bnx_encap(struct bnx_softc *sc, struct mbuf **m_head0, uint32_t *txidx)
         * Insure that the map for this transmission is placed at
         * the array index of the last descriptor in this chain.
         */
-       sc->bnx_cdata.bnx_tx_dmamap[*txidx] = sc->bnx_cdata.bnx_tx_dmamap[idx];
-       sc->bnx_cdata.bnx_tx_dmamap[idx] = map;
-       sc->bnx_cdata.bnx_tx_chain[idx] = m_head;
-       sc->bnx_txcnt += nsegs;
+       txr->bnx_tx_buf[*txidx].bnx_tx_dmamap = txr->bnx_tx_buf[idx].bnx_tx_dmamap;
+       txr->bnx_tx_buf[idx].bnx_tx_dmamap = map;
+       txr->bnx_tx_buf[idx].bnx_tx_mbuf = m_head;
+       txr->bnx_tx_cnt += nsegs;
 
        BNX_INC(idx, BGE_TX_RING_CNT);
        *txidx = idx;
@@ -2937,29 +2887,32 @@ back:
  * to the mbuf data regions directly in the transmit descriptors.
  */
 static void
-bnx_start(struct ifnet *ifp)
+bnx_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
 {
        struct bnx_softc *sc = ifp->if_softc;
+       struct bnx_tx_ring *txr = &sc->bnx_tx_ring[0]; /* XXX */
        struct mbuf *m_head = NULL;
        uint32_t prodidx;
-       int need_trans;
+       int nsegs = 0;
+
+       ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq);
+       ASSERT_SERIALIZED(&txr->bnx_tx_serialize);
 
-       if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
+       if ((ifp->if_flags & IFF_RUNNING) == 0 || ifq_is_oactive(&ifp->if_snd))
                return;
 
-       prodidx = sc->bnx_tx_prodidx;
+       prodidx = txr->bnx_tx_prodidx;
 
-       need_trans = 0;
-       while (sc->bnx_cdata.bnx_tx_chain[prodidx] == NULL) {
+       while (txr->bnx_tx_buf[prodidx].bnx_tx_mbuf == NULL) {
                /*
                 * Sanity check: avoid coming within BGE_NSEG_RSVD
                 * descriptors of the end of the ring.  Also make
                 * sure there are BGE_NSEG_SPARE descriptors for
                 * jumbo buffers' or TSO segments' defragmentation.
                 */
-               if ((BGE_TX_RING_CNT - sc->bnx_txcnt) <
+               if ((BGE_TX_RING_CNT - txr->bnx_tx_cnt) <
                    (BNX_NSEG_RSVD + BNX_NSEG_SPARE)) {
-                       ifp->if_flags |= IFF_OACTIVE;
+                       ifq_set_oactive(&ifp->if_snd);
                        break;
                }
 
@@ -2972,28 +2925,31 @@ bnx_start(struct ifnet *ifp)
                 * don't have room, set the OACTIVE flag and wait
                 * for the NIC to drain the ring.
                 */
-               if (bnx_encap(sc, &m_head, &prodidx)) {
-                       ifp->if_flags |= IFF_OACTIVE;
-                       ifp->if_oerrors++;
+               if (bnx_encap(txr, &m_head, &prodidx, &nsegs)) {
+                       ifq_set_oactive(&ifp->if_snd);
+                       IFNET_STAT_INC(ifp, oerrors, 1);
                        break;
                }
-               need_trans = 1;
 
-               ETHER_BPF_MTAP(ifp, m_head);
-       }
-
-       if (!need_trans)
-               return;
+               if (nsegs >= txr->bnx_tx_wreg) {
+                       /* Transmit */
+                       bnx_writembx(txr->bnx_sc, txr->bnx_tx_mbx, prodidx);
+                       nsegs = 0;
+               }
 
-       /* Transmit */
-       bnx_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
+               ETHER_BPF_MTAP(ifp, m_head);
 
-       sc->bnx_tx_prodidx = prodidx;
+               /*
+                * Set a timeout in case the chip goes out to lunch.
+                */
+               ifp->if_timer = 5;
+       }
 
-       /*
-        * Set a timeout in case the chip goes out to lunch.
-        */
-       ifp->if_timer = 5;
+       if (nsegs > 0) {
+               /* Transmit */
+               bnx_writembx(txr->bnx_sc, txr->bnx_tx_mbx, prodidx);
+       }
+       txr->bnx_tx_prodidx = prodidx;
 }
 
 static void
@@ -3003,8 +2959,9 @@ bnx_init(void *xsc)
        struct ifnet *ifp = &sc->arpcom.ac_if;
        uint16_t *m;
        uint32_t mode;
+       int i;
 
-       ASSERT_SERIALIZED(ifp->if_serializer);
+       ASSERT_IFNET_SERIALIZED_ALL(ifp);
 
        /* Cancel pending I/O and flush buffers. */
        bnx_stop(sc);
@@ -3037,7 +2994,7 @@ bnx_init(void *xsc)
        bnx_setmulti(sc);
 
        /* Init RX ring. */
-       if (bnx_init_rx_ring_std(sc)) {
+       if (bnx_init_rx_ring_std(&sc->bnx_rx_std_ring)) {
                if_printf(ifp, "RX ring initialization failed\n");
                bnx_stop(sc);
                return;
@@ -3053,15 +3010,18 @@ bnx_init(void *xsc)
        }
 
        /* Init our RX return ring index */
-       sc->bnx_rx_saved_considx = 0;
+       for (i = 0; i < sc->bnx_rx_retcnt; ++i)
+               sc->bnx_rx_ret_ring[i].bnx_rx_saved_considx = 0;
 
        /* Init TX ring. */
-       bnx_init_tx_ring(sc);
+       for (i = 0; i < sc->bnx_tx_ringcnt; ++i)
+               bnx_init_tx_ring(&sc->bnx_tx_ring[i]);
 
        /* Enable TX MAC state machine lockup fix. */
        mode = CSR_READ_4(sc, BGE_TX_MODE);
        mode |= BGE_TXMODE_MBUF_LOCKUP_FIX;
-       if (sc->bnx_asicrev == BGE_ASICREV_BCM5720) {
+       if (sc->bnx_asicrev == BGE_ASICREV_BCM5720 ||
+           sc->bnx_asicrev == BGE_ASICREV_BCM5762) {
                mode &= ~(BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE);
                mode |= CSR_READ_4(sc, BGE_TX_MODE) &
                    (BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE);
@@ -3083,7 +3043,7 @@ bnx_init(void *xsc)
        else
                CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 2);
 
-       if (sc->bnx_irq_type == PCI_INTR_TYPE_MSI) {
+       if (sc->bnx_intr_type == PCI_INTR_TYPE_MSI) {
                if (bootverbose) {
                        if_printf(ifp, "MSI_MODE: %#x\n",
                            CSR_READ_4(sc, BGE_MSI_MODE));
@@ -3095,8 +3055,8 @@ bnx_init(void *xsc)
 
        /* Enable host interrupts if polling(4) is not enabled. */
        PCI_SETBIT(sc->bnx_dev, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA, 4);
-#ifdef DEVICE_POLLING
-       if (ifp->if_flags & IFF_POLLING)
+#ifdef IFPOLL_ENABLE
+       if (ifp->if_flags & IFF_NPOLLING)
                bnx_disable_intr(sc);
        else
 #endif
@@ -3105,7 +3065,7 @@ bnx_init(void *xsc)
        bnx_ifmedia_upd(ifp);
 
        ifp->if_flags |= IFF_RUNNING;
-       ifp->if_flags &= ~IFF_OACTIVE;
+       ifq_clr_oactive(&ifp->if_snd);
 
        callout_reset_bycpu(&sc->bnx_stat_timer, hz, bnx_tick, sc,
            sc->bnx_stat_cpuid);
@@ -3213,7 +3173,7 @@ bnx_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
        struct ifreq *ifr = (struct ifreq *)data;
        int mask, error = 0;
 
-       ASSERT_SERIALIZED(ifp->if_serializer);
+       ASSERT_IFNET_SERIALIZED_ALL(ifp);
 
        switch (command) {
        case SIOCSIFMTU:
@@ -3304,10 +3264,10 @@ bnx_watchdog(struct ifnet *ifp)
 
        bnx_init(sc);
 
-       ifp->if_oerrors++;
+       IFNET_STAT_INC(ifp, oerrors, 1);
 
        if (!ifq_is_empty(&ifp->if_snd))
-               if_devstart(ifp);
+               if_devstart_sched(ifp);
 }
 
 /*
@@ -3318,8 +3278,9 @@ static void
 bnx_stop(struct bnx_softc *sc)
 {
        struct ifnet *ifp = &sc->arpcom.ac_if;
+       int i;
 
-       ASSERT_SERIALIZED(ifp->if_serializer);
+       ASSERT_IFNET_SERIALIZED_ALL(ifp);
 
        callout_stop(&sc->bnx_stat_timer);
 
@@ -3361,22 +3322,22 @@ bnx_stop(struct bnx_softc *sc)
        BNX_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
 
        /* Free the RX lists. */
-       bnx_free_rx_ring_std(sc);
+       bnx_free_rx_ring_std(&sc->bnx_rx_std_ring);
 
        /* Free jumbo RX list. */
        if (BNX_IS_JUMBO_CAPABLE(sc))
                bnx_free_rx_ring_jumbo(sc);
 
        /* Free TX buffers. */
-       bnx_free_tx_ring(sc);
+       for (i = 0; i < sc->bnx_tx_ringcnt; ++i)
+               bnx_free_tx_ring(&sc->bnx_tx_ring[i]);
 
        sc->bnx_status_tag = 0;
        sc->bnx_link = 0;
        sc->bnx_coal_chg = 0;
 
-       sc->bnx_tx_saved_considx = BNX_TXCONS_UNSET;
-
-       ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
+       ifp->if_flags &= ~IFF_RUNNING;
+       ifq_clr_oactive(&ifp->if_snd);
        ifp->if_timer = 0;
 }
 
@@ -3390,10 +3351,10 @@ bnx_shutdown(device_t dev)
        struct bnx_softc *sc = device_get_softc(dev);
        struct ifnet *ifp = &sc->arpcom.ac_if;
 
-       lwkt_serialize_enter(ifp->if_serializer);
+       ifnet_serialize_all(ifp);
        bnx_stop(sc);
        bnx_reset(sc);
-       lwkt_serialize_exit(ifp->if_serializer);
+       ifnet_deserialize_all(ifp);
 }
 
 static int
@@ -3402,9 +3363,9 @@ bnx_suspend(device_t dev)
        struct bnx_softc *sc = device_get_softc(dev);
        struct ifnet *ifp = &sc->arpcom.ac_if;
 
-       lwkt_serialize_enter(ifp->if_serializer);
+       ifnet_serialize_all(ifp);
        bnx_stop(sc);
-       lwkt_serialize_exit(ifp->if_serializer);
+       ifnet_deserialize_all(ifp);
 
        return 0;
 }
@@ -3415,16 +3376,16 @@ bnx_resume(device_t dev)
        struct bnx_softc *sc = device_get_softc(dev);
        struct ifnet *ifp = &sc->arpcom.ac_if;
 
-       lwkt_serialize_enter(ifp->if_serializer);
+       ifnet_serialize_all(ifp);
 
        if (ifp->if_flags & IFF_UP) {
                bnx_init(sc);
 
                if (!ifq_is_empty(&ifp->if_snd))
-                       if_devstart(ifp);
+                       if_devstart_sched(ifp);
        }
 
-       lwkt_serialize_exit(ifp->if_serializer);
+       ifnet_deserialize_all(ifp);
 
        return 0;
 }
@@ -3443,50 +3404,44 @@ bnx_setpromisc(struct bnx_softc *sc)
 static void
 bnx_dma_free(struct bnx_softc *sc)
 {
+       struct bnx_rx_std_ring *std = &sc->bnx_rx_std_ring;
        int i;
 
-       /* Destroy RX mbuf DMA stuffs. */
-       if (sc->bnx_cdata.bnx_rx_mtag != NULL) {
-               for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
-                       bus_dmamap_destroy(sc->bnx_cdata.bnx_rx_mtag,
-                           sc->bnx_cdata.bnx_rx_std_dmamap[i]);
-               }
-               bus_dmamap_destroy(sc->bnx_cdata.bnx_rx_mtag,
-                                  sc->bnx_cdata.bnx_rx_tmpmap);
-               bus_dma_tag_destroy(sc->bnx_cdata.bnx_rx_mtag);
+       /* Destroy RX return rings */
+       if (sc->bnx_rx_ret_ring != NULL) {
+               for (i = 0; i < sc->bnx_rx_retcnt; ++i)
+                       bnx_destroy_rx_ret_ring(&sc->bnx_rx_ret_ring[i]);
+               kfree(sc->bnx_rx_ret_ring, M_DEVBUF);
        }
 
-       /* Destroy TX mbuf DMA stuffs. */
-       if (sc->bnx_cdata.bnx_tx_mtag != NULL) {
-               for (i = 0; i < BGE_TX_RING_CNT; i++) {
-                       bus_dmamap_destroy(sc->bnx_cdata.bnx_tx_mtag,
-                           sc->bnx_cdata.bnx_tx_dmamap[i]);
+       /* Destroy RX mbuf DMA stuffs. */
+       if (std->bnx_rx_mtag != NULL) {
+               for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
+                       KKASSERT(std->bnx_rx_std_buf[i].bnx_rx_mbuf == NULL);
+                       bus_dmamap_destroy(std->bnx_rx_mtag,
+                           std->bnx_rx_std_buf[i].bnx_rx_dmamap);
                }
-               bus_dma_tag_destroy(sc->bnx_cdata.bnx_tx_mtag);
+               bus_dma_tag_destroy(std->bnx_rx_mtag);
        }
 
        /* Destroy standard RX ring */
-       bnx_dma_block_free(sc->bnx_cdata.bnx_rx_std_ring_tag,
-                          sc->bnx_cdata.bnx_rx_std_ring_map,
-                          sc->bnx_ldata.bnx_rx_std_ring);
+       bnx_dma_block_free(std->bnx_rx_std_ring_tag,
+           std->bnx_rx_std_ring_map, std->bnx_rx_std_ring);
+
+       /* Destroy TX rings */
+       if (sc->bnx_tx_ring != NULL) {
+               for (i = 0; i < sc->bnx_tx_ringcnt; ++i)
+                       bnx_destroy_tx_ring(&sc->bnx_tx_ring[i]);
+               kfree(sc->bnx_tx_ring, M_DEVBUF);
+       }
 
        if (BNX_IS_JUMBO_CAPABLE(sc))
                bnx_free_jumbo_mem(sc);
 
-       /* Destroy RX return ring */
-       bnx_dma_block_free(sc->bnx_cdata.bnx_rx_return_ring_tag,
-                          sc->bnx_cdata.bnx_rx_return_ring_map,
-                          sc->bnx_ldata.bnx_rx_return_ring);
-
-       /* Destroy TX ring */
-       bnx_dma_block_free(sc->bnx_cdata.bnx_tx_ring_tag,
-                          sc->bnx_cdata.bnx_tx_ring_map,
-                          sc->bnx_ldata.bnx_tx_ring);
-
        /* Destroy status block */
        bnx_dma_block_free(sc->bnx_cdata.bnx_status_tag,
-                          sc->bnx_cdata.bnx_status_map,
-                          sc->bnx_ldata.bnx_status_block);
+           sc->bnx_cdata.bnx_status_map,
+           sc->bnx_ldata.bnx_status_block);
 
        /* Destroy the parent tag */
        if (sc->bnx_cdata.bnx_parent_tag != NULL)
@@ -3494,11 +3449,11 @@ bnx_dma_free(struct bnx_softc *sc)
 }
 
 static int
-bnx_dma_alloc(struct bnx_softc *sc)
+bnx_dma_alloc(device_t dev)
 {
-       struct ifnet *ifp = &sc->arpcom.ac_if;
-       bus_size_t txmaxsz;
-       int i, error;
+       struct bnx_softc *sc = device_get_softc(dev);
+       struct bnx_rx_std_ring *std = &sc->bnx_rx_std_ring;
+       int i, error, mbx;
 
        /*
         * Allocate the parent bus DMA tag appropriate for PCI.
@@ -3511,90 +3466,56 @@ bnx_dma_alloc(struct bnx_softc *sc)
         * state machine will lockup and cause the device to hang.
         */
        error = bus_dma_tag_create(NULL, 1, BGE_DMA_BOUNDARY_4G,
-                                  BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
-                                  NULL, NULL,
-                                  BUS_SPACE_MAXSIZE_32BIT, 0,
-                                  BUS_SPACE_MAXSIZE_32BIT,
-                                  0, &sc->bnx_cdata.bnx_parent_tag);
+           BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
+           BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT,
+           0, &sc->bnx_cdata.bnx_parent_tag);
        if (error) {
-               if_printf(ifp, "could not allocate parent dma tag\n");
+               device_printf(dev, "could not create parent DMA tag\n");
                return error;
        }
 
        /*
-        * Create DMA tag and maps for RX mbufs.
+        * Create DMA stuffs for status block.
         */
-       error = bus_dma_tag_create(sc->bnx_cdata.bnx_parent_tag, 1, 0,
-                                  BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
-                                  NULL, NULL, MCLBYTES, 1, MCLBYTES,
-                                  BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK,
-                                  &sc->bnx_cdata.bnx_rx_mtag);
-       if (error) {
-               if_printf(ifp, "could not allocate RX mbuf dma tag\n");
-               return error;
-       }
-
-       error = bus_dmamap_create(sc->bnx_cdata.bnx_rx_mtag,
-                                 BUS_DMA_WAITOK, &sc->bnx_cdata.bnx_rx_tmpmap);
+       error = bnx_dma_block_alloc(sc, BGE_STATUS_BLK_SZ,
+           &sc->bnx_cdata.bnx_status_tag,
+           &sc->bnx_cdata.bnx_status_map,
+           (void *)&sc->bnx_ldata.bnx_status_block,
+           &sc->bnx_ldata.bnx_status_block_paddr);
        if (error) {
-               bus_dma_tag_destroy(sc->bnx_cdata.bnx_rx_mtag);
-               sc->bnx_cdata.bnx_rx_mtag = NULL;
+               device_printf(dev, "could not create status block\n");
                return error;
        }
 
-       for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
-               error = bus_dmamap_create(sc->bnx_cdata.bnx_rx_mtag,
-                                         BUS_DMA_WAITOK,
-                                         &sc->bnx_cdata.bnx_rx_std_dmamap[i]);
-               if (error) {
-                       int j;
-
-                       for (j = 0; j < i; ++j) {
-                               bus_dmamap_destroy(sc->bnx_cdata.bnx_rx_mtag,
-                                       sc->bnx_cdata.bnx_rx_std_dmamap[j]);
-                       }
-                       bus_dma_tag_destroy(sc->bnx_cdata.bnx_rx_mtag);
-                       sc->bnx_cdata.bnx_rx_mtag = NULL;
-
-                       if_printf(ifp, "could not create DMA map for RX\n");
-                       return error;
-               }
-       }
-
        /*
-        * Create DMA tag and maps for TX mbufs.
+        * Create DMA tag and maps for RX mbufs.
         */
-       if (sc->bnx_flags & BNX_FLAG_TSO)
-               txmaxsz = IP_MAXPACKET + sizeof(struct ether_vlan_header);
-       else
-               txmaxsz = BNX_JUMBO_FRAMELEN;
+       std->bnx_sc = sc;
+       lwkt_serialize_init(&std->bnx_rx_std_serialize);
        error = bus_dma_tag_create(sc->bnx_cdata.bnx_parent_tag, 1, 0,
-                                  BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
-                                  NULL, NULL,
-                                  txmaxsz, BNX_NSEG_NEW, PAGE_SIZE,
-                                  BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK |
-                                  BUS_DMA_ONEBPAGE,
-                                  &sc->bnx_cdata.bnx_tx_mtag);
+           BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
+           NULL, NULL, MCLBYTES, 1, MCLBYTES,
+           BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK, &std->bnx_rx_mtag);
        if (error) {
-               if_printf(ifp, "could not allocate TX mbuf dma tag\n");
+               device_printf(dev, "could not create RX mbuf DMA tag\n");
                return error;
        }
 
-       for (i = 0; i < BGE_TX_RING_CNT; i++) {
-               error = bus_dmamap_create(sc->bnx_cdata.bnx_tx_mtag,
-                                         BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
-                                         &sc->bnx_cdata.bnx_tx_dmamap[i]);
+       for (i = 0; i < BGE_STD_RX_RING_CNT; ++i) {
+               error = bus_dmamap_create(std->bnx_rx_mtag, BUS_DMA_WAITOK,
+                   &std->bnx_rx_std_buf[i].bnx_rx_dmamap);
                if (error) {
                        int j;
 
                        for (j = 0; j < i; ++j) {
-                               bus_dmamap_destroy(sc->bnx_cdata.bnx_tx_mtag,
-                                       sc->bnx_cdata.bnx_tx_dmamap[j]);
+                               bus_dmamap_destroy(std->bnx_rx_mtag,
+                                   std->bnx_rx_std_buf[j].bnx_rx_dmamap);
                        }
-                       bus_dma_tag_destroy(sc->bnx_cdata.bnx_tx_mtag);
-                       sc->bnx_cdata.bnx_tx_mtag = NULL;
+                       bus_dma_tag_destroy(std->bnx_rx_mtag);
+                       std->bnx_rx_mtag = NULL;
 
-                       if_printf(ifp, "could not create DMA map for TX\n");
+                       device_printf(dev,
+                           "could not create %dth RX mbuf DMA map\n", i);
                        return error;
                }
        }
@@ -3603,64 +3524,79 @@ bnx_dma_alloc(struct bnx_softc *sc)
         * Create DMA stuffs for standard RX ring.
         */
        error = bnx_dma_block_alloc(sc, BGE_STD_RX_RING_SZ,
-                                   &sc->bnx_cdata.bnx_rx_std_ring_tag,
-                                   &sc->bnx_cdata.bnx_rx_std_ring_map,
-                                   (void *)&sc->bnx_ldata.bnx_rx_std_ring,
-                                   &sc->bnx_ldata.bnx_rx_std_ring_paddr);
+           &std->bnx_rx_std_ring_tag,
+           &std->bnx_rx_std_ring_map,
+           (void *)&std->bnx_rx_std_ring,
+           &std->bnx_rx_std_ring_paddr);
        if (error) {
-               if_printf(ifp, "could not create std RX ring\n");
+               device_printf(dev, "could not create std RX ring\n");
                return error;
        }
 
        /*
-        * Create jumbo buffer pool.
+        * Create RX return rings
         */
-       if (BNX_IS_JUMBO_CAPABLE(sc)) {
-               error = bnx_alloc_jumbo_mem(sc);
+       sc->bnx_rx_ret_ring = kmalloc_cachealign(
+           sizeof(struct bnx_rx_ret_ring) * sc->bnx_rx_retcnt, M_DEVBUF,
+           M_WAITOK | M_ZERO);
+       for (i = 0; i < sc->bnx_rx_retcnt; ++i) {
+               struct bnx_rx_ret_ring *ret = &sc->bnx_rx_ret_ring[i];
+
+               ret->bnx_sc = sc;
+               ret->bnx_std = std;
+
+               /* XXX */
+               ret->bnx_rx_considx =
+               &sc->bnx_ldata.bnx_status_block->bge_idx[0].bge_rx_prod_idx;
+
+               error = bnx_create_rx_ret_ring(ret);
                if (error) {
-                       if_printf(ifp, "could not create jumbo buffer pool\n");
+                       device_printf(dev,
+                           "could not create %dth RX ret ring\n", i);
                        return error;
                }
        }
 
        /*
-        * Create DMA stuffs for RX return ring.
+        * Create TX rings
         */
-       error = bnx_dma_block_alloc(sc,
-           BGE_RX_RTN_RING_SZ(sc->bnx_return_ring_cnt),
-           &sc->bnx_cdata.bnx_rx_return_ring_tag,
-           &sc->bnx_cdata.bnx_rx_return_ring_map,
-           (void *)&sc->bnx_ldata.bnx_rx_return_ring,
-           &sc->bnx_ldata.bnx_rx_return_ring_paddr);
-       if (error) {
-               if_printf(ifp, "could not create RX ret ring\n");
-               return error;
-       }
+       mbx = BGE_MBX_TX_HOST_PROD0_LO;
+       sc->bnx_tx_ring = kmalloc_cachealign(
+           sizeof(struct bnx_tx_ring) * sc->bnx_tx_ringcnt, M_DEVBUF,
+           M_WAITOK | M_ZERO);
+       for (i = 0; i < sc->bnx_tx_ringcnt; ++i) {
+               struct bnx_tx_ring *txr = &sc->bnx_tx_ring[i];
+
+               txr->bnx_sc = sc;
+               txr->bnx_tx_mbx = mbx;
+
+               if (mbx & 0x4)
+                       mbx -= 0x4;
+               else
+                       mbx += 0xc;
 
-       /*
-        * Create DMA stuffs for TX ring.
-        */
-       error = bnx_dma_block_alloc(sc, BGE_TX_RING_SZ,
-                                   &sc->bnx_cdata.bnx_tx_ring_tag,
-                                   &sc->bnx_cdata.bnx_tx_ring_map,
-                                   (void *)&sc->bnx_ldata.bnx_tx_ring,
-                                   &sc->bnx_ldata.bnx_tx_ring_paddr);
-       if (error) {
-               if_printf(ifp, "could not create TX ring\n");
-               return error;
+               /* XXX */
+               txr->bnx_tx_considx =
+               &sc->bnx_ldata.bnx_status_block->bge_idx[0].bge_tx_cons_idx;
+
+               error = bnx_create_tx_ring(txr);
+               if (error) {
+                       device_printf(dev,
+                           "could not create %dth TX ring\n", i);
+                       return error;
+               }
        }
 
        /*
-        * Create DMA stuffs for status block.
+        * Create jumbo buffer pool.
         */
-       error = bnx_dma_block_alloc(sc, BGE_STATUS_BLK_SZ,
-                                   &sc->bnx_cdata.bnx_status_tag,
-                                   &sc->bnx_cdata.bnx_status_map,
-                                   (void *)&sc->bnx_ldata.bnx_status_block,
-                                   &sc->bnx_ldata.bnx_status_block_paddr);
-       if (error) {
-               if_printf(ifp, "could not create status block\n");
-               return error;
+       if (BNX_IS_JUMBO_CAPABLE(sc)) {
+               error = bnx_alloc_jumbo_mem(sc);
+               if (error) {
+                       device_printf(dev,
+                           "could not create jumbo buffer pool\n");
+                       return error;
+               }
        }
 
        return 0;
@@ -3874,7 +3810,7 @@ bnx_sysctl_coal_chg(SYSCTL_HANDLER_ARGS, uint32_t *coal,
        struct ifnet *ifp = &sc->arpcom.ac_if;
        int error = 0, v;
 
-       lwkt_serialize_enter(ifp->if_serializer);
+       ifnet_serialize_all(ifp);
 
        v = *coal;
        error = sysctl_handle_int(oidp, &v, 0, req);
@@ -3884,10 +3820,13 @@ bnx_sysctl_coal_chg(SYSCTL_HANDLER_ARGS, uint32_t *coal,
                } else {
                        *coal = v;
                        sc->bnx_coal_chg |= coal_chg_mask;
+
+                       /* Commit changes */
+                       bnx_coal_change(sc);
                }
        }
 
-       lwkt_serialize_exit(ifp->if_serializer);
+       ifnet_deserialize_all(ifp);
        return error;
 }
 
@@ -3895,15 +3834,14 @@ static void
 bnx_coal_change(struct bnx_softc *sc)
 {
        struct ifnet *ifp = &sc->arpcom.ac_if;
-       uint32_t val;
 
-       ASSERT_SERIALIZED(ifp->if_serializer);
+       ASSERT_IFNET_SERIALIZED_ALL(ifp);
 
        if (sc->bnx_coal_chg & BNX_RX_COAL_TICKS_CHG) {
                CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS,
                            sc->bnx_rx_coal_ticks);
                DELAY(10);
-               val = CSR_READ_4(sc, BGE_HCC_RX_COAL_TICKS);
+               CSR_READ_4(sc, BGE_HCC_RX_COAL_TICKS);
 
                if (bootverbose) {
                        if_printf(ifp, "rx_coal_ticks -> %u\n",
@@ -3915,7 +3853,7 @@ bnx_coal_change(struct bnx_softc *sc)
                CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS,
                            sc->bnx_tx_coal_ticks);
                DELAY(10);
-               val = CSR_READ_4(sc, BGE_HCC_TX_COAL_TICKS);
+               CSR_READ_4(sc, BGE_HCC_TX_COAL_TICKS);
 
                if (bootverbose) {
                        if_printf(ifp, "tx_coal_ticks -> %u\n",
@@ -3927,7 +3865,7 @@ bnx_coal_change(struct bnx_softc *sc)
                CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS,
                            sc->bnx_rx_coal_bds);
                DELAY(10);
-               val = CSR_READ_4(sc, BGE_HCC_RX_MAX_COAL_BDS);
+               CSR_READ_4(sc, BGE_HCC_RX_MAX_COAL_BDS);
 
                if (bootverbose) {
                        if_printf(ifp, "rx_coal_bds -> %u\n",
@@ -3939,7 +3877,7 @@ bnx_coal_change(struct bnx_softc *sc)
                CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS,
                            sc->bnx_tx_coal_bds);
                DELAY(10);
-               val = CSR_READ_4(sc, BGE_HCC_TX_MAX_COAL_BDS);
+               CSR_READ_4(sc, BGE_HCC_TX_MAX_COAL_BDS);
 
                if (bootverbose) {
                        if_printf(ifp, "tx_coal_bds -> %u\n",
@@ -3951,7 +3889,7 @@ bnx_coal_change(struct bnx_softc *sc)
                CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT,
                    sc->bnx_rx_coal_bds_int);
                DELAY(10);
-               val = CSR_READ_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT);
+               CSR_READ_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT);
 
                if (bootverbose) {
                        if_printf(ifp, "rx_coal_bds_int -> %u\n",
@@ -3963,7 +3901,7 @@ bnx_coal_change(struct bnx_softc *sc)
                CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT,
                    sc->bnx_tx_coal_bds_int);
                DELAY(10);
-               val = CSR_READ_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT);
+               CSR_READ_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT);
 
                if (bootverbose) {
                        if_printf(ifp, "tx_coal_bds_int -> %u\n",
@@ -3975,50 +3913,59 @@ bnx_coal_change(struct bnx_softc *sc)
 }
 
 static void
-bnx_intr_check(void *xsc)
+bnx_check_intr(void *xintr)
 {
-       struct bnx_softc *sc = xsc;
-       struct ifnet *ifp = &sc->arpcom.ac_if;
-       struct bge_status_block *sblk = sc->bnx_ldata.bnx_status_block;
+       struct bnx_intr_data *intr = xintr;
+       struct bnx_rx_ret_ring *ret;
+       struct bnx_tx_ring *txr;
+       struct ifnet *ifp;
 
-       lwkt_serialize_enter(ifp->if_serializer);
+       lwkt_serialize_enter(intr->bnx_intr_serialize);
 
-       KKASSERT(mycpuid == sc->bnx_intr_cpuid);
+       KKASSERT(mycpuid == intr->bnx_intr_cpuid);
 
-       if ((ifp->if_flags & (IFF_RUNNING | IFF_POLLING)) != IFF_RUNNING) {
-               lwkt_serialize_exit(ifp->if_serializer);
+       ifp = &intr->bnx_sc->arpcom.ac_if;
+       if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) != IFF_RUNNING) {
+               lwkt_serialize_exit(intr->bnx_intr_serialize);
                return;
        }
 
-       if (sblk->bge_idx[0].bge_rx_prod_idx != sc->bnx_rx_saved_considx ||
-           sblk->bge_idx[0].bge_tx_cons_idx != sc->bnx_tx_saved_considx) {
-               if (sc->bnx_rx_check_considx == sc->bnx_rx_saved_considx &&
-                   sc->bnx_tx_check_considx == sc->bnx_tx_saved_considx) {
-                       if (!sc->bnx_intr_maylose) {
-                               sc->bnx_intr_maylose = TRUE;
+       txr = intr->bnx_txr;
+       ret = intr->bnx_ret;
+
+       if (*ret->bnx_rx_considx != ret->bnx_rx_saved_considx ||
+           *txr->bnx_tx_considx != txr->bnx_tx_saved_considx) {
+               if (intr->bnx_rx_check_considx == ret->bnx_rx_saved_considx &&
+                   intr->bnx_tx_check_considx == txr->bnx_tx_saved_considx) {
+                       if (!intr->bnx_intr_maylose) {
+                               intr->bnx_intr_maylose = TRUE;
                                goto done;
                        }
                        if (bootverbose)
                                if_printf(ifp, "lost interrupt\n");
-                       bnx_msi(sc);
+                       intr->bnx_intr_func(intr->bnx_intr_arg);
                }
        }
-       sc->bnx_intr_maylose = FALSE;
-       sc->bnx_rx_check_considx = sc->bnx_rx_saved_considx;
-       sc->bnx_tx_check_considx = sc->bnx_tx_saved_considx;
+       intr->bnx_intr_maylose = FALSE;
+       intr->bnx_rx_check_considx = ret->bnx_rx_saved_considx;
+       intr->bnx_tx_check_considx = txr->bnx_tx_saved_considx;
 
 done:
-       callout_reset(&sc->bnx_intr_timer, BNX_INTR_CKINTVL,
-           bnx_intr_check, sc);
-       lwkt_serialize_exit(ifp->if_serializer);
+       callout_reset(&intr->bnx_intr_timer, BNX_INTR_CKINTVL,
+           intr->bnx_intr_check, intr);
+       lwkt_serialize_exit(intr->bnx_intr_serialize);
 }
 
 static void
 bnx_enable_intr(struct bnx_softc *sc)
 {
        struct ifnet *ifp = &sc->arpcom.ac_if;
+       int i;
 
-       lwkt_serialize_handler_enable(ifp->if_serializer);
+       for (i = 0; i < sc->bnx_intr_cnt; ++i) {
+               lwkt_serialize_handler_enable(
+                   sc->bnx_intr_data[i].bnx_intr_serialize);
+       }
 
        /*
         * Enable interrupt.
@@ -4043,23 +3990,35 @@ bnx_enable_intr(struct bnx_softc *sc)
        BNX_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
 
        if (sc->bnx_flags & BNX_FLAG_STATUSTAG_BUG) {
-               sc->bnx_intr_maylose = FALSE;
-               sc->bnx_rx_check_considx = 0;
-               sc->bnx_tx_check_considx = 0;
-
                if (bootverbose)
                        if_printf(ifp, "status tag bug workaround\n");
 
-               /* 10ms check interval */
-               callout_reset_bycpu(&sc->bnx_intr_timer, BNX_INTR_CKINTVL,
-                   bnx_intr_check, sc, sc->bnx_intr_cpuid);
+               for (i = 0; i < sc->bnx_intr_cnt; ++i) {
+                       struct bnx_intr_data *intr = &sc->bnx_intr_data[i];
+
+                       intr->bnx_intr_maylose = FALSE;
+                       intr->bnx_rx_check_considx = 0;
+                       intr->bnx_tx_check_considx = 0;
+                       callout_reset_bycpu(&intr->bnx_intr_timer,
+                           BNX_INTR_CKINTVL, intr->bnx_intr_check, intr,
+                           intr->bnx_intr_cpuid);
+               }
        }
 }
 
 static void
 bnx_disable_intr(struct bnx_softc *sc)
 {
-       struct ifnet *ifp = &sc->arpcom.ac_if;
+       int i;
+
+       for (i = 0; i < sc->bnx_intr_cnt; ++i) {
+               struct bnx_intr_data *intr = &sc->bnx_intr_data[i];
+
+               callout_stop(&intr->bnx_intr_timer);
+               intr->bnx_intr_maylose = FALSE;
+               intr->bnx_rx_check_considx = 0;
+               intr->bnx_tx_check_considx = 0;
+       }
 
        /*
         * Mask the interrupt when we start polling.
@@ -4072,12 +4031,11 @@ bnx_disable_intr(struct bnx_softc *sc)
         */
        bnx_writembx(sc, BGE_MBX_IRQ0_LO, 1);
 
-       callout_stop(&sc->bnx_intr_timer);
-       sc->bnx_intr_maylose = FALSE;
-       sc->bnx_rx_check_considx = 0;
-       sc->bnx_tx_check_considx = 0;
-
-       lwkt_serialize_handler_disable(ifp->if_serializer);
+       sc->bnx_npoll.ifpc_stcount = 0;
+       for (i = 0; i < sc->bnx_intr_cnt; ++i) {
+               lwkt_serialize_handler_disable(
+                   sc->bnx_intr_data[i].bnx_intr_serialize);
+       }
 }
 
 static int
@@ -4113,8 +4071,6 @@ bnx_get_eaddr_nvram(struct bnx_softc *sc, uint8_t ether_addr[])
                        mac_offset = BGE_EE_MAC_OFFSET_5717;
                if (f > 1)
                        mac_offset += BGE_EE_MAC_OFFSET_5717_OFF;
-       } else if (sc->bnx_asicrev == BGE_ASICREV_BCM5906) {
-               mac_offset = BGE_EE_MAC_OFFSET_5906;
        }
 
        return bnx_read_nvram(sc, ether_addr, mac_offset + 2, ETHER_ADDR_LEN);
@@ -4237,7 +4193,8 @@ bnx_dma_swap_options(struct bnx_softc *sc)
 #if BYTE_ORDER == BIG_ENDIAN
        dma_options |= BGE_MODECTL_BYTESWAP_NONFRAME;
 #endif
-       if (sc->bnx_asicrev == BGE_ASICREV_BCM5720) {
+       if (sc->bnx_asicrev == BGE_ASICREV_BCM5720 ||
+           sc->bnx_asicrev == BGE_ASICREV_BCM5762) {
                dma_options |= BGE_MODECTL_BYTESWAP_B2HRX_DATA |
                    BGE_MODECTL_WORDSWAP_B2HRX_DATA | BGE_MODECTL_B2HRX_ENABLE |
                    BGE_MODECTL_HTX2B_ENABLE;
@@ -4246,7 +4203,7 @@ bnx_dma_swap_options(struct bnx_softc *sc)
 }
 
 static int
-bnx_setup_tso(struct bnx_softc *sc, struct mbuf **mp,
+bnx_setup_tso(struct bnx_tx_ring *txr, struct mbuf **mp,
     uint16_t *mss0, uint16_t *flags0)
 {
        struct mbuf *m;
@@ -4292,3 +4249,380 @@ bnx_setup_tso(struct bnx_softc *sc, struct mbuf **mp,
 
        return 0;
 }
+
+/*
+ * Set up one TX ring: initialize its serializer, create the DMA tag and
+ * per-slot DMA maps for TX mbufs, and allocate the DMA memory backing
+ * the TX descriptor ring.  Returns 0 on success or a bus_dma(9) errno;
+ * on mbuf map creation failure, maps created so far and the tag are
+ * destroyed before returning.
+ */
+static int
+bnx_create_tx_ring(struct bnx_tx_ring *txr)
+{
+       bus_size_t txmaxsz, txmaxsegsz;
+       int i, error;
+
+       lwkt_serialize_init(&txr->bnx_tx_serialize);
+
+       /*
+        * Create DMA tag and maps for TX mbufs.
+        */
+       /* With TSO a TX "frame" can be a full IP packet plus VLAN header. */
+       if (txr->bnx_sc->bnx_flags & BNX_FLAG_TSO)
+               txmaxsz = IP_MAXPACKET + sizeof(struct ether_vlan_header);
+       else
+               txmaxsz = BNX_JUMBO_FRAMELEN;
+       /*
+        * NOTE(review): BCM57766 apparently cannot handle TX DMA segments
+        * larger than a cluster (MCLBYTES) -- confirm against chip errata.
+        */
+       if (txr->bnx_sc->bnx_asicrev == BGE_ASICREV_BCM57766)
+               txmaxsegsz = MCLBYTES;
+       else
+               txmaxsegsz = PAGE_SIZE;
+       error = bus_dma_tag_create(txr->bnx_sc->bnx_cdata.bnx_parent_tag,
+           1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
+           txmaxsz, BNX_NSEG_NEW, txmaxsegsz,
+           BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
+           &txr->bnx_tx_mtag);
+       if (error) {
+               device_printf(txr->bnx_sc->bnx_dev,
+                   "could not create TX mbuf DMA tag\n");
+               return error;
+       }
+
+       for (i = 0; i < BGE_TX_RING_CNT; i++) {
+               error = bus_dmamap_create(txr->bnx_tx_mtag,
+                   BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
+                   &txr->bnx_tx_buf[i].bnx_tx_dmamap);
+               if (error) {
+                       int j;
+
+                       /* Unwind the maps created so far, then the tag. */
+                       for (j = 0; j < i; ++j) {
+                               bus_dmamap_destroy(txr->bnx_tx_mtag,
+                                   txr->bnx_tx_buf[j].bnx_tx_dmamap);
+                       }
+                       bus_dma_tag_destroy(txr->bnx_tx_mtag);
+                       /* NULL marks the tag as absent for destroy path. */
+                       txr->bnx_tx_mtag = NULL;
+
+                       device_printf(txr->bnx_sc->bnx_dev,
+                           "could not create TX mbuf DMA map\n");
+                       return error;
+               }
+       }
+
+       /*
+        * Create DMA stuffs for TX ring.
+        */
+       error = bnx_dma_block_alloc(txr->bnx_sc, BGE_TX_RING_SZ,
+           &txr->bnx_tx_ring_tag,
+           &txr->bnx_tx_ring_map,
+           (void *)&txr->bnx_tx_ring,
+           &txr->bnx_tx_ring_paddr);
+       if (error) {
+               device_printf(txr->bnx_sc->bnx_dev,
+                   "could not create TX ring\n");
+               return error;
+       }
+
+       /* Default TX tuning; bnx_tx_wreg batches doorbell writes. */
+       txr->bnx_tx_flags |= BNX_TX_FLAG_SHORTDMA;
+       txr->bnx_tx_wreg = BNX_TX_WREG_NSEGS;
+
+       return 0;
+}
+
+/*
+ * Release everything bnx_create_tx_ring() allocated: the per-slot TX
+ * mbuf DMA maps and their tag (all TX mbufs must already have been
+ * freed, asserted below), then the TX descriptor ring DMA block.
+ * Safe on a partially created ring (bnx_tx_mtag may be NULL).
+ */
+static void
+bnx_destroy_tx_ring(struct bnx_tx_ring *txr)
+{
+       /* Destroy TX mbuf DMA stuffs. */
+       if (txr->bnx_tx_mtag != NULL) {
+               int i;
+
+               for (i = 0; i < BGE_TX_RING_CNT; i++) {
+                       KKASSERT(txr->bnx_tx_buf[i].bnx_tx_mbuf == NULL);
+                       bus_dmamap_destroy(txr->bnx_tx_mtag,
+                           txr->bnx_tx_buf[i].bnx_tx_dmamap);
+               }
+               bus_dma_tag_destroy(txr->bnx_tx_mtag);
+       }
+
+       /* Destroy TX ring */
+       bnx_dma_block_free(txr->bnx_tx_ring_tag,
+           txr->bnx_tx_ring_map, txr->bnx_tx_ring);
+}
+
+/*
+ * Sysctl handler: get/set the TX "force defrag" flag.  The current
+ * value is reported from TX ring 0; on write, the new setting is
+ * applied to every TX ring while holding all ifnet serializers so the
+ * flags are not changed under a running TX path.
+ */
+static int
+bnx_sysctl_force_defrag(SYSCTL_HANDLER_ARGS)
+{
+       struct bnx_softc *sc = (void *)arg1;
+       struct ifnet *ifp = &sc->arpcom.ac_if;
+       struct bnx_tx_ring *txr = &sc->bnx_tx_ring[0];
+       int error, defrag, i;
+
+       /* All rings carry the same setting; ring 0 is representative. */
+       if (txr->bnx_tx_flags & BNX_TX_FLAG_FORCE_DEFRAG)
+               defrag = 1;
+       else
+               defrag = 0;
+
+       error = sysctl_handle_int(oidp, &defrag, 0, req);
+       if (error || req->newptr == NULL)
+               return error;
+
+       ifnet_serialize_all(ifp);
+       for (i = 0; i < sc->bnx_tx_ringcnt; ++i) {
+               txr = &sc->bnx_tx_ring[i];
+               if (defrag)
+                       txr->bnx_tx_flags |= BNX_TX_FLAG_FORCE_DEFRAG;
+               else
+                       txr->bnx_tx_flags &= ~BNX_TX_FLAG_FORCE_DEFRAG;
+       }
+       ifnet_deserialize_all(ifp);
+
+       return 0;
+}
+
+/*
+ * Sysctl handler: get/set the number of TX segments batched before the
+ * hardware doorbell register is written (bnx_tx_wreg).  Reported from
+ * TX ring 0; on write, applied to every TX ring under the ifnet
+ * serializers.
+ */
+static int
+bnx_sysctl_tx_wreg(SYSCTL_HANDLER_ARGS)
+{
+       struct bnx_softc *sc = (void *)arg1;
+       struct ifnet *ifp = &sc->arpcom.ac_if;
+       struct bnx_tx_ring *txr = &sc->bnx_tx_ring[0];
+       int error, tx_wreg, i;
+
+       tx_wreg = txr->bnx_tx_wreg;
+       error = sysctl_handle_int(oidp, &tx_wreg, 0, req);
+       if (error || req->newptr == NULL)
+               return error;
+
+       ifnet_serialize_all(ifp);
+       for (i = 0; i < sc->bnx_tx_ringcnt; ++i)
+               sc->bnx_tx_ring[i].bnx_tx_wreg = tx_wreg;
+       ifnet_deserialize_all(ifp);
+
+       return 0;
+}
+
+/*
+ * Set up one RX return ring: initialize its serializer, allocate the
+ * DMA block backing the return ring, and create a temporary mbuf DMA
+ * map used when loading new RX mbufs.  The mbuf DMA tag itself is
+ * shared with (shadowed from) the standard RX ring.  Returns 0 on
+ * success or a bus_dma(9) errno.
+ */
+static int
+bnx_create_rx_ret_ring(struct bnx_rx_ret_ring *ret)
+{
+       int error;
+
+       lwkt_serialize_init(&ret->bnx_rx_ret_serialize);
+
+       /*
+        * Create DMA stuffs for RX return ring.
+        */
+       error = bnx_dma_block_alloc(ret->bnx_sc,
+           BGE_RX_RTN_RING_SZ(BNX_RETURN_RING_CNT),
+           &ret->bnx_rx_ret_ring_tag,
+           &ret->bnx_rx_ret_ring_map,
+           (void *)&ret->bnx_rx_ret_ring,
+           &ret->bnx_rx_ret_ring_paddr);
+       if (error) {
+               device_printf(ret->bnx_sc->bnx_dev,
+                   "could not create RX ret ring\n");
+               return error;
+       }
+
+       /* Shadow standard ring's RX mbuf DMA tag */
+       ret->bnx_rx_mtag = ret->bnx_std->bnx_rx_mtag;
+
+       /*
+        * Create tmp DMA map for RX mbufs.
+        */
+       error = bus_dmamap_create(ret->bnx_rx_mtag, BUS_DMA_WAITOK,
+           &ret->bnx_rx_tmpmap);
+       if (error) {
+               device_printf(ret->bnx_sc->bnx_dev,
+                   "could not create tmp RX mbuf DMA map\n");
+               /*
+                * Clear the shadowed tag so the destroy path does not
+                * try to free a map that was never created.
+                */
+               ret->bnx_rx_mtag = NULL;
+               return error;
+       }
+       return 0;
+}
+
+/*
+ * Release everything bnx_create_rx_ret_ring() allocated: the temporary
+ * RX mbuf DMA map (only if the shadowed tag was set up) and the RX
+ * return ring DMA block.  The shadowed mbuf tag itself belongs to the
+ * standard RX ring and is not destroyed here.
+ */
+static void
+bnx_destroy_rx_ret_ring(struct bnx_rx_ret_ring *ret)
+{
+       /* Destroy tmp RX mbuf DMA map */
+       if (ret->bnx_rx_mtag != NULL)
+               bus_dmamap_destroy(ret->bnx_rx_mtag, ret->bnx_rx_tmpmap);
+
+       /* Destroy RX return ring */
+       bnx_dma_block_free(ret->bnx_rx_ret_ring_tag,
+           ret->bnx_rx_ret_ring_map, ret->bnx_rx_ret_ring);
+}
+
+/*
+ * Allocate the device interrupt -- a single MSI or legacy INTx
+ * (bnx_intr_cnt is fixed at 1 here) -- and bind it to the first RX
+ * return ring / TX ring pair and the main serializer.  Also selects
+ * the interrupt handler variant and records the interrupt's CPU for
+ * the TX path.  Returns 0 on success or ENXIO if no IRQ resource
+ * could be allocated.
+ */
+static int
+bnx_alloc_intr(struct bnx_softc *sc)
+{
+       struct bnx_intr_data *intr;
+       u_int intr_flags;
+
+       sc->bnx_intr_cnt = 1;
+
+       intr = &sc->bnx_intr_data[0];
+       intr->bnx_sc = sc;
+       intr->bnx_ret = &sc->bnx_rx_ret_ring[0];
+       intr->bnx_txr = &sc->bnx_tx_ring[0];
+       intr->bnx_intr_serialize = &sc->bnx_main_serialize;
+       callout_init_mp(&intr->bnx_intr_timer);
+       intr->bnx_intr_check = bnx_check_intr;
+
+       sc->bnx_intr_type = pci_alloc_1intr(sc->bnx_dev, bnx_msi_enable,
+           &intr->bnx_intr_rid, &intr_flags);
+
+       intr->bnx_intr_res = bus_alloc_resource_any(sc->bnx_dev, SYS_RES_IRQ,
+           &intr->bnx_intr_rid, intr_flags);
+       if (intr->bnx_intr_res == NULL) {
+               device_printf(sc->bnx_dev, "could not alloc interrupt\n");
+               return ENXIO;
+       }
+
+       if (sc->bnx_intr_type == PCI_INTR_TYPE_MSI) {
+               /*
+                * NOTE(review): ONESHOT_MSI is set optimistically; the
+                * re-check below implies bnx_enable_msi() may clear it
+                * for chips that cannot do one-shot MSI -- confirm.
+                */
+               sc->bnx_flags |= BNX_FLAG_ONESHOT_MSI;
+               bnx_enable_msi(sc);
+
+               if (sc->bnx_flags & BNX_FLAG_ONESHOT_MSI) {
+                       intr->bnx_intr_func = bnx_msi_oneshot;
+                       if (bootverbose)
+                               device_printf(sc->bnx_dev, "oneshot MSI\n");
+               } else {
+                       intr->bnx_intr_func = bnx_msi;
+               }
+       } else {
+               intr->bnx_intr_func = bnx_intr_legacy;
+       }
+       intr->bnx_intr_arg = sc;
+       intr->bnx_intr_cpuid = rman_get_cpuid(intr->bnx_intr_res);
+
+       /* Run the TX path on the CPU that services the interrupt. */
+       intr->bnx_txr->bnx_tx_cpuid = intr->bnx_intr_cpuid;
+
+       return 0;
+}
+
+/*
+ * Install the interrupt handler for every allocated interrupt.  On
+ * failure, tear down the handlers already installed and return the
+ * bus_setup_intr error.  Each handler runs under its per-interrupt
+ * serializer.
+ */
+static int
+bnx_setup_intr(struct bnx_softc *sc)
+{
+       int error, i;
+
+       for (i = 0; i < sc->bnx_intr_cnt; ++i) {
+               struct bnx_intr_data *intr = &sc->bnx_intr_data[i];
+
+               error = bus_setup_intr_descr(sc->bnx_dev, intr->bnx_intr_res,
+                   INTR_MPSAFE, intr->bnx_intr_func, intr->bnx_intr_arg,
+                   &intr->bnx_intr_hand, intr->bnx_intr_serialize,
+                   intr->bnx_intr_desc);
+               if (error) {
+                       device_printf(sc->bnx_dev,
+                           "could not set up %dth intr\n", i);
+                       /* Undo the i handlers installed before this one. */
+                       bnx_teardown_intr(sc, i);
+                       return error;
+               }
+       }
+       return 0;
+}
+
+/*
+ * Tear down the first @cnt installed interrupt handlers.  Passing a
+ * partial count allows bnx_setup_intr() to unwind after a mid-loop
+ * failure.
+ */
+static void
+bnx_teardown_intr(struct bnx_softc *sc, int cnt)
+{
+       int i;
+
+       for (i = 0; i < cnt; ++i) {
+               struct bnx_intr_data *intr = &sc->bnx_intr_data[i];
+
+               bus_teardown_intr(sc->bnx_dev, intr->bnx_intr_res,
+                   intr->bnx_intr_hand);
+       }
+}
+
+/*
+ * Release the interrupt resources allocated by bnx_alloc_intr(): the
+ * IRQ resource (if any) and, for MSI, the message itself.  Only the
+ * single-interrupt configuration is supported, which the assertion
+ * enforces.
+ */
+static void
+bnx_free_intr(struct bnx_softc *sc)
+{
+       struct bnx_intr_data *intr;
+
+       KKASSERT(sc->bnx_intr_cnt <= 1);
+       intr = &sc->bnx_intr_data[0];
+
+       if (intr->bnx_intr_res != NULL) {
+               bus_release_resource(sc->bnx_dev, SYS_RES_IRQ,
+                   intr->bnx_intr_rid, intr->bnx_intr_res);
+       }
+       if (sc->bnx_intr_type == PCI_INTR_TYPE_MSI)
+               pci_release_msi(sc->bnx_dev);
+}
+
+/*
+ * Build the flat array of serializer pointers used by the ifnet
+ * serialize callbacks: main, RX standard ring, each RX return ring,
+ * then each TX ring.  The array order defines the lock acquisition
+ * order for ifnet_serialize_array_enter() and must never change.
+ */
+static void
+bnx_setup_serialize(struct bnx_softc *sc)
+{
+       int i, j;
+
+       /*
+        * Allocate serializer array
+        */
+
+       /* Main + RX STD + TX + RX RET */
+       sc->bnx_serialize_cnt = 1 + 1 + sc->bnx_tx_ringcnt + sc->bnx_rx_retcnt;
+
+       sc->bnx_serialize =
+           kmalloc(sc->bnx_serialize_cnt * sizeof(struct lwkt_serialize *),
+               M_DEVBUF, M_WAITOK | M_ZERO);
+
+       /*
+        * Setup serializers
+        *
+        * NOTE: Order is critical
+        */
+
+       i = 0;
+
+       KKASSERT(i < sc->bnx_serialize_cnt);
+       sc->bnx_serialize[i++] = &sc->bnx_main_serialize;
+
+       KKASSERT(i < sc->bnx_serialize_cnt);
+       sc->bnx_serialize[i++] = &sc->bnx_rx_std_ring.bnx_rx_std_serialize;
+
+       for (j = 0; j < sc->bnx_rx_retcnt; ++j) {
+               KKASSERT(i < sc->bnx_serialize_cnt);
+               sc->bnx_serialize[i++] =
+                   &sc->bnx_rx_ret_ring[j].bnx_rx_ret_serialize;
+       }
+
+       for (j = 0; j < sc->bnx_tx_ringcnt; ++j) {
+               KKASSERT(i < sc->bnx_serialize_cnt);
+               sc->bnx_serialize[i++] =
+                   &sc->bnx_tx_ring[j].bnx_tx_serialize;
+       }
+
+       /* Every slot must be filled -- counts above must stay in sync. */
+       KKASSERT(i == sc->bnx_serialize_cnt);
+}
+
+/*
+ * ifnet if_serialize callback: enter the driver serializers selected
+ * by @slz, in the fixed order established by bnx_setup_serialize().
+ */
+static void
+bnx_serialize(struct ifnet *ifp, enum ifnet_serialize slz)
+{
+       struct bnx_softc *sc = ifp->if_softc;
+
+       ifnet_serialize_array_enter(sc->bnx_serialize,
+           sc->bnx_serialize_cnt, slz);
+}
+
+/*
+ * ifnet if_deserialize callback: release the serializers selected by
+ * @slz, mirroring bnx_serialize().
+ */
+static void
+bnx_deserialize(struct ifnet *ifp, enum ifnet_serialize slz)
+{
+       struct bnx_softc *sc = ifp->if_softc;
+
+       ifnet_serialize_array_exit(sc->bnx_serialize,
+           sc->bnx_serialize_cnt, slz);
+}
+
+/*
+ * ifnet if_tryserialize callback: non-blocking attempt to enter the
+ * serializers selected by @slz; returns the result of the array try.
+ */
+static int
+bnx_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz)
+{
+       struct bnx_softc *sc = ifp->if_softc;
+
+       return ifnet_serialize_array_try(sc->bnx_serialize,
+           sc->bnx_serialize_cnt, slz);
+}
+
+#ifdef INVARIANTS
+
+/*
+ * Debug-only ifnet callback (INVARIANTS kernels): assert that the
+ * serializers selected by @slz are (or are not, per @serialized) held
+ * by the current thread.
+ */
+static void
+bnx_serialize_assert(struct ifnet *ifp, enum ifnet_serialize slz,
+    boolean_t serialized)
+{
+       struct bnx_softc *sc = ifp->if_softc;
+
+       ifnet_serialize_array_assert(sc->bnx_serialize, sc->bnx_serialize_cnt,
+           slz, serialized);
+}
+
+#endif /* INVARIANTS */