X-Git-Url: https://gitweb.dragonflybsd.org/dragonfly.git/blobdiff_plain/aaea8fd38687ebe012024c20a7d5b8e15c61d3b3..329f90164e86186463c66a8e4d620ae5c3e7c821:/sys/dev/netif/bnx/if_bnx.c diff --git a/sys/dev/netif/bnx/if_bnx.c b/sys/dev/netif/bnx/if_bnx.c index bd00e98bad..170fcaca0f 100644 --- a/sys/dev/netif/bnx/if_bnx.c +++ b/sys/dev/netif/bnx/if_bnx.c @@ -33,8 +33,8 @@ * $FreeBSD: src/sys/dev/bge/if_bge.c,v 1.3.2.39 2005/07/03 03:41:18 silby Exp $ */ - -#include "opt_polling.h" +#include "opt_bnx.h" +#include "opt_ifpoll.h" #include #include @@ -50,12 +50,16 @@ #include #include +#include +#include + #include #include #include #include #include #include +#include #include #include #include @@ -75,7 +79,9 @@ /* "device miibus" required. See GENERIC if you get errors here. */ #include "miibus_if.h" -#define BNX_CSUM_FEATURES (CSUM_IP | CSUM_TCP) +#define BNX_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) + +#define BNX_INTR_CKINTVL ((10 * hz) / 1000) /* 10ms */ static const struct bnx_type { uint16_t bnx_vid; @@ -84,6 +90,8 @@ static const struct bnx_type { } bnx_devs[] = { { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5717, "Broadcom BCM5717 Gigabit Ethernet" }, + { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5717C, + "Broadcom BCM5717C Gigabit Ethernet" }, { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5718, "Broadcom BCM5718 Gigabit Ethernet" }, { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5719, @@ -91,16 +99,31 @@ static const struct bnx_type { { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5720_ALT, "Broadcom BCM5720 Gigabit Ethernet" }, + { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5725, + "Broadcom BCM5725 Gigabit Ethernet" }, + { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5727, + "Broadcom BCM5727 Gigabit Ethernet" }, + { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5762, + "Broadcom BCM5762 Gigabit Ethernet" }, + { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57761, "Broadcom BCM57761 Gigabit Ethernet" }, - { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57781, - "Broadcom BCM57781 Gigabit Ethernet" }, - { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57791, - "Broadcom BCM57791 Fast Ethernet" }, + { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57762, + "Broadcom BCM57762 Gigabit Ethernet" }, { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57765, "Broadcom BCM57765 Gigabit Ethernet" }, + { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57766, + "Broadcom BCM57766 Gigabit Ethernet" }, + { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57781, + "Broadcom BCM57781 Gigabit Ethernet" }, + { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57782, + "Broadcom BCM57782 Gigabit Ethernet" }, { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57785, "Broadcom BCM57785 Gigabit Ethernet" }, + { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57786, + "Broadcom BCM57786 Gigabit Ethernet" }, + { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57791, + "Broadcom BCM57791 Fast Ethernet" }, { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57795, "Broadcom BCM57795 Fast Ethernet" }, @@ -109,6 +132,9 @@ static const struct bnx_type { #define BNX_IS_JUMBO_CAPABLE(sc) ((sc)->bnx_flags & BNX_FLAG_JUMBO) #define BNX_IS_5717_PLUS(sc) ((sc)->bnx_flags & BNX_FLAG_5717_PLUS) +#define BNX_IS_57765_PLUS(sc) ((sc)->bnx_flags & BNX_FLAG_57765_PLUS) +#define BNX_IS_57765_FAMILY(sc) \ + ((sc)->bnx_flags & BNX_FLAG_57765_FAMILY) typedef int (*bnx_eaddr_fcn_t)(struct bnx_softc *, uint8_t[]); @@ -122,8 +148,9 @@ static int bnx_miibus_readreg(device_t, int, int); static int bnx_miibus_writereg(device_t, int, int, int); static void 
bnx_miibus_statchg(device_t); -#ifdef DEVICE_POLLING -static void bnx_poll(struct ifnet *ifp, enum poll_cmd cmd, int count); +#ifdef IFPOLL_ENABLE +static void bnx_npoll(struct ifnet *, struct ifpoll_info *); +static void bnx_npoll_compat(struct ifnet *, void *, int); #endif static void bnx_intr_legacy(void *); static void bnx_msi(void *); @@ -131,10 +158,15 @@ static void bnx_msi_oneshot(void *); static void bnx_intr(struct bnx_softc *); static void bnx_enable_intr(struct bnx_softc *); static void bnx_disable_intr(struct bnx_softc *); -static void bnx_txeof(struct bnx_softc *, uint16_t); -static void bnx_rxeof(struct bnx_softc *, uint16_t); - -static void bnx_start(struct ifnet *); +static void bnx_txeof(struct bnx_tx_ring *, uint16_t); +static void bnx_rxeof(struct bnx_rx_ret_ring *, uint16_t, int); +static int bnx_alloc_intr(struct bnx_softc *); +static int bnx_setup_intr(struct bnx_softc *); +static void bnx_free_intr(struct bnx_softc *); +static void bnx_teardown_intr(struct bnx_softc *, int); +static void bnx_check_intr(void *); + +static void bnx_start(struct ifnet *, struct ifaltq_subque *); static int bnx_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *); static void bnx_init(void *); static void bnx_stop(struct bnx_softc *); @@ -142,6 +174,13 @@ static void bnx_watchdog(struct ifnet *); static int bnx_ifmedia_upd(struct ifnet *); static void bnx_ifmedia_sts(struct ifnet *, struct ifmediareq *); static void bnx_tick(void *); +static void bnx_serialize(struct ifnet *, enum ifnet_serialize); +static void bnx_deserialize(struct ifnet *, enum ifnet_serialize); +static int bnx_tryserialize(struct ifnet *, enum ifnet_serialize); +#ifdef INVARIANTS +static void bnx_serialize_assert(struct ifnet *, enum ifnet_serialize, + boolean_t); +#endif static int bnx_alloc_jumbo_mem(struct bnx_softc *); static void bnx_free_jumbo_mem(struct bnx_softc *); @@ -149,24 +188,32 @@ static struct bnx_jslot *bnx_jalloc(struct bnx_softc *); static void bnx_jfree(void *); static void bnx_jref(void *); -static int bnx_newbuf_std(struct bnx_softc *, int, int); +static int bnx_newbuf_std(struct bnx_rx_ret_ring *, int, int); static int bnx_newbuf_jumbo(struct bnx_softc *, int, int); -static void bnx_setup_rxdesc_std(struct bnx_softc *, int); +static void bnx_setup_rxdesc_std(struct bnx_rx_std_ring *, int); static void bnx_setup_rxdesc_jumbo(struct bnx_softc *, int); -static int bnx_init_rx_ring_std(struct bnx_softc *); -static void bnx_free_rx_ring_std(struct bnx_softc *); +static int bnx_init_rx_ring_std(struct bnx_rx_std_ring *); +static void bnx_free_rx_ring_std(struct bnx_rx_std_ring *); static int bnx_init_rx_ring_jumbo(struct bnx_softc *); static void bnx_free_rx_ring_jumbo(struct bnx_softc *); -static void bnx_free_tx_ring(struct bnx_softc *); -static int bnx_init_tx_ring(struct bnx_softc *); -static int bnx_dma_alloc(struct bnx_softc *); +static void bnx_free_tx_ring(struct bnx_tx_ring *); +static int bnx_init_tx_ring(struct bnx_tx_ring *); +static int bnx_create_tx_ring(struct bnx_tx_ring *); +static void bnx_destroy_tx_ring(struct bnx_tx_ring *); +static int bnx_create_rx_ret_ring(struct bnx_rx_ret_ring *); +static void bnx_destroy_rx_ret_ring(struct bnx_rx_ret_ring *); +static int bnx_dma_alloc(device_t); static void bnx_dma_free(struct bnx_softc *); static int bnx_dma_block_alloc(struct bnx_softc *, bus_size_t, bus_dma_tag_t *, bus_dmamap_t *, void **, bus_addr_t *); static void bnx_dma_block_free(bus_dma_tag_t, bus_dmamap_t, void *); static struct mbuf * bnx_defrag_shortdma(struct mbuf *); 
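The prototype churn above is the core of this patch: single-queue state that used to live directly in bnx_softc (bnx_txcnt, the bnx_rx_std_chain[] array, per-softc DMA maps) moves into dedicated ring objects, and bnx_txeof()/bnx_rxeof()/bnx_encap() now take those objects instead of the softc. A condensed sketch of the TX-ring object these prototypes imply; field names are inferred from their uses later in this diff, and the authoritative definition in the driver's header carries more members:

/*
 * Sketch only: per-ring TX state as suggested by this diff, not the
 * real structure definition from the bnx header.
 */
struct bnx_tx_ring_sketch {
	struct bnx_softc	*bnx_sc;	/* backpointer to the softc */
	struct lwkt_serialize	bnx_tx_serialize; /* per-ring serializer */
	struct bge_tx_bd	*bnx_tx_ring;	/* TX buffer descriptor array */
	bus_addr_t		bnx_tx_ring_paddr; /* bus address of that array */
	struct bnx_tx_buf	bnx_tx_buf[BGE_TX_RING_CNT]; /* mbuf + DMA map per BD */
	uint32_t		bnx_tx_prodidx;	/* next BD slot to fill */
	uint16_t		bnx_tx_saved_considx; /* last consumer index seen */
	int			bnx_tx_cnt;	/* BDs currently in flight */
	uint32_t		bnx_tx_mbx;	/* producer mailbox register */
	int			bnx_tx_cpuid;	/* CPU this ring is bound to */
	uint32_t		bnx_tx_flags;	/* BNX_TX_FLAG_* bits */
};

Giving every ring its own serializer and CPU binding is what makes the new bnx_serialize()/bnx_npoll() entry points workable: the main serializer covers link state and the status block, while each TX ring and RX return ring can be drained independently.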
-static int bnx_encap(struct bnx_softc *, struct mbuf **, uint32_t *); +static int bnx_encap(struct bnx_tx_ring *, struct mbuf **, + uint32_t *, int *); +static int bnx_setup_tso(struct bnx_tx_ring *, struct mbuf **, + uint16_t *, uint16_t *); +static void bnx_setup_serialize(struct bnx_softc *); static void bnx_reset(struct bnx_softc *); static int bnx_chipinit(struct bnx_softc *); @@ -183,10 +230,8 @@ static void bnx_writemem_ind(struct bnx_softc *, uint32_t, uint32_t); #ifdef notdef static uint32_t bnx_readreg_ind(struct bnx_softc *, uint32_t); #endif -static void bnx_writereg_ind(struct bnx_softc *, uint32_t, uint32_t); static void bnx_writemem_direct(struct bnx_softc *, uint32_t, uint32_t); static void bnx_writembx(struct bnx_softc *, int, int); -static uint8_t bnx_nvram_getbyte(struct bnx_softc *, int, uint8_t *); static int bnx_read_nvram(struct bnx_softc *, caddr_t, int, int); static uint8_t bnx_eeprom_getbyte(struct bnx_softc *, uint32_t, uint8_t *); static int bnx_read_eeprom(struct bnx_softc *, caddr_t, uint32_t, size_t); @@ -202,6 +247,8 @@ static int bnx_get_eaddr_eeprom(struct bnx_softc *, uint8_t[]); static int bnx_get_eaddr(struct bnx_softc *, uint8_t[]); static void bnx_coal_change(struct bnx_softc *); +static int bnx_sysctl_force_defrag(SYSCTL_HANDLER_ARGS); +static int bnx_sysctl_tx_wreg(SYSCTL_HANDLER_ARGS); static int bnx_sysctl_rx_coal_ticks(SYSCTL_HANDLER_ARGS); static int bnx_sysctl_tx_coal_ticks(SYSCTL_HANDLER_ARGS); static int bnx_sysctl_rx_coal_bds(SYSCTL_HANDLER_ARGS); @@ -232,7 +279,7 @@ static device_method_t bnx_methods[] = { DEVMETHOD(miibus_writereg, bnx_miibus_writereg), DEVMETHOD(miibus_statchg, bnx_miibus_statchg), - { 0, 0 } + DEVMETHOD_END }; static DEFINE_CLASS_0(bnx, bnx_driver, bnx_methods, sizeof(struct bnx_softc)); @@ -248,10 +295,6 @@ bnx_readmem_ind(struct bnx_softc *sc, uint32_t off) device_t dev = sc->bnx_dev; uint32_t val; - if (sc->bnx_asicrev == BGE_ASICREV_BCM5906 && - off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4) - return 0; - pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4); val = pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4); pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4); @@ -263,35 +306,11 @@ bnx_writemem_ind(struct bnx_softc *sc, uint32_t off, uint32_t val) { device_t dev = sc->bnx_dev; - if (sc->bnx_asicrev == BGE_ASICREV_BCM5906 && - off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4) - return; - pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4); pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4); pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4); } -#ifdef notdef -static uint32_t -bnx_readreg_ind(struct bnx_softc *sc, uin32_t off) -{ - device_t dev = sc->bnx_dev; - - pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4); - return(pci_read_config(dev, BGE_PCI_REG_DATA, 4)); -} -#endif - -static void -bnx_writereg_ind(struct bnx_softc *sc, uint32_t off, uint32_t val) -{ - device_t dev = sc->bnx_dev; - - pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4); - pci_write_config(dev, BGE_PCI_REG_DATA, val, 4); -} - static void bnx_writemem_direct(struct bnx_softc *sc, uint32_t off, uint32_t val) { @@ -301,82 +320,16 @@ bnx_writemem_direct(struct bnx_softc *sc, uint32_t off, uint32_t val) static void bnx_writembx(struct bnx_softc *sc, int off, int val) { - if (sc->bnx_asicrev == BGE_ASICREV_BCM5906) - off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI; - CSR_WRITE_4(sc, off, val); } -static uint8_t -bnx_nvram_getbyte(struct bnx_softc *sc, int addr, uint8_t *dest) -{ - uint32_t access, byte = 0; - int i; - - /* Lock. 
*/ - CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1); - for (i = 0; i < 8000; i++) { - if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1) - break; - DELAY(20); - } - if (i == 8000) - return (1); - - /* Enable access. */ - access = CSR_READ_4(sc, BGE_NVRAM_ACCESS); - CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access | BGE_NVRAMACC_ENABLE); - - CSR_WRITE_4(sc, BGE_NVRAM_ADDR, addr & 0xfffffffc); - CSR_WRITE_4(sc, BGE_NVRAM_CMD, BGE_NVRAM_READCMD); - for (i = 0; i < BNX_TIMEOUT * 10; i++) { - DELAY(10); - if (CSR_READ_4(sc, BGE_NVRAM_CMD) & BGE_NVRAMCMD_DONE) { - DELAY(10); - break; - } - } - - if (i == BNX_TIMEOUT * 10) { - if_printf(&sc->arpcom.ac_if, "nvram read timed out\n"); - return (1); - } - - /* Get result. */ - byte = CSR_READ_4(sc, BGE_NVRAM_RDDATA); - - *dest = (bswap32(byte) >> ((addr % 4) * 8)) & 0xFF; - - /* Disable access. */ - CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access); - - /* Unlock. */ - CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_CLR1); - CSR_READ_4(sc, BGE_NVRAM_SWARB); - - return (0); -} - /* * Read a sequence of bytes from NVRAM. */ static int bnx_read_nvram(struct bnx_softc *sc, caddr_t dest, int off, int cnt) { - int err = 0, i; - uint8_t byte = 0; - - if (sc->bnx_asicrev != BGE_ASICREV_BCM5906) - return (1); - - for (i = 0; i < cnt; i++) { - err = bnx_nvram_getbyte(sc, off + i, &byte); - if (err) - break; - *(dest + i) = byte; - } - - return (err ? 1 : 0); + return (1); } /* @@ -502,10 +455,6 @@ bnx_miibus_writereg(device_t dev, int phy, int reg, int val) KASSERT(phy == sc->bnx_phyno, ("invalid phyno %d, should be %d", phy, sc->bnx_phyno)); - if (sc->bnx_asicrev == BGE_ASICREV_BCM5906 && - (reg == BRGPHY_MII_1000CTL || reg == BRGPHY_MII_AUXCTL)) - return 0; - /* Clear the autopoll bit if set, otherwise may trigger PCI errors. */ if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) { CSR_WRITE_4(sc, BGE_MI_MODE, @@ -557,10 +506,7 @@ bnx_miibus_statchg(device_t dev) case IFM_1000_T: case IFM_1000_SX: case IFM_2500_SX: - if (sc->bnx_asicrev != BGE_ASICREV_BCM5906) - sc->bnx_link = 1; - else - sc->bnx_link = 0; + sc->bnx_link = 1; break; default: sc->bnx_link = 0; @@ -759,12 +705,13 @@ bnx_jfree(void *arg) * Intialize a standard receive ring descriptor. */ static int -bnx_newbuf_std(struct bnx_softc *sc, int i, int init) +bnx_newbuf_std(struct bnx_rx_ret_ring *ret, int i, int init) { struct mbuf *m_new = NULL; bus_dma_segment_t seg; bus_dmamap_t map; int error, nsegs; + struct bnx_rx_buf *rb; m_new = m_getcl(init ? 
MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR); if (m_new == NULL) @@ -772,45 +719,44 @@ bnx_newbuf_std(struct bnx_softc *sc, int i, int init) m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; m_adj(m_new, ETHER_ALIGN); - error = bus_dmamap_load_mbuf_segment(sc->bnx_cdata.bnx_rx_mtag, - sc->bnx_cdata.bnx_rx_tmpmap, m_new, - &seg, 1, &nsegs, BUS_DMA_NOWAIT); + error = bus_dmamap_load_mbuf_segment(ret->bnx_rx_mtag, + ret->bnx_rx_tmpmap, m_new, &seg, 1, &nsegs, BUS_DMA_NOWAIT); if (error) { m_freem(m_new); return error; } + rb = &ret->bnx_std->bnx_rx_std_buf[i]; + if (!init) { - bus_dmamap_sync(sc->bnx_cdata.bnx_rx_mtag, - sc->bnx_cdata.bnx_rx_std_dmamap[i], - BUS_DMASYNC_POSTREAD); - bus_dmamap_unload(sc->bnx_cdata.bnx_rx_mtag, - sc->bnx_cdata.bnx_rx_std_dmamap[i]); + bus_dmamap_sync(ret->bnx_rx_mtag, rb->bnx_rx_dmamap, + BUS_DMASYNC_POSTREAD); + bus_dmamap_unload(ret->bnx_rx_mtag, rb->bnx_rx_dmamap); } - map = sc->bnx_cdata.bnx_rx_tmpmap; - sc->bnx_cdata.bnx_rx_tmpmap = sc->bnx_cdata.bnx_rx_std_dmamap[i]; - sc->bnx_cdata.bnx_rx_std_dmamap[i] = map; + map = ret->bnx_rx_tmpmap; + ret->bnx_rx_tmpmap = rb->bnx_rx_dmamap; + rb->bnx_rx_dmamap = map; - sc->bnx_cdata.bnx_rx_std_chain[i].bnx_mbuf = m_new; - sc->bnx_cdata.bnx_rx_std_chain[i].bnx_paddr = seg.ds_addr; + rb->bnx_rx_mbuf = m_new; + rb->bnx_rx_paddr = seg.ds_addr; - bnx_setup_rxdesc_std(sc, i); + bnx_setup_rxdesc_std(ret->bnx_std, i); return 0; } static void -bnx_setup_rxdesc_std(struct bnx_softc *sc, int i) +bnx_setup_rxdesc_std(struct bnx_rx_std_ring *std, int i) { - struct bnx_rxchain *rc; + const struct bnx_rx_buf *rb; struct bge_rx_bd *r; - rc = &sc->bnx_cdata.bnx_rx_std_chain[i]; - r = &sc->bnx_ldata.bnx_rx_std_ring[i]; + rb = &std->bnx_rx_std_buf[i]; + r = &std->bnx_rx_std_ring[i]; - r->bge_addr.bge_addr_lo = BGE_ADDR_LO(rc->bnx_paddr); - r->bge_addr.bge_addr_hi = BGE_ADDR_HI(rc->bnx_paddr); - r->bge_len = rc->bnx_mbuf->m_len; + r->bge_addr.bge_addr_lo = BGE_ADDR_LO(rb->bnx_rx_paddr); + r->bge_addr.bge_addr_hi = BGE_ADDR_HI(rb->bnx_rx_paddr); + r->bge_len = rb->bnx_rx_mbuf->m_len; r->bge_idx = i; r->bge_flags = BGE_RXBDFLAG_END; } @@ -855,8 +801,8 @@ bnx_newbuf_jumbo(struct bnx_softc *sc, int i, int init) paddr += ETHER_ALIGN; /* Save necessary information */ - sc->bnx_cdata.bnx_rx_jumbo_chain[i].bnx_mbuf = m_new; - sc->bnx_cdata.bnx_rx_jumbo_chain[i].bnx_paddr = paddr; + sc->bnx_cdata.bnx_rx_jumbo_chain[i].bnx_rx_mbuf = m_new; + sc->bnx_cdata.bnx_rx_jumbo_chain[i].bnx_rx_paddr = paddr; /* Set up the descriptor. 
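A detail worth calling out in bnx_newbuf_std() above: the replacement mbuf is loaded into a ring-wide spare DMA map (bnx_rx_tmpmap) first, not into the slot's own map. If the load fails, the slot's current mbuf/map pairing is still intact and the old buffer can simply be recycled; only on success are the two maps swapped, so the freshly loaded map stays with the slot and the retired map becomes the new spare. The idiom in isolation, with the surrounding bookkeeping elided:

/* Spare-map idiom from bnx_newbuf_std(); sketch for clarity only. */
error = bus_dmamap_load_mbuf_segment(ret->bnx_rx_mtag, ret->bnx_rx_tmpmap,
    m_new, &seg, 1, &nsegs, BUS_DMA_NOWAIT);
if (error) {
	m_freem(m_new);		/* slot's old mbuf and mapping stay valid */
	return error;
}
bus_dmamap_unload(ret->bnx_rx_mtag, rb->bnx_rx_dmamap); /* retire old mapping */
map = ret->bnx_rx_tmpmap;		/* loaded map becomes the slot's map... */
ret->bnx_rx_tmpmap = rb->bnx_rx_dmamap;	/* ...old map becomes the spare */
rb->bnx_rx_dmamap = map;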
*/ bnx_setup_rxdesc_jumbo(sc, i); @@ -867,51 +813,50 @@ static void bnx_setup_rxdesc_jumbo(struct bnx_softc *sc, int i) { struct bge_rx_bd *r; - struct bnx_rxchain *rc; + struct bnx_rx_buf *rc; r = &sc->bnx_ldata.bnx_rx_jumbo_ring[i]; rc = &sc->bnx_cdata.bnx_rx_jumbo_chain[i]; - r->bge_addr.bge_addr_lo = BGE_ADDR_LO(rc->bnx_paddr); - r->bge_addr.bge_addr_hi = BGE_ADDR_HI(rc->bnx_paddr); - r->bge_len = rc->bnx_mbuf->m_len; + r->bge_addr.bge_addr_lo = BGE_ADDR_LO(rc->bnx_rx_paddr); + r->bge_addr.bge_addr_hi = BGE_ADDR_HI(rc->bnx_rx_paddr); + r->bge_len = rc->bnx_rx_mbuf->m_len; r->bge_idx = i; r->bge_flags = BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING; } static int -bnx_init_rx_ring_std(struct bnx_softc *sc) +bnx_init_rx_ring_std(struct bnx_rx_std_ring *std) { int i, error; for (i = 0; i < BGE_STD_RX_RING_CNT; i++) { - error = bnx_newbuf_std(sc, i, 1); + /* Use the first RX return ring's tmp RX mbuf DMA map */ + error = bnx_newbuf_std(&std->bnx_sc->bnx_rx_ret_ring[0], i, 1); if (error) return error; - }; + } - sc->bnx_std = BGE_STD_RX_RING_CNT - 1; - bnx_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bnx_std); + std->bnx_rx_std = BGE_STD_RX_RING_CNT - 1; + bnx_writembx(std->bnx_sc, BGE_MBX_RX_STD_PROD_LO, std->bnx_rx_std); return(0); } static void -bnx_free_rx_ring_std(struct bnx_softc *sc) +bnx_free_rx_ring_std(struct bnx_rx_std_ring *std) { int i; for (i = 0; i < BGE_STD_RX_RING_CNT; i++) { - struct bnx_rxchain *rc = &sc->bnx_cdata.bnx_rx_std_chain[i]; + struct bnx_rx_buf *rb = &std->bnx_rx_std_buf[i]; - if (rc->bnx_mbuf != NULL) { - bus_dmamap_unload(sc->bnx_cdata.bnx_rx_mtag, - sc->bnx_cdata.bnx_rx_std_dmamap[i]); - m_freem(rc->bnx_mbuf); - rc->bnx_mbuf = NULL; + if (rb->bnx_rx_mbuf != NULL) { + bus_dmamap_unload(std->bnx_rx_mtag, rb->bnx_rx_dmamap); + m_freem(rb->bnx_rx_mbuf); + rb->bnx_rx_mbuf = NULL; } - bzero(&sc->bnx_ldata.bnx_rx_std_ring[i], - sizeof(struct bge_rx_bd)); + bzero(&std->bnx_rx_std_ring[i], sizeof(struct bge_rx_bd)); } } @@ -925,7 +870,7 @@ bnx_init_rx_ring_jumbo(struct bnx_softc *sc) error = bnx_newbuf_jumbo(sc, i, 1); if (error) return error; - }; + } sc->bnx_jumbo = BGE_JUMBO_RX_RING_CNT - 1; @@ -944,11 +889,11 @@ bnx_free_rx_ring_jumbo(struct bnx_softc *sc) int i; for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { - struct bnx_rxchain *rc = &sc->bnx_cdata.bnx_rx_jumbo_chain[i]; + struct bnx_rx_buf *rc = &sc->bnx_cdata.bnx_rx_jumbo_chain[i]; - if (rc->bnx_mbuf != NULL) { - m_freem(rc->bnx_mbuf); - rc->bnx_mbuf = NULL; + if (rc->bnx_rx_mbuf != NULL) { + m_freem(rc->bnx_rx_mbuf); + rc->bnx_rx_mbuf = NULL; } bzero(&sc->bnx_ldata.bnx_rx_jumbo_ring[i], sizeof(struct bge_rx_bd)); @@ -956,32 +901,33 @@ bnx_free_rx_ring_jumbo(struct bnx_softc *sc) } static void -bnx_free_tx_ring(struct bnx_softc *sc) +bnx_free_tx_ring(struct bnx_tx_ring *txr) { int i; for (i = 0; i < BGE_TX_RING_CNT; i++) { - if (sc->bnx_cdata.bnx_tx_chain[i] != NULL) { - bus_dmamap_unload(sc->bnx_cdata.bnx_tx_mtag, - sc->bnx_cdata.bnx_tx_dmamap[i]); - m_freem(sc->bnx_cdata.bnx_tx_chain[i]); - sc->bnx_cdata.bnx_tx_chain[i] = NULL; + struct bnx_tx_buf *buf = &txr->bnx_tx_buf[i]; + + if (buf->bnx_tx_mbuf != NULL) { + bus_dmamap_unload(txr->bnx_tx_mtag, + buf->bnx_tx_dmamap); + m_freem(buf->bnx_tx_mbuf); + buf->bnx_tx_mbuf = NULL; } - bzero(&sc->bnx_ldata.bnx_tx_ring[i], - sizeof(struct bge_tx_bd)); + bzero(&txr->bnx_tx_ring[i], sizeof(struct bge_tx_bd)); } + txr->bnx_tx_saved_considx = BNX_TXCONS_UNSET; } static int -bnx_init_tx_ring(struct bnx_softc *sc) +bnx_init_tx_ring(struct bnx_tx_ring *txr) { - sc->bnx_txcnt = 0; - 
sc->bnx_tx_saved_considx = 0; - sc->bnx_tx_prodidx = 0; + txr->bnx_tx_cnt = 0; + txr->bnx_tx_saved_considx = 0; + txr->bnx_tx_prodidx = 0; /* Initialize transmit producer index for host-memory send ring. */ - bnx_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bnx_tx_prodidx); - bnx_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0); + bnx_writembx(txr->bnx_sc, txr->bnx_tx_mbx, txr->bnx_tx_prodidx); return(0); } @@ -1049,22 +995,76 @@ bnx_chipinit(struct bnx_softc *sc) i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t)) BNX_MEMWIN_WRITE(sc, i, 0); - /* Set up the PCI DMA control register. */ - dma_rw_ctl = BGE_PCI_READ_CMD | BGE_PCI_WRITE_CMD | - (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT); + if (BNX_IS_57765_FAMILY(sc)) { + uint32_t val; - if (BNX_IS_5717_PLUS(sc)) { - dma_rw_ctl &= ~BGE_PCIDMARWCTL_DIS_CACHE_ALIGNMENT; - if (sc->bnx_chipid == BGE_CHIPID_BCM57765_A0) - dma_rw_ctl &= ~BGE_PCIDMARWCTL_CRDRDR_RDMA_MRRS_MSK; - /* - * Enable HW workaround for controllers that misinterpret - * a status tag update and leave interrupts permanently - * disabled. - */ - if (sc->bnx_asicrev != BGE_ASICREV_BCM5717 && - sc->bnx_asicrev != BGE_ASICREV_BCM57765) - dma_rw_ctl |= BGE_PCIDMARWCTL_TAGGED_STATUS_WA; + if (sc->bnx_chipid == BGE_CHIPID_BCM57765_A0) { + mode_ctl = CSR_READ_4(sc, BGE_MODE_CTL); + val = mode_ctl & ~BGE_MODECTL_PCIE_PORTS; + + /* Access the lower 1K of PL PCI-E block registers. */ + CSR_WRITE_4(sc, BGE_MODE_CTL, + val | BGE_MODECTL_PCIE_PL_SEL); + + val = CSR_READ_4(sc, BGE_PCIE_PL_LO_PHYCTL5); + val |= BGE_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ; + CSR_WRITE_4(sc, BGE_PCIE_PL_LO_PHYCTL5, val); + + CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl); + } + if (sc->bnx_chiprev != BGE_CHIPREV_57765_AX) { + /* Fix transmit hangs */ + val = CSR_READ_4(sc, BGE_CPMU_PADRNG_CTL); + val |= BGE_CPMU_PADRNG_CTL_RDIV2; + CSR_WRITE_4(sc, BGE_CPMU_PADRNG_CTL, val); + + mode_ctl = CSR_READ_4(sc, BGE_MODE_CTL); + val = mode_ctl & ~BGE_MODECTL_PCIE_PORTS; + + /* Access the lower 1K of DL PCI-E block registers. */ + CSR_WRITE_4(sc, BGE_MODE_CTL, + val | BGE_MODECTL_PCIE_DL_SEL); + + val = CSR_READ_4(sc, BGE_PCIE_DL_LO_FTSMAX); + val &= ~BGE_PCIE_DL_LO_FTSMAX_MASK; + val |= BGE_PCIE_DL_LO_FTSMAX_VAL; + CSR_WRITE_4(sc, BGE_PCIE_DL_LO_FTSMAX, val); + + CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl); + } + + val = CSR_READ_4(sc, BGE_CPMU_LSPD_10MB_CLK); + val &= ~BGE_CPMU_LSPD_10MB_MACCLK_MASK; + val |= BGE_CPMU_LSPD_10MB_MACCLK_6_25; + CSR_WRITE_4(sc, BGE_CPMU_LSPD_10MB_CLK, val); + } + + /* + * Set up the PCI DMA control register. + */ + dma_rw_ctl = pci_read_config(sc->bnx_dev, BGE_PCI_DMA_RW_CTL, 4); + /* + * Disable 32bytes cache alignment for DMA write to host memory + * + * NOTE: + * 64bytes cache alignment for DMA write to host memory is still + * enabled. + */ + dma_rw_ctl |= BGE_PCIDMARWCTL_DIS_CACHE_ALIGNMENT; + if (sc->bnx_chipid == BGE_CHIPID_BCM57765_A0) + dma_rw_ctl &= ~BGE_PCIDMARWCTL_CRDRDR_RDMA_MRRS_MSK; + /* + * Enable HW workaround for controllers that misinterpret + * a status tag update and leave interrupts permanently + * disabled. 
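The bnx_chipinit() changes above repeat one idiom several times: read a CSR, clear a field, set the replacement bits, write the value back. Purely as illustration (no such helper exists in the driver; each call site open-codes the sequence):

/*
 * Hypothetical helper, illustration only: the read-modify-write
 * pattern applied above to BGE_CPMU_PADRNG_CTL, BGE_PCIE_DL_LO_FTSMAX
 * and BGE_CPMU_LSPD_10MB_CLK.
 */
static void
bnx_csr_rmw(struct bnx_softc *sc, uint32_t reg, uint32_t clrbits, uint32_t setbits)
{
	uint32_t val;

	val = CSR_READ_4(sc, reg);
	val &= ~clrbits;
	val |= setbits;
	CSR_WRITE_4(sc, reg, val);
}

With it, the 10Mbps MAC clock fix above would read bnx_csr_rmw(sc, BGE_CPMU_LSPD_10MB_CLK, BGE_CPMU_LSPD_10MB_MACCLK_MASK, BGE_CPMU_LSPD_10MB_MACCLK_6_25).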
+ */ + if (sc->bnx_asicrev != BGE_ASICREV_BCM5717 && + sc->bnx_asicrev != BGE_ASICREV_BCM5762 && + !BNX_IS_57765_FAMILY(sc)) + dma_rw_ctl |= BGE_PCIDMARWCTL_TAGGED_STATUS_WA; + if (bootverbose) { + if_printf(&sc->arpcom.ac_if, "DMA read/write %#x\n", + dma_rw_ctl); } pci_write_config(sc->bnx_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4); @@ -1086,21 +1086,14 @@ bnx_chipinit(struct bnx_softc *sc) /* Set the timer prescaler (always 66Mhz) */ CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/); - if (sc->bnx_asicrev == BGE_ASICREV_BCM5906) { - DELAY(40); /* XXX */ - - /* Put PHY into ready state */ - BNX_CLRBIT(sc, BGE_MISC_CFG, BGE_MISCCFG_EPHY_IDDQ); - CSR_READ_4(sc, BGE_MISC_CFG); /* Flush */ - DELAY(40); - } - return(0); } static int bnx_blockinit(struct bnx_softc *sc) { + struct bnx_tx_ring *txr = &sc->bnx_tx_ring[0]; + struct bnx_rx_ret_ring *ret = &sc->bnx_rx_ret_ring[0]; struct bge_rcb *rcb; bus_size_t vrcb; bge_hostaddr taddr; @@ -1116,7 +1109,7 @@ bnx_blockinit(struct bnx_softc *sc) CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0); /* Configure mbuf pool watermarks */ - if (BNX_IS_5717_PLUS(sc)) { + if (BNX_IS_57765_PLUS(sc)) { CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0); if (sc->arpcom.ac_if.if_mtu > ETHERMTU) { CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x7e); @@ -1125,10 +1118,6 @@ bnx_blockinit(struct bnx_softc *sc) CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x2a); CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xa0); } - } else if (sc->bnx_asicrev == BGE_ASICREV_BCM5906) { - CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0); - CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x04); - CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x10); } else { CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0); CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10); @@ -1149,6 +1138,10 @@ bnx_blockinit(struct bnx_softc *sc) */ if (sc->bnx_asicrev == BGE_ASICREV_BCM5719) val |= BGE_BMANMODE_NO_TX_UNDERRUN; + if (sc->bnx_asicrev == BGE_ASICREV_BCM5717 || + sc->bnx_chipid == BGE_CHIPID_BCM5719_A0 || + sc->bnx_chipid == BGE_CHIPID_BCM5720_A0) + val |= BGE_BMANMODE_LOMBUF_ATTN; CSR_WRITE_4(sc, BGE_BMAN_MODE, val); /* Poll for buffer manager start indication */ @@ -1215,10 +1208,10 @@ bnx_blockinit(struct bnx_softc *sc) /* Initialize the standard receive producer ring control block. 
*/ rcb = &sc->bnx_ldata.bnx_info.bnx_std_rx_rcb; rcb->bge_hostaddr.bge_addr_lo = - BGE_ADDR_LO(sc->bnx_ldata.bnx_rx_std_ring_paddr); + BGE_ADDR_LO(sc->bnx_rx_std_ring.bnx_rx_std_ring_paddr); rcb->bge_hostaddr.bge_addr_hi = - BGE_ADDR_HI(sc->bnx_ldata.bnx_rx_std_ring_paddr); - if (BNX_IS_5717_PLUS(sc)) { + BGE_ADDR_HI(sc->bnx_rx_std_ring.bnx_rx_std_ring_paddr); + if (BNX_IS_57765_PLUS(sc)) { /* * Bits 31-16: Programmable ring size (2048, 1024, 512, .., 32) * Bits 15-2 : Maximum RX frame size @@ -1236,9 +1229,7 @@ bnx_blockinit(struct bnx_softc *sc) */ rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0); } - if (sc->bnx_asicrev == BGE_ASICREV_BCM5717 || - sc->bnx_asicrev == BGE_ASICREV_BCM5719 || - sc->bnx_asicrev == BGE_ASICREV_BCM5720) + if (BNX_IS_5717_PLUS(sc)) rcb->bge_nicaddr = BGE_STD_RX_RINGS_5717; else rcb->bge_nicaddr = BGE_STD_RX_RINGS; @@ -1267,9 +1258,7 @@ bnx_blockinit(struct bnx_softc *sc) rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(BNX_MAX_FRAMELEN, BGE_RCB_FLAG_RING_DISABLED); - if (sc->bnx_asicrev == BGE_ASICREV_BCM5717 || - sc->bnx_asicrev == BGE_ASICREV_BCM5719 || - sc->bnx_asicrev == BGE_ASICREV_BCM5720) + if (BNX_IS_5717_PLUS(sc)) rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS_5717; else rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS; @@ -1285,15 +1274,6 @@ bnx_blockinit(struct bnx_softc *sc) bnx_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0); } - /* Choose de-pipeline mode for BCM5906 A0, A1 and A2. */ - if (sc->bnx_asicrev == BGE_ASICREV_BCM5906 && - (sc->bnx_chipid == BGE_CHIPID_BCM5906_A0 || - sc->bnx_chipid == BGE_CHIPID_BCM5906_A1 || - sc->bnx_chipid == BGE_CHIPID_BCM5906_A2)) { - CSR_WRITE_4(sc, BGE_ISO_PKT_TX, - (CSR_READ_4(sc, BGE_ISO_PKT_TX) & ~3) | 2); - } - /* * The BD ring replenish thresholds control how often the * hardware fetches new BD's from the producer rings in host @@ -1310,7 +1290,7 @@ bnx_blockinit(struct bnx_softc *sc) CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8); } - if (BNX_IS_5717_PLUS(sc)) { + if (BNX_IS_57765_PLUS(sc)) { CSR_WRITE_4(sc, BGE_STD_REPLENISH_LWM, 32); CSR_WRITE_4(sc, BGE_JMB_REPLENISH_LWM, 16); } @@ -1320,7 +1300,13 @@ bnx_blockinit(struct bnx_softc *sc) * in the flags field of all the TX send ring control blocks, * located in NIC memory. */ - limit = 1; + if (BNX_IS_5717_PLUS(sc)) + limit = 4; + else if (BNX_IS_57765_FAMILY(sc) || + sc->bnx_asicrev == BGE_ASICREV_BCM5762) + limit = 2; + else + limit = 1; vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB; for (i = 0; i < limit; i++) { RCB_WRITE_4(sc, vrcb, bge_maxlen_flags, @@ -1331,12 +1317,10 @@ bnx_blockinit(struct bnx_softc *sc) /* Configure send ring RCB 0 (we use only the first ring) */ vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB; - BGE_HOSTADDR(taddr, sc->bnx_ldata.bnx_tx_ring_paddr); + BGE_HOSTADDR(taddr, txr->bnx_tx_ring_paddr); RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi); RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo); - if (sc->bnx_asicrev == BGE_ASICREV_BCM5717 || - sc->bnx_asicrev == BGE_ASICREV_BCM5719 || - sc->bnx_asicrev == BGE_ASICREV_BCM5720) { + if (BNX_IS_5717_PLUS(sc)) { RCB_WRITE_4(sc, vrcb, bge_nicaddr, BGE_SEND_RING_5717); } else { RCB_WRITE_4(sc, vrcb, bge_nicaddr, @@ -1350,12 +1334,11 @@ bnx_blockinit(struct bnx_softc *sc) * 'ring disabled' bit in the flags field of all the receive * return ring control blocks, located in NIC memory. 
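The RCB comment above documents how 57765-plus parts reinterpret the "maxlen" half of the word: bits 31-16 select the ring size and bits 15-2 the maximum RX frame size, rather than a plain buffer length. The packing itself is just a shift-and-or; a sketch consistent with that layout (assumed from the comment, not copied from the register header):

/*
 * Assumed shape of BGE_RCB_MAXLEN_FLAGS(): length/ring-size field in
 * the upper 16 bits, RCB flag bits in the lower 16.
 */
#define RCB_MAXLEN_FLAGS(maxlen, flags)	(((uint32_t)(maxlen) << 16) | (flags))

Under this layout the BGE_RCB_MAXLEN_FLAGS(512, 0) seen above evaluates to 0x02000000: a 512-entry ring with no flag bits set.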
*/ - if (sc->bnx_asicrev == BGE_ASICREV_BCM5717 || - sc->bnx_asicrev == BGE_ASICREV_BCM5719 || - sc->bnx_asicrev == BGE_ASICREV_BCM5720) { + if (BNX_IS_5717_PLUS(sc)) { /* Should be 17, use 16 until we get an SRAM map. */ limit = 16; - } else if (sc->bnx_asicrev == BGE_ASICREV_BCM57765) { + } else if (BNX_IS_57765_FAMILY(sc) || + sc->bnx_asicrev == BGE_ASICREV_BCM5762) { limit = 4; } else { limit = 1; @@ -1379,12 +1362,12 @@ bnx_blockinit(struct bnx_softc *sc) * within the host, so the nicaddr field in the RCB isn't used. */ vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB; - BGE_HOSTADDR(taddr, sc->bnx_ldata.bnx_rx_return_ring_paddr); + BGE_HOSTADDR(taddr, ret->bnx_rx_ret_ring_paddr); RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi); RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo); RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0); RCB_WRITE_4(sc, vrcb, bge_maxlen_flags, - BGE_RCB_MAXLEN_FLAGS(sc->bnx_return_ring_cnt, 0)); + BGE_RCB_MAXLEN_FLAGS(BNX_RETURN_RING_CNT, 0)); /* Set random backoff seed for TX */ CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF, @@ -1395,7 +1378,8 @@ bnx_blockinit(struct bnx_softc *sc) /* Set inter-packet gap */ val = 0x2620; - if (sc->bnx_asicrev == BGE_ASICREV_BCM5720) { + if (sc->bnx_asicrev == BGE_ASICREV_BCM5720 || + sc->bnx_asicrev == BGE_ASICREV_BCM5762) { val |= CSR_READ_4(sc, BGE_TX_LENGTHS) & (BGE_TXLEN_JMB_FRM_LEN_MSK | BGE_TXLEN_CNT_DN_VAL_MSK); } @@ -1505,20 +1489,22 @@ bnx_blockinit(struct bnx_softc *sc) CSR_WRITE_4(sc, BGE_WDMA_MODE, val); DELAY(40); - if (sc->bnx_asicrev == BGE_ASICREV_BCM5761 || - sc->bnx_asicrev == BGE_ASICREV_BCM5784 || - sc->bnx_asicrev == BGE_ASICREV_BCM5785 || - sc->bnx_asicrev == BGE_ASICREV_BCM57780 || - BNX_IS_5717_PLUS(sc)) { - uint32_t dmactl; + if (BNX_IS_57765_PLUS(sc)) { + uint32_t dmactl, dmactl_reg; + + if (sc->bnx_asicrev == BGE_ASICREV_BCM5762) + dmactl_reg = BGE_RDMA_RSRVCTRL2; + else + dmactl_reg = BGE_RDMA_RSRVCTRL; - dmactl = CSR_READ_4(sc, BGE_RDMA_RSRVCTRL); + dmactl = CSR_READ_4(sc, dmactl_reg); /* * Adjust tx margin to prevent TX data corruption and * fix internal FIFO overflow. */ if (sc->bnx_asicrev == BGE_ASICREV_BCM5719 || - sc->bnx_asicrev == BGE_ASICREV_BCM5720) { + sc->bnx_asicrev == BGE_ASICREV_BCM5720 || + sc->bnx_asicrev == BGE_ASICREV_BCM5762) { dmactl &= ~(BGE_RDMA_RSRVCTRL_FIFO_LWM_MASK | BGE_RDMA_RSRVCTRL_FIFO_HWM_MASK | BGE_RDMA_RSRVCTRL_TXMRGN_MASK); @@ -1531,7 +1517,7 @@ bnx_blockinit(struct bnx_softc *sc) * The fix is to limit the number of RX BDs * the hardware would fetch at a fime. */ - CSR_WRITE_4(sc, BGE_RDMA_RSRVCTRL, + CSR_WRITE_4(sc, dmactl_reg, dmactl | BGE_RDMA_RSRVCTRL_FIFO_OFLW_FIX); } @@ -1540,13 +1526,21 @@ bnx_blockinit(struct bnx_softc *sc) CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) | BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_4K | BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K); - } else if (sc->bnx_asicrev == BGE_ASICREV_BCM5720) { + } else if (sc->bnx_asicrev == BGE_ASICREV_BCM5720 || + sc->bnx_asicrev == BGE_ASICREV_BCM5762) { + uint32_t ctrl_reg; + + if (sc->bnx_asicrev == BGE_ASICREV_BCM5762) + ctrl_reg = BGE_RDMA_LSO_CRPTEN_CTRL2; + else + ctrl_reg = BGE_RDMA_LSO_CRPTEN_CTRL; + /* * Allow 4KB burst length reads for non-LSO frames. * Enable 512B burst length reads for buffer descriptors. 
*/ - CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL, - CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) | + CSR_WRITE_4(sc, ctrl_reg, + CSR_READ_4(sc, ctrl_reg) | BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_512 | BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K); } @@ -1562,7 +1556,8 @@ bnx_blockinit(struct bnx_softc *sc) BGE_RDMAMODE_MBUF_RBD_CRPT_ATTN | BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN; } - if (sc->bnx_asicrev == BGE_ASICREV_BCM5720) { + if (sc->bnx_asicrev == BGE_ASICREV_BCM5720 || + sc->bnx_asicrev == BGE_ASICREV_BCM5762) { val |= CSR_READ_4(sc, BGE_RDMA_MODE) & BGE_RDMAMODE_H2BNC_VLAN_DET; /* @@ -1571,6 +1566,10 @@ bnx_blockinit(struct bnx_softc *sc) */ val &= ~BGE_RDMAMODE_MULT_DMA_RD_DIS; } + if (sc->bnx_asicrev == BGE_ASICREV_BCM57766) + val |= BGE_RDMAMODE_JMB_2K_MMRR; + if (sc->bnx_flags & BNX_FLAG_TSO) + val |= BGE_RDMAMODE_TSO4_ENABLE; val |= BGE_RDMAMODE_FIFO_LONG_BURST; CSR_WRITE_4(sc, BGE_RDMA_MODE, val); DELAY(40); @@ -1594,7 +1593,12 @@ bnx_blockinit(struct bnx_softc *sc) CSR_WRITE_4(sc, BGE_SDC_MODE, val); /* Turn on send data initiator state machine */ - CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE); + if (sc->bnx_flags & BNX_FLAG_TSO) { + CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE | + BGE_SDIMODE_HW_LSO_PRE_DMA); + } else { + CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE); + } /* Turn on send BD initiator state machine */ CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE); @@ -1678,21 +1682,22 @@ bnx_attach(device_t dev) { struct ifnet *ifp; struct bnx_softc *sc; - uint32_t hwcfg = 0, misccfg; - int error = 0, rid, capmask; + uint32_t hwcfg = 0; + int error = 0, rid, capmask, i; uint8_t ether_addr[ETHER_ADDR_LEN]; - uint16_t product, vendor; - driver_intr_t *intr_func; + uint16_t product; uintptr_t mii_priv = 0; - u_int intr_flags; +#ifdef BNX_TSO_DEBUG + char desc[32]; +#endif sc = device_get_softc(dev); sc->bnx_dev = dev; - callout_init(&sc->bnx_stat_timer); + callout_init_mp(&sc->bnx_stat_timer); lwkt_serialize_init(&sc->bnx_jslot_serializer); + lwkt_serialize_init(&sc->bnx_main_serialize); product = pci_get_device(dev); - vendor = pci_get_vendor(dev); #ifndef BURN_BRIDGES if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) { @@ -1738,17 +1743,25 @@ bnx_attach(device_t dev) switch (product) { case PCI_PRODUCT_BROADCOM_BCM5717: + case PCI_PRODUCT_BROADCOM_BCM5717C: case PCI_PRODUCT_BROADCOM_BCM5718: case PCI_PRODUCT_BROADCOM_BCM5719: case PCI_PRODUCT_BROADCOM_BCM5720_ALT: + case PCI_PRODUCT_BROADCOM_BCM5725: + case PCI_PRODUCT_BROADCOM_BCM5727: + case PCI_PRODUCT_BROADCOM_BCM5762: sc->bnx_chipid = pci_read_config(dev, BGE_PCI_GEN2_PRODID_ASICREV, 4); break; case PCI_PRODUCT_BROADCOM_BCM57761: + case PCI_PRODUCT_BROADCOM_BCM57762: case PCI_PRODUCT_BROADCOM_BCM57765: + case PCI_PRODUCT_BROADCOM_BCM57766: case PCI_PRODUCT_BROADCOM_BCM57781: + case PCI_PRODUCT_BROADCOM_BCM57782: case PCI_PRODUCT_BROADCOM_BCM57785: + case PCI_PRODUCT_BROADCOM_BCM57786: case PCI_PRODUCT_BROADCOM_BCM57791: case PCI_PRODUCT_BROADCOM_BCM57795: sc->bnx_chipid = pci_read_config(dev, @@ -1761,6 +1774,9 @@ bnx_attach(device_t dev) break; } } + if (sc->bnx_chipid == BGE_CHIPID_BCM5717_C0) + sc->bnx_chipid = BGE_CHIPID_BCM5720_A0; + sc->bnx_asicrev = BGE_ASICREV(sc->bnx_chipid); sc->bnx_chiprev = BGE_CHIPREV(sc->bnx_chipid); @@ -1768,16 +1784,43 @@ bnx_attach(device_t dev) case BGE_ASICREV_BCM5717: case BGE_ASICREV_BCM5719: case BGE_ASICREV_BCM5720: + sc->bnx_flags |= BNX_FLAG_5717_PLUS | BNX_FLAG_57765_PLUS; + break; + + case BGE_ASICREV_BCM5762: + sc->bnx_flags |= BNX_FLAG_57765_PLUS; + break; + case BGE_ASICREV_BCM57765: 
- sc->bnx_flags |= BNX_FLAG_5717_PLUS; + case BGE_ASICREV_BCM57766: + sc->bnx_flags |= BNX_FLAG_57765_FAMILY | BNX_FLAG_57765_PLUS; break; } - sc->bnx_flags |= BNX_FLAG_SHORTDMA; - if (sc->bnx_asicrev == BGE_ASICREV_BCM5906) - sc->bnx_flags |= BNX_FLAG_NO_EEPROM; + sc->bnx_flags |= BNX_FLAG_TSO; + if (sc->bnx_asicrev == BGE_ASICREV_BCM5719 && + sc->bnx_chipid == BGE_CHIPID_BCM5719_A0) + sc->bnx_flags &= ~BNX_FLAG_TSO; - misccfg = CSR_READ_4(sc, BGE_MISC_CFG) & BGE_MISCCFG_BOARD_ID_MASK; + if (sc->bnx_asicrev == BGE_ASICREV_BCM5717 || + BNX_IS_57765_FAMILY(sc)) { + /* + * All BCM57785 and BCM5718 families chips have a bug that + * under certain situation interrupt will not be enabled + * even if status tag is written to BGE_MBX_IRQ0_LO mailbox. + * + * While BCM5719 and BCM5720 have a hardware workaround + * which could fix the above bug. + * See the comment near BGE_PCIDMARWCTL_TAGGED_STATUS_WA in + * bnx_chipinit(). + * + * For the rest of the chips in these two families, we will + * have to poll the status block at high rate (10ms currently) + * to check whether the interrupt is hosed or not. + * See bnx_check_intr() for details. + */ + sc->bnx_flags |= BNX_FLAG_STATUSTAG_BUG; + } sc->bnx_pciecap = pci_get_pciecap_ptr(sc->bnx_dev); if (sc->bnx_asicrev == BGE_ASICREV_BCM5719 || @@ -1794,64 +1837,15 @@ bnx_attach(device_t dev) */ capmask = MII_CAPMASK_DEFAULT; - if ((sc->bnx_asicrev == BGE_ASICREV_BCM5703 && - (misccfg == 0x4000 || misccfg == 0x8000)) || - (sc->bnx_asicrev == BGE_ASICREV_BCM5705 && - vendor == PCI_VENDOR_BROADCOM && - (product == PCI_PRODUCT_BROADCOM_BCM5901 || - product == PCI_PRODUCT_BROADCOM_BCM5901A2 || - product == PCI_PRODUCT_BROADCOM_BCM5705F)) || - (vendor == PCI_VENDOR_BROADCOM && - (product == PCI_PRODUCT_BROADCOM_BCM5751F || - product == PCI_PRODUCT_BROADCOM_BCM5753F || - product == PCI_PRODUCT_BROADCOM_BCM5787F)) || - product == PCI_PRODUCT_BROADCOM_BCM57790 || - sc->bnx_asicrev == BGE_ASICREV_BCM5906) { + if (product == PCI_PRODUCT_BROADCOM_BCM57791 || + product == PCI_PRODUCT_BROADCOM_BCM57795) { /* 10/100 only */ capmask &= ~BMSR_EXTSTAT; } mii_priv |= BRGPHY_FLAG_WIRESPEED; - - if (sc->bnx_asicrev != BGE_ASICREV_BCM5906 && - sc->bnx_asicrev != BGE_ASICREV_BCM5717 && - sc->bnx_asicrev != BGE_ASICREV_BCM5719 && - sc->bnx_asicrev != BGE_ASICREV_BCM5720 && - sc->bnx_asicrev != BGE_ASICREV_BCM5785 && - sc->bnx_asicrev != BGE_ASICREV_BCM57765 && - sc->bnx_asicrev != BGE_ASICREV_BCM57780) { - if (sc->bnx_asicrev == BGE_ASICREV_BCM5755 || - sc->bnx_asicrev == BGE_ASICREV_BCM5761 || - sc->bnx_asicrev == BGE_ASICREV_BCM5784 || - sc->bnx_asicrev == BGE_ASICREV_BCM5787) { - if (product != PCI_PRODUCT_BROADCOM_BCM5722 && - product != PCI_PRODUCT_BROADCOM_BCM5756) - mii_priv |= BRGPHY_FLAG_JITTER_BUG; - if (product == PCI_PRODUCT_BROADCOM_BCM5755M) - mii_priv |= BRGPHY_FLAG_ADJUST_TRIM; - } else { - mii_priv |= BRGPHY_FLAG_BER_BUG; - } - } - - /* - * Allocate interrupt - */ - sc->bnx_irq_type = pci_alloc_1intr(dev, bnx_msi_enable, &sc->bnx_irq_rid, - &intr_flags); - - sc->bnx_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->bnx_irq_rid, - intr_flags); - if (sc->bnx_irq == NULL) { - device_printf(dev, "couldn't map interrupt\n"); - error = ENXIO; - goto fail; - } - - if (sc->bnx_irq_type == PCI_INTR_TYPE_MSI) { - sc->bnx_flags |= BNX_FLAG_ONESHOT_MSI; - bnx_enable_msi(sc); - } + if (sc->bnx_chipid == BGE_CHIPID_BCM5762_A0) + mii_priv |= BRGPHY_FLAG_5762_A0; /* Initialize if_name earlier, so if_printf could be used */ ifp = &sc->arpcom.ac_if; @@ -1875,44 +1869,63 @@ 
bnx_attach(device_t dev) goto fail; } - if (BNX_IS_5717_PLUS(sc)) { - sc->bnx_return_ring_cnt = BGE_RETURN_RING_CNT; - } else { - /* 5705/5750 limits RX return ring to 512 entries. */ - sc->bnx_return_ring_cnt = BGE_RETURN_RING_CNT_5705; - } + /* XXX */ + sc->bnx_tx_ringcnt = 1; + sc->bnx_rx_retcnt = 1; + + error = bnx_dma_alloc(dev); + if (error) + goto fail; - error = bnx_dma_alloc(sc); + /* + * Allocate interrupt + */ + error = bnx_alloc_intr(sc); if (error) goto fail; + /* Setup serializers */ + bnx_setup_serialize(sc); + /* Set default tuneable values. */ sc->bnx_rx_coal_ticks = BNX_RX_COAL_TICKS_DEF; sc->bnx_tx_coal_ticks = BNX_TX_COAL_TICKS_DEF; sc->bnx_rx_coal_bds = BNX_RX_COAL_BDS_DEF; sc->bnx_tx_coal_bds = BNX_TX_COAL_BDS_DEF; - sc->bnx_rx_coal_bds_int = BNX_RX_COAL_BDS_DEF; - sc->bnx_tx_coal_bds_int = BNX_TX_COAL_BDS_DEF; + sc->bnx_rx_coal_bds_int = BNX_RX_COAL_BDS_INT_DEF; + sc->bnx_tx_coal_bds_int = BNX_TX_COAL_BDS_INT_DEF; /* Set up ifnet structure */ ifp->if_softc = sc; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = bnx_ioctl; ifp->if_start = bnx_start; -#ifdef DEVICE_POLLING - ifp->if_poll = bnx_poll; +#ifdef IFPOLL_ENABLE + ifp->if_npoll = bnx_npoll; #endif ifp->if_watchdog = bnx_watchdog; ifp->if_init = bnx_init; + ifp->if_serialize = bnx_serialize; + ifp->if_deserialize = bnx_deserialize; + ifp->if_tryserialize = bnx_tryserialize; +#ifdef INVARIANTS + ifp->if_serialize_assert = bnx_serialize_assert; +#endif ifp->if_mtu = ETHERMTU; ifp->if_capabilities = IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU; - ifq_set_maxlen(&ifp->if_snd, BGE_TX_RING_CNT - 1); - ifq_set_ready(&ifp->if_snd); ifp->if_capabilities |= IFCAP_HWCSUM; ifp->if_hwassist = BNX_CSUM_FEATURES; + if (sc->bnx_flags & BNX_FLAG_TSO) { + ifp->if_capabilities |= IFCAP_TSO; + ifp->if_hwassist |= CSUM_TSO; + } ifp->if_capenable = ifp->if_capabilities; + ifq_set_maxlen(&ifp->if_snd, BGE_TX_RING_CNT - 1); + ifq_set_ready(&ifp->if_snd); + ifq_set_subq_cnt(&ifp->if_snd, sc->bnx_tx_ringcnt); + /* * Figure out what sort of media we have by checking the * hardware config word in the first 32k of NIC internal memory, @@ -1974,9 +1987,7 @@ bnx_attach(device_t dev) * Other addresses may respond but they are not * IEEE compliant PHYs and should be ignored. */ - if (sc->bnx_asicrev == BGE_ASICREV_BCM5717 || - sc->bnx_asicrev == BGE_ASICREV_BCM5719 || - sc->bnx_asicrev == BGE_ASICREV_BCM5720) { + if (BNX_IS_5717_PLUS(sc)) { int f; f = pci_get_function(dev); @@ -2075,11 +2086,18 @@ bnx_attach(device_t dev) * consumes a lot of CPU cycles, so leave it off by * default. 
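Just below, force_defrag changes from a plain SYSCTL_ADD_INT over a softc integer to a SYSCTL_ADD_PROC handler; a natural reason is that the knob now lives per TX ring (bnx_encap() later in this diff tests BNX_TX_FLAG_FORCE_DEFRAG in txr->bnx_tx_flags), so a write must fan out to every ring. One plausible shape for the handler, sketched from pieces shown in this diff; the committed body is not part of this hunk:

/* Sketch of a per-ring sysctl fan-out; not the committed implementation. */
static int
bnx_sysctl_force_defrag(SYSCTL_HANDLER_ARGS)
{
	struct bnx_softc *sc = arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error, defrag, i;

	defrag = (sc->bnx_tx_ring[0].bnx_tx_flags &
	    BNX_TX_FLAG_FORCE_DEFRAG) ? 1 : 0;
	error = sysctl_handle_int(oidp, &defrag, 0, req);
	if (error || req->newptr == NULL)
		return error;

	ifnet_serialize_all(ifp);
	for (i = 0; i < sc->bnx_tx_ringcnt; ++i) {
		struct bnx_tx_ring *txr = &sc->bnx_tx_ring[i];

		if (defrag)
			txr->bnx_tx_flags |= BNX_TX_FLAG_FORCE_DEFRAG;
		else
			txr->bnx_tx_flags &= ~BNX_TX_FLAG_FORCE_DEFRAG;
	}
	ifnet_deserialize_all(ifp);
	return 0;
}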
*/ - SYSCTL_ADD_INT(&sc->bnx_sysctl_ctx, + SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx, SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO, - "force_defrag", CTLFLAG_RW, &sc->bnx_force_defrag, 0, + "force_defrag", CTLTYPE_INT | CTLFLAG_RW, + sc, 0, bnx_sysctl_force_defrag, "I", "Force defragment on TX path"); + SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx, + SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO, + "tx_wreg", CTLTYPE_INT | CTLFLAG_RW, + sc, 0, bnx_sysctl_tx_wreg, "I", + "# of segments before writing to hardware register"); + SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx, SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO, "rx_coal_bds_int", CTLTYPE_INT | CTLFLAG_RW, @@ -2091,32 +2109,48 @@ bnx_attach(device_t dev) sc, 0, bnx_sysctl_tx_coal_bds_int, "I", "Transmit max coalesced BD count during interrupt."); +#ifdef BNX_TSO_DEBUG + for (i = 0; i < BNX_TSO_NSTATS; ++i) { + ksnprintf(desc, sizeof(desc), "tso%d", i + 1); + SYSCTL_ADD_ULONG(&sc->bnx_sysctl_ctx, + SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO, + desc, CTLFLAG_RW, &sc->bnx_tsosegs[i], ""); + } +#endif + /* * Call MI attach routine. */ ether_ifattach(ifp, ether_addr, NULL); - if (sc->bnx_irq_type == PCI_INTR_TYPE_MSI) { - if (sc->bnx_flags & BNX_FLAG_ONESHOT_MSI) { - intr_func = bnx_msi_oneshot; - if (bootverbose) - device_printf(dev, "oneshot MSI\n"); - } else { - intr_func = bnx_msi; - } - } else { - intr_func = bnx_intr_legacy; + /* Setup TX rings and subqueues */ + for (i = 0; i < sc->bnx_tx_ringcnt; ++i) { + struct ifaltq_subque *ifsq = ifq_get_subq(&ifp->if_snd, i); + struct bnx_tx_ring *txr = &sc->bnx_tx_ring[i]; + + ifsq_set_cpuid(ifsq, txr->bnx_tx_cpuid); + ifsq_set_hw_serialize(ifsq, &txr->bnx_tx_serialize); +#ifdef notyet + ifsq_set_priv(ifsq, txr); + txr->ifsq = ifsq; + + ifsq_watchdog_init(&txr->tx_watchdog, ifsq, bce_watchdog); +#endif } - error = bus_setup_intr(dev, sc->bnx_irq, INTR_MPSAFE, intr_func, sc, - &sc->bnx_intrhand, ifp->if_serializer); + +#ifdef IFPOLL_ENABLE + ifpoll_compat_setup(&sc->bnx_npoll, + &sc->bnx_sysctl_ctx, sc->bnx_sysctl_tree, + device_get_unit(dev), &sc->bnx_main_serialize); +#endif + + error = bnx_setup_intr(sc); if (error) { ether_ifdetach(ifp); - device_printf(dev, "couldn't set up irq\n"); goto fail; } - ifp->if_cpuid = rman_get_cpuid(sc->bnx_irq); - KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus); + sc->bnx_stat_cpuid = sc->bnx_intr_data[0].bnx_intr_cpuid; return(0); fail: @@ -2132,11 +2166,11 @@ bnx_detach(device_t dev) if (device_is_attached(dev)) { struct ifnet *ifp = &sc->arpcom.ac_if; - lwkt_serialize_enter(ifp->if_serializer); + ifnet_serialize_all(ifp); bnx_stop(sc); bnx_reset(sc); - bus_teardown_intr(dev, sc->bnx_irq, sc->bnx_intrhand); - lwkt_serialize_exit(ifp->if_serializer); + bnx_teardown_intr(sc, sc->bnx_intr_cnt); + ifnet_deserialize_all(ifp); ether_ifdetach(ifp); } @@ -2147,12 +2181,7 @@ bnx_detach(device_t dev) device_delete_child(dev, sc->bnx_miibus); bus_generic_detach(dev); - if (sc->bnx_irq != NULL) { - bus_release_resource(dev, SYS_RES_IRQ, sc->bnx_irq_rid, - sc->bnx_irq); - } - if (sc->bnx_irq_type == PCI_INTR_TYPE_MSI) - pci_release_msi(dev); + bnx_free_intr(sc); if (sc->bnx_res != NULL) { bus_release_resource(dev, SYS_RES_MEMORY, @@ -2164,6 +2193,9 @@ bnx_detach(device_t dev) bnx_dma_free(sc); + if (sc->bnx_serialize != NULL) + kfree(sc->bnx_serialize, M_DEVBUF); + return 0; } @@ -2178,10 +2210,7 @@ bnx_reset(struct bnx_softc *sc) dev = sc->bnx_dev; - if (sc->bnx_asicrev != BGE_ASICREV_BCM5906) - write_op = bnx_writemem_direct; - else - write_op = bnx_writereg_ind; + write_op = 
bnx_writemem_direct; /* Save some important PCI state. */ cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4); @@ -2209,8 +2238,7 @@ bnx_reset(struct bnx_softc *sc) /* XXX: Broadcom Linux driver. */ /* Force PCI-E 1.0a mode */ - if (sc->bnx_asicrev != BGE_ASICREV_BCM5785 && - !BNX_IS_5717_PLUS(sc) && + if (!BNX_IS_57765_PLUS(sc) && CSR_READ_4(sc, BGE_PCIE_PHY_TSTCTL) == (BGE_PCIE_PHY_TSTCTL_PSCRAM | BGE_PCIE_PHY_TSTCTL_PCIE10)) { @@ -2233,17 +2261,6 @@ bnx_reset(struct bnx_softc *sc) /* Issue global reset */ write_op(sc, BGE_MISC_CFG, reset); - if (sc->bnx_asicrev == BGE_ASICREV_BCM5906) { - uint32_t status, ctrl; - - status = CSR_READ_4(sc, BGE_VCPU_STATUS); - CSR_WRITE_4(sc, BGE_VCPU_STATUS, - status | BGE_VCPU_STATUS_DRV_RESET); - ctrl = CSR_READ_4(sc, BGE_VCPU_EXT_CTRL); - CSR_WRITE_4(sc, BGE_VCPU_EXT_CTRL, - ctrl & ~BGE_VCPU_EXT_CTRL_HALT_CPU); - } - DELAY(1000); /* XXX: Broadcom Linux driver. */ @@ -2288,38 +2305,24 @@ bnx_reset(struct bnx_softc *sc) /* Enable memory arbiter */ CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE); - if (sc->bnx_asicrev == BGE_ASICREV_BCM5906) { - for (i = 0; i < BNX_TIMEOUT; i++) { - val = CSR_READ_4(sc, BGE_VCPU_STATUS); - if (val & BGE_VCPU_STATUS_INIT_DONE) - break; - DELAY(100); - } - if (i == BNX_TIMEOUT) { - if_printf(&sc->arpcom.ac_if, "reset timed out\n"); - return; - } - } else { - /* - * Poll until we see the 1's complement of the magic number. - * This indicates that the firmware initialization - * is complete. - */ - for (i = 0; i < BNX_FIRMWARE_TIMEOUT; i++) { - val = bnx_readmem_ind(sc, BGE_SOFTWARE_GENCOMM); - if (val == ~BGE_MAGIC_NUMBER) - break; - DELAY(10); - } - if (i == BNX_FIRMWARE_TIMEOUT) { - if_printf(&sc->arpcom.ac_if, "firmware handshake " - "timed out, found 0x%08x\n", val); - } - - /* BCM57765 A0 needs additional time before accessing. */ - if (sc->bnx_chipid == BGE_CHIPID_BCM57765_A0) - DELAY(10 * 1000); + /* + * Poll until we see the 1's complement of the magic number. + * This indicates that the firmware initialization is complete. + */ + for (i = 0; i < BNX_FIRMWARE_TIMEOUT; i++) { + val = bnx_readmem_ind(sc, BGE_SOFTWARE_GENCOMM); + if (val == ~BGE_MAGIC_NUMBER) + break; + DELAY(10); } + if (i == BNX_FIRMWARE_TIMEOUT) { + if_printf(&sc->arpcom.ac_if, "firmware handshake " + "timed out, found 0x%08x\n", val); + } + + /* BCM57765 A0 needs additional time before accessing. */ + if (sc->bnx_chipid == BGE_CHIPID_BCM57765_A0) + DELAY(10 * 1000); /* * XXX Wait for the value of the PCISTATE register to @@ -2354,15 +2357,17 @@ bnx_reset(struct bnx_softc *sc) CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg); } + CSR_WRITE_4(sc, BGE_MI_MODE, + sc->bnx_mi_mode & ~BGE_MIMODE_AUTOPOLL); + DELAY(80); + /* XXX: Broadcom Linux driver. */ - if (!BNX_IS_5717_PLUS(sc) && - sc->bnx_chipid != BGE_CHIPID_BCM5750_A0 && - sc->bnx_asicrev != BGE_ASICREV_BCM5785) { + if (!BNX_IS_57765_PLUS(sc)) { uint32_t v; /* Enable Data FIFO protection. 
*/ - v = CSR_READ_4(sc, 0x7c00); - CSR_WRITE_4(sc, 0x7c00, v | (1<<25)); + v = CSR_READ_4(sc, BGE_PCIE_TLDLPL_PORT); + CSR_WRITE_4(sc, BGE_PCIE_TLDLPL_PORT, v | (1 << 25)); } DELAY(10000); @@ -2383,25 +2388,26 @@ bnx_reset(struct bnx_softc *sc) */ static void -bnx_rxeof(struct bnx_softc *sc, uint16_t rx_prod) +bnx_rxeof(struct bnx_rx_ret_ring *ret, uint16_t rx_prod, int count) { - struct ifnet *ifp; + struct bnx_softc *sc = ret->bnx_sc; + struct bnx_rx_std_ring *std = ret->bnx_std; + struct ifnet *ifp = &sc->arpcom.ac_if; int stdcnt = 0, jumbocnt = 0; - ifp = &sc->arpcom.ac_if; - - while (sc->bnx_rx_saved_considx != rx_prod) { + while (ret->bnx_rx_saved_considx != rx_prod && count != 0) { struct bge_rx_bd *cur_rx; uint32_t rxidx; struct mbuf *m = NULL; uint16_t vlan_tag = 0; int have_tag = 0; - cur_rx = - &sc->bnx_ldata.bnx_rx_return_ring[sc->bnx_rx_saved_considx]; + --count; + + cur_rx = &ret->bnx_rx_ret_ring[ret->bnx_rx_saved_considx]; rxidx = cur_rx->bge_idx; - BNX_INC(sc->bnx_rx_saved_considx, sc->bnx_return_ring_cnt); + BNX_INC(ret->bnx_rx_saved_considx, BNX_RETURN_RING_CNT); if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) { have_tag = 1; @@ -2413,7 +2419,7 @@ bnx_rxeof(struct bnx_softc *sc, uint16_t rx_prod) jumbocnt++; if (rxidx != sc->bnx_jumbo) { - ifp->if_ierrors++; + IFNET_STAT_INC(ifp, ierrors, 1); if_printf(ifp, "sw jumbo index(%d) " "and hw jumbo index(%d) mismatch, drop!\n", sc->bnx_jumbo, rxidx); @@ -2421,44 +2427,44 @@ bnx_rxeof(struct bnx_softc *sc, uint16_t rx_prod) continue; } - m = sc->bnx_cdata.bnx_rx_jumbo_chain[rxidx].bnx_mbuf; + m = sc->bnx_cdata.bnx_rx_jumbo_chain[rxidx].bnx_rx_mbuf; if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) { - ifp->if_ierrors++; + IFNET_STAT_INC(ifp, ierrors, 1); bnx_setup_rxdesc_jumbo(sc, sc->bnx_jumbo); continue; } if (bnx_newbuf_jumbo(sc, sc->bnx_jumbo, 0)) { - ifp->if_ierrors++; + IFNET_STAT_INC(ifp, ierrors, 1); bnx_setup_rxdesc_jumbo(sc, sc->bnx_jumbo); continue; } } else { - BNX_INC(sc->bnx_std, BGE_STD_RX_RING_CNT); + BNX_INC(std->bnx_rx_std, BGE_STD_RX_RING_CNT); stdcnt++; - if (rxidx != sc->bnx_std) { - ifp->if_ierrors++; + if (rxidx != std->bnx_rx_std) { + IFNET_STAT_INC(ifp, ierrors, 1); if_printf(ifp, "sw std index(%d) " "and hw std index(%d) mismatch, drop!\n", - sc->bnx_std, rxidx); - bnx_setup_rxdesc_std(sc, rxidx); + std->bnx_rx_std, rxidx); + bnx_setup_rxdesc_std(std, rxidx); continue; } - m = sc->bnx_cdata.bnx_rx_std_chain[rxidx].bnx_mbuf; + m = std->bnx_rx_std_buf[rxidx].bnx_rx_mbuf; if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) { - ifp->if_ierrors++; - bnx_setup_rxdesc_std(sc, sc->bnx_std); + IFNET_STAT_INC(ifp, ierrors, 1); + bnx_setup_rxdesc_std(std, std->bnx_rx_std); continue; } - if (bnx_newbuf_std(sc, sc->bnx_std, 0)) { - ifp->if_ierrors++; - bnx_setup_rxdesc_std(sc, sc->bnx_std); + if (bnx_newbuf_std(ret, std->bnx_rx_std, 0)) { + IFNET_STAT_INC(ifp, ierrors, 1); + bnx_setup_rxdesc_std(std, std->bnx_rx_std); continue; } } - ifp->if_ipackets++; + IFNET_STAT_INC(ifp, ipackets, 1); m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN; m->m_pkthdr.rcvif = ifp; @@ -2485,105 +2491,121 @@ bnx_rxeof(struct bnx_softc *sc, uint16_t rx_prod) if (have_tag) { m->m_flags |= M_VLANTAG; m->m_pkthdr.ether_vlantag = vlan_tag; - have_tag = vlan_tag = 0; } ifp->if_input(ifp, m); } - bnx_writembx(sc, BGE_MBX_RX_CONS0_LO, sc->bnx_rx_saved_considx); + bnx_writembx(sc, BGE_MBX_RX_CONS0_LO, ret->bnx_rx_saved_considx); if (stdcnt) - bnx_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bnx_std); + bnx_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 
std->bnx_rx_std); if (jumbocnt) bnx_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bnx_jumbo); } static void -bnx_txeof(struct bnx_softc *sc, uint16_t tx_cons) +bnx_txeof(struct bnx_tx_ring *txr, uint16_t tx_cons) { - struct bge_tx_bd *cur_tx = NULL; - struct ifnet *ifp; - - ifp = &sc->arpcom.ac_if; + struct ifnet *ifp = &txr->bnx_sc->arpcom.ac_if; /* * Go through our tx ring and free mbufs for those * frames that have been sent. */ - while (sc->bnx_tx_saved_considx != tx_cons) { + while (txr->bnx_tx_saved_considx != tx_cons) { + struct bnx_tx_buf *buf; uint32_t idx = 0; - idx = sc->bnx_tx_saved_considx; - cur_tx = &sc->bnx_ldata.bnx_tx_ring[idx]; - if (cur_tx->bge_flags & BGE_TXBDFLAG_END) - ifp->if_opackets++; - if (sc->bnx_cdata.bnx_tx_chain[idx] != NULL) { - bus_dmamap_unload(sc->bnx_cdata.bnx_tx_mtag, - sc->bnx_cdata.bnx_tx_dmamap[idx]); - m_freem(sc->bnx_cdata.bnx_tx_chain[idx]); - sc->bnx_cdata.bnx_tx_chain[idx] = NULL; + idx = txr->bnx_tx_saved_considx; + buf = &txr->bnx_tx_buf[idx]; + if (buf->bnx_tx_mbuf != NULL) { + IFNET_STAT_INC(ifp, opackets, 1); + bus_dmamap_unload(txr->bnx_tx_mtag, + buf->bnx_tx_dmamap); + m_freem(buf->bnx_tx_mbuf); + buf->bnx_tx_mbuf = NULL; } - sc->bnx_txcnt--; - BNX_INC(sc->bnx_tx_saved_considx, BGE_TX_RING_CNT); + txr->bnx_tx_cnt--; + BNX_INC(txr->bnx_tx_saved_considx, BGE_TX_RING_CNT); } - if (cur_tx != NULL && - (BGE_TX_RING_CNT - sc->bnx_txcnt) >= + if ((BGE_TX_RING_CNT - txr->bnx_tx_cnt) >= (BNX_NSEG_RSVD + BNX_NSEG_SPARE)) - ifp->if_flags &= ~IFF_OACTIVE; + ifq_clr_oactive(&ifp->if_snd); - if (sc->bnx_txcnt == 0) + if (txr->bnx_tx_cnt == 0) ifp->if_timer = 0; if (!ifq_is_empty(&ifp->if_snd)) if_devstart(ifp); } -#ifdef DEVICE_POLLING +#ifdef IFPOLL_ENABLE + +static void +bnx_npoll(struct ifnet *ifp, struct ifpoll_info *info) +{ + struct bnx_softc *sc = ifp->if_softc; + + ASSERT_IFNET_SERIALIZED_ALL(ifp); + + if (info != NULL) { + int cpuid = sc->bnx_npoll.ifpc_cpuid; + + info->ifpi_rx[cpuid].poll_func = bnx_npoll_compat; + info->ifpi_rx[cpuid].arg = NULL; + info->ifpi_rx[cpuid].serializer = &sc->bnx_main_serialize; + + if (ifp->if_flags & IFF_RUNNING) + bnx_disable_intr(sc); + ifq_set_cpuid(&ifp->if_snd, cpuid); + } else { + if (ifp->if_flags & IFF_RUNNING) + bnx_enable_intr(sc); + ifq_set_cpuid(&ifp->if_snd, sc->bnx_tx_ring[0].bnx_tx_cpuid); + } +} static void -bnx_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) +bnx_npoll_compat(struct ifnet *ifp, void *arg __unused, int cycle) { struct bnx_softc *sc = ifp->if_softc; + struct bnx_tx_ring *txr = &sc->bnx_tx_ring[0]; /* XXX */ + struct bnx_rx_ret_ring *ret = &sc->bnx_rx_ret_ring[0]; /* XXX */ struct bge_status_block *sblk = sc->bnx_ldata.bnx_status_block; uint16_t rx_prod, tx_cons; - switch(cmd) { - case POLL_REGISTER: - bnx_disable_intr(sc); - break; - case POLL_DEREGISTER: - bnx_enable_intr(sc); - break; - case POLL_AND_CHECK_STATUS: + ASSERT_SERIALIZED(&sc->bnx_main_serialize); + + if (sc->bnx_npoll.ifpc_stcount-- == 0) { + sc->bnx_npoll.ifpc_stcount = sc->bnx_npoll.ifpc_stfrac; /* * Process link state changes. */ bnx_link_poll(sc); - /* Fall through */ - case POLL_ONLY: - sc->bnx_status_tag = sblk->bge_status_tag; - /* - * Use a load fence to ensure that status_tag - * is saved before rx_prod and tx_cons. 
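Both the old polling code being deleted here and the new bnx_npoll_compat()/bnx_intr() bodies follow the same acknowledgement discipline: capture the status tag before reading any producer/consumer indices, separate the two loads with cpu_lfence(), and later write that same tag (shifted into bits 31-24) to the IRQ0 mailbox so the hardware knows exactly which status-block update was consumed. The consumer side in miniature; the index reads below use the pre-patch status-block layout, while the new code reads *ret->bnx_rx_considx and *txr->bnx_tx_considx instead:

uint32_t tag;
uint16_t rx_prod, tx_cons;

tag = sblk->bge_status_tag;	/* snapshot the tag hardware last wrote */
cpu_lfence();			/* tag load must not be reordered after these: */
rx_prod = sblk->bge_idx[0].bge_rx_prod_idx;
tx_cons = sblk->bge_idx[0].bge_tx_cons_idx;
/* ... drain RX up to rx_prod, reclaim TX up to tx_cons ... */
bnx_writembx(sc, BGE_MBX_IRQ0_LO, tag << 24);	/* ack that tag, re-arm */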
- */ - cpu_lfence(); - - rx_prod = sblk->bge_idx[0].bge_rx_prod_idx; - tx_cons = sblk->bge_idx[0].bge_tx_cons_idx; - if (ifp->if_flags & IFF_RUNNING) { - rx_prod = sblk->bge_idx[0].bge_rx_prod_idx; - if (sc->bnx_rx_saved_considx != rx_prod) - bnx_rxeof(sc, rx_prod); - - tx_cons = sblk->bge_idx[0].bge_tx_cons_idx; - if (sc->bnx_tx_saved_considx != tx_cons) - bnx_txeof(sc, tx_cons); - } - break; } + + sc->bnx_status_tag = sblk->bge_status_tag; + + /* + * Use a load fence to ensure that status_tag is saved + * before rx_prod and tx_cons. + */ + cpu_lfence(); + + lwkt_serialize_enter(&ret->bnx_rx_ret_serialize); + rx_prod = *ret->bnx_rx_considx; + if (ret->bnx_rx_saved_considx != rx_prod) + bnx_rxeof(ret, rx_prod, cycle); + lwkt_serialize_exit(&ret->bnx_rx_ret_serialize); + + lwkt_serialize_enter(&txr->bnx_tx_serialize); + tx_cons = *txr->bnx_tx_considx; + if (txr->bnx_tx_saved_considx != tx_cons) + bnx_txeof(txr, tx_cons); + lwkt_serialize_exit(&txr->bnx_tx_serialize); } -#endif +#endif /* IFPOLL_ENABLE */ static void bnx_intr_legacy(void *xsc) @@ -2631,9 +2653,10 @@ bnx_intr(struct bnx_softc *sc) { struct ifnet *ifp = &sc->arpcom.ac_if; struct bge_status_block *sblk = sc->bnx_ldata.bnx_status_block; - uint16_t rx_prod, tx_cons; uint32_t status; + ASSERT_SERIALIZED(&sc->bnx_main_serialize); + sc->bnx_status_tag = sblk->bge_status_tag; /* * Use a load fence to ensure that status_tag is saved @@ -2641,34 +2664,40 @@ bnx_intr(struct bnx_softc *sc) */ cpu_lfence(); - rx_prod = sblk->bge_idx[0].bge_rx_prod_idx; - tx_cons = sblk->bge_idx[0].bge_tx_cons_idx; status = sblk->bge_status; if ((status & BGE_STATFLAG_LINKSTATE_CHANGED) || sc->bnx_link_evt) bnx_link_poll(sc); if (ifp->if_flags & IFF_RUNNING) { - if (sc->bnx_rx_saved_considx != rx_prod) - bnx_rxeof(sc, rx_prod); + struct bnx_tx_ring *txr = &sc->bnx_tx_ring[0]; /* XXX */ + struct bnx_rx_ret_ring *ret = &sc->bnx_rx_ret_ring[0]; /* XXX */ + uint16_t rx_prod, tx_cons; + + lwkt_serialize_enter(&ret->bnx_rx_ret_serialize); + rx_prod = *ret->bnx_rx_considx; + if (ret->bnx_rx_saved_considx != rx_prod) + bnx_rxeof(ret, rx_prod, -1); + lwkt_serialize_exit(&ret->bnx_rx_ret_serialize); - if (sc->bnx_tx_saved_considx != tx_cons) - bnx_txeof(sc, tx_cons); + lwkt_serialize_enter(&txr->bnx_tx_serialize); + tx_cons = *txr->bnx_tx_considx; + if (txr->bnx_tx_saved_considx != tx_cons) + bnx_txeof(txr, tx_cons); + lwkt_serialize_exit(&txr->bnx_tx_serialize); } bnx_writembx(sc, BGE_MBX_IRQ0_LO, sc->bnx_status_tag << 24); - - if (sc->bnx_coal_chg) - bnx_coal_change(sc); } static void bnx_tick(void *xsc) { struct bnx_softc *sc = xsc; - struct ifnet *ifp = &sc->arpcom.ac_if; - lwkt_serialize_enter(ifp->if_serializer); + lwkt_serialize_enter(&sc->bnx_main_serialize); + + KKASSERT(mycpuid == sc->bnx_stat_cpuid); bnx_stats_update_regs(sc); @@ -2686,7 +2715,7 @@ bnx_tick(void *xsc) callout_reset(&sc->bnx_stat_timer, hz, bnx_tick, sc); - lwkt_serialize_exit(ifp->if_serializer); + lwkt_serialize_exit(&sc->bnx_main_serialize); } static void @@ -2703,12 +2732,11 @@ bnx_stats_update_regs(struct bnx_softc *sc) s++; } - ifp->if_collisions += + IFNET_STAT_SET(ifp, collisions, (stats.dot3StatsSingleCollisionFrames + stats.dot3StatsMultipleCollisionFrames + stats.dot3StatsExcessiveCollisions + - stats.dot3StatsLateCollisions) - - ifp->if_collisions; + stats.dot3StatsLateCollisions)); } /* @@ -2716,16 +2744,36 @@ bnx_stats_update_regs(struct bnx_softc *sc) * pointers to descriptors. 
 
 /*
@@ -2716,16 +2744,36 @@ bnx_stats_update_regs(struct bnx_softc *sc)
  * pointers to descriptors.
  */
 static int
-bnx_encap(struct bnx_softc *sc, struct mbuf **m_head0, uint32_t *txidx)
+bnx_encap(struct bnx_tx_ring *txr, struct mbuf **m_head0, uint32_t *txidx,
+    int *segs_used)
 {
 	struct bge_tx_bd *d = NULL;
-	uint16_t csum_flags = 0;
+	uint16_t csum_flags = 0, vlan_tag = 0, mss = 0;
 	bus_dma_segment_t segs[BNX_NSEG_NEW];
 	bus_dmamap_t map;
 	int error, maxsegs, nsegs, idx, i;
 	struct mbuf *m_head = *m_head0, *m_new;
 
-	if (m_head->m_pkthdr.csum_flags) {
+	if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
+#ifdef BNX_TSO_DEBUG
+		int tso_nsegs;
+#endif
+
+		error = bnx_setup_tso(txr, m_head0, &mss, &csum_flags);
+		if (error)
+			return error;
+		m_head = *m_head0;
+
+#ifdef BNX_TSO_DEBUG
+		tso_nsegs = (m_head->m_pkthdr.len /
+		    m_head->m_pkthdr.tso_segsz) - 1;
+		if (tso_nsegs > (BNX_TSO_NSTATS - 1))
+			tso_nsegs = BNX_TSO_NSTATS - 1;
+		else if (tso_nsegs < 0)
+			tso_nsegs = 0;
+		txr->bnx_sc->bnx_tsosegs[tso_nsegs]++;
+#endif
+	} else if (m_head->m_pkthdr.csum_flags & BNX_CSUM_FEATURES) {
 		if (m_head->m_pkthdr.csum_flags & CSUM_IP)
 			csum_flags |= BGE_TXBDFLAG_IP_CSUM;
 		if (m_head->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
@@ -2735,11 +2783,15 @@ bnx_encap(struct bnx_softc *sc, struct mbuf **m_head0, uint32_t *txidx)
 		else if (m_head->m_flags & M_FRAG)
 			csum_flags |= BGE_TXBDFLAG_IP_FRAG;
 	}
+	if (m_head->m_flags & M_VLANTAG) {
+		csum_flags |= BGE_TXBDFLAG_VLAN_TAG;
+		vlan_tag = m_head->m_pkthdr.ether_vlantag;
+	}
 
 	idx = *txidx;
-	map = sc->bnx_cdata.bnx_tx_dmamap[idx];
+	map = txr->bnx_tx_buf[idx].bnx_tx_dmamap;
 
-	maxsegs = (BGE_TX_RING_CNT - sc->bnx_txcnt) - BNX_NSEG_RSVD;
+	maxsegs = (BGE_TX_RING_CNT - txr->bnx_tx_cnt) - BNX_NSEG_RSVD;
 	KASSERT(maxsegs >= BNX_NSEG_SPARE,
 	    ("not enough segments %d", maxsegs));
 
@@ -2762,7 +2814,8 @@ bnx_encap(struct bnx_softc *sc, struct mbuf **m_head0, uint32_t *txidx)
 		goto back;
 	}
 
-	if ((sc->bnx_flags & BNX_FLAG_SHORTDMA) && m_head->m_next != NULL) {
+	if ((txr->bnx_tx_flags & BNX_TX_FLAG_SHORTDMA) &&
+	    m_head->m_next != NULL) {
 		m_new = bnx_defrag_shortdma(m_head);
 		if (m_new == NULL) {
 			error = ENOBUFS;
@@ -2770,7 +2823,9 @@ bnx_encap(struct bnx_softc *sc, struct mbuf **m_head0, uint32_t *txidx)
 		}
 		*m_head0 = m_head = m_new;
 	}
-	if (sc->bnx_force_defrag && m_head->m_next != NULL) {
+	if ((m_head->m_pkthdr.csum_flags & CSUM_TSO) == 0 &&
+	    (txr->bnx_tx_flags & BNX_TX_FLAG_FORCE_DEFRAG) &&
+	    m_head->m_next != NULL) {
 		/*
 		 * Forcefully defragment mbuf chain to overcome hardware
 		 * limitation which only support a single outstanding
@@ -2782,21 +2837,24 @@ bnx_encap(struct bnx_softc *sc, struct mbuf **m_head0, uint32_t *txidx)
 		*m_head0 = m_head = m_new;
 	}
 
-	error = bus_dmamap_load_mbuf_defrag(sc->bnx_cdata.bnx_tx_mtag, map,
-	    m_head0, segs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
+	error = bus_dmamap_load_mbuf_defrag(txr->bnx_tx_mtag, map,
+	    m_head0, segs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
 	if (error)
 		goto back;
+	*segs_used += nsegs;
 
 	m_head = *m_head0;
-	bus_dmamap_sync(sc->bnx_cdata.bnx_tx_mtag, map, BUS_DMASYNC_PREWRITE);
+	bus_dmamap_sync(txr->bnx_tx_mtag, map, BUS_DMASYNC_PREWRITE);
 
 	for (i = 0; ; i++) {
-		d = &sc->bnx_ldata.bnx_tx_ring[idx];
+		d = &txr->bnx_tx_ring[idx];
 
 		d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr);
 		d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr);
 		d->bge_len = segs[i].ds_len;
 		d->bge_flags = csum_flags;
+		d->bge_vlan_tag = vlan_tag;
+		d->bge_mss = mss;
 
 		if (i == nsegs - 1)
 			break;
@@ -2805,23 +2863,14 @@
 	/* Mark the last segment as end of packet... */
 	d->bge_flags |= BGE_TXBDFLAG_END;
 
-	/* Set vlan tag to the first segment of the packet. */
-	d = &sc->bnx_ldata.bnx_tx_ring[*txidx];
-	if (m_head->m_flags & M_VLANTAG) {
-		d->bge_flags |= BGE_TXBDFLAG_VLAN_TAG;
-		d->bge_vlan_tag = m_head->m_pkthdr.ether_vlantag;
-	} else {
-		d->bge_vlan_tag = 0;
-	}
-
 	/*
 	 * Insure that the map for this transmission is placed at
 	 * the array index of the last descriptor in this chain.
 	 */
-	sc->bnx_cdata.bnx_tx_dmamap[*txidx] = sc->bnx_cdata.bnx_tx_dmamap[idx];
-	sc->bnx_cdata.bnx_tx_dmamap[idx] = map;
-	sc->bnx_cdata.bnx_tx_chain[idx] = m_head;
-	sc->bnx_txcnt += nsegs;
+	txr->bnx_tx_buf[*txidx].bnx_tx_dmamap = txr->bnx_tx_buf[idx].bnx_tx_dmamap;
+	txr->bnx_tx_buf[idx].bnx_tx_dmamap = map;
+	txr->bnx_tx_buf[idx].bnx_tx_mbuf = m_head;
+	txr->bnx_tx_cnt += nsegs;
 	BNX_INC(idx, BGE_TX_RING_CNT);
 	*txidx = idx;
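Note how the rewritten fill loop above stamps csum_flags, bge_vlan_tag and bge_mss into every descriptor of the chain and marks only the final one with BGE_TXBDFLAG_END, where the old code patched the VLAN tag onto the first descriptor afterwards. A standalone sketch of that fill pattern, with an abbreviated, made-up descriptor layout (demo_txbd and the flag value are illustrative only):

#include <stdio.h>
#include <stdint.h>

#define DEMO_RING_CNT	8
#define DEMO_FLAG_END	0x0004	/* stand-in for BGE_TXBDFLAG_END */

struct demo_txbd {
	uint32_t	len;
	uint16_t	flags;
	uint16_t	vlan_tag;
	uint16_t	mss;
};

int
main(void)
{
	struct demo_txbd ring[DEMO_RING_CNT] = { 0 };
	uint32_t seglen[3] = { 1448, 1448, 600 };
	int idx = 5, nsegs = 3, i;	/* start near the ring end to show wrap */

	for (i = 0; ; i++) {
		struct demo_txbd *d = &ring[idx];

		d->len = seglen[i];
		d->flags = 0;		/* same csum flags on every segment */
		d->vlan_tag = 42;	/* stamped on every descriptor */
		d->mss = 1448;
		if (i == nsegs - 1)
			break;
		idx = (idx + 1) % DEMO_RING_CNT;	/* like BNX_INC() */
	}
	ring[idx].flags |= DEMO_FLAG_END;	/* only the last segment */

	for (i = 0; i < DEMO_RING_CNT; i++)
		printf("bd%d len=%u flags=%#x\n", i, ring[i].len, ring[i].flags);
	return 0;
}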
@@ -2838,87 +2887,69 @@ back:
  * to the mbuf data regions directly in the transmit descriptors.
  */
 static void
-bnx_start(struct ifnet *ifp)
+bnx_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
 {
 	struct bnx_softc *sc = ifp->if_softc;
+	struct bnx_tx_ring *txr = &sc->bnx_tx_ring[0];	/* XXX */
 	struct mbuf *m_head = NULL;
 	uint32_t prodidx;
-	int need_trans;
+	int nsegs = 0;
 
-	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
-		return;
+	ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq);
+	ASSERT_SERIALIZED(&txr->bnx_tx_serialize);
 
-	prodidx = sc->bnx_tx_prodidx;
-
-	need_trans = 0;
-	while (sc->bnx_cdata.bnx_tx_chain[prodidx] == NULL) {
-		m_head = ifq_dequeue(&ifp->if_snd, NULL);
-		if (m_head == NULL)
-			break;
+	if ((ifp->if_flags & IFF_RUNNING) == 0 || ifq_is_oactive(&ifp->if_snd))
+		return;
 
-		/*
-		 * XXX
-		 * The code inside the if() block is never reached since we
-		 * must mark CSUM_IP_FRAGS in our if_hwassist to start getting
-		 * requests to checksum TCP/UDP in a fragmented packet.
-		 *
-		 * XXX
-		 * safety overkill.  If this is a fragmented packet chain
-		 * with delayed TCP/UDP checksums, then only encapsulate
-		 * it if we have enough descriptors to handle the entire
-		 * chain at once.
-		 * (paranoia -- may not actually be needed)
-		 */
-		if ((m_head->m_flags & M_FIRSTFRAG) &&
-		    (m_head->m_pkthdr.csum_flags & CSUM_DELAY_DATA)) {
-			if ((BGE_TX_RING_CNT - sc->bnx_txcnt) <
-			    m_head->m_pkthdr.csum_data + BNX_NSEG_RSVD) {
-				ifp->if_flags |= IFF_OACTIVE;
-				ifq_prepend(&ifp->if_snd, m_head);
-				break;
-			}
-		}
+	prodidx = txr->bnx_tx_prodidx;
+	while (txr->bnx_tx_buf[prodidx].bnx_tx_mbuf == NULL) {
 		/*
 		 * Sanity check: avoid coming within BGE_NSEG_RSVD
 		 * descriptors of the end of the ring.  Also make
 		 * sure there are BGE_NSEG_SPARE descriptors for
-		 * jumbo buffers' defragmentation.
+		 * jumbo buffers' or TSO segments' defragmentation.
 		 */
-		if ((BGE_TX_RING_CNT - sc->bnx_txcnt) <
+		if ((BGE_TX_RING_CNT - txr->bnx_tx_cnt) <
 		    (BNX_NSEG_RSVD + BNX_NSEG_SPARE)) {
-			ifp->if_flags |= IFF_OACTIVE;
-			ifq_prepend(&ifp->if_snd, m_head);
+			ifq_set_oactive(&ifp->if_snd);
 			break;
 		}
 
+		m_head = ifq_dequeue(&ifp->if_snd, NULL);
+		if (m_head == NULL)
+			break;
+
 		/*
 		 * Pack the data into the transmit ring. If we
 		 * don't have room, set the OACTIVE flag and wait
 		 * for the NIC to drain the ring.
 		 */
-		if (bnx_encap(sc, &m_head, &prodidx)) {
-			ifp->if_flags |= IFF_OACTIVE;
-			ifp->if_oerrors++;
+		if (bnx_encap(txr, &m_head, &prodidx, &nsegs)) {
+			ifq_set_oactive(&ifp->if_snd);
+			IFNET_STAT_INC(ifp, oerrors, 1);
 			break;
 		}
-		need_trans = 1;
-
-		ETHER_BPF_MTAP(ifp, m_head);
-	}
 
-	if (!need_trans)
-		return;
+		if (nsegs >= txr->bnx_tx_wreg) {
+			/* Transmit */
+			bnx_writembx(txr->bnx_sc, txr->bnx_tx_mbx, prodidx);
+			nsegs = 0;
+		}
 
-	/* Transmit */
-	bnx_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
+		ETHER_BPF_MTAP(ifp, m_head);
 
-	sc->bnx_tx_prodidx = prodidx;
+		/*
+		 * Set a timeout in case the chip goes out to lunch.
+		 */
+		ifp->if_timer = 5;
+	}
 
-	/*
-	 * Set a timeout in case the chip goes out to lunch.
-	 */
-	ifp->if_timer = 5;
+	if (nsegs > 0) {
+		/* Transmit */
+		bnx_writembx(txr->bnx_sc, txr->bnx_tx_mbx, prodidx);
+	}
+	txr->bnx_tx_prodidx = prodidx;
 }
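The bnx_tx_wreg logic above batches doorbell writes: the loop rings the TX mailbox once at least bnx_tx_wreg segments have been queued, and a final write after the loop flushes any partial batch. Uncached register writes are expensive, so this amortizes them across packets while still bounding latency. A runnable sketch of the batching, where mailbox_write() stands in for bnx_writembx():

#include <stdio.h>

static int doorbells;

static void
mailbox_write(int prodidx)
{
	doorbells++;
	printf("doorbell: prod=%d\n", prodidx);
}

int
main(void)
{
	int tx_wreg = 8;	/* mirrors txr->bnx_tx_wreg */
	int nsegs = 0, prodidx = 0, pkt;
	int pktsegs[6] = { 3, 2, 4, 1, 5, 2 };

	for (pkt = 0; pkt < 6; pkt++) {
		prodidx += pktsegs[pkt];	/* bnx_encap() queues segments */
		nsegs += pktsegs[pkt];
		if (nsegs >= tx_wreg) {		/* batch full: ring doorbell */
			mailbox_write(prodidx);
			nsegs = 0;
		}
	}
	if (nsegs > 0)				/* flush the partial batch */
		mailbox_write(prodidx);
	printf("%d packets, %d doorbells\n", pkt, doorbells);
	return 0;
}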
 
 static void
@@ -2928,8 +2959,9 @@ bnx_init(void *xsc)
 	struct ifnet *ifp = &sc->arpcom.ac_if;
 	uint16_t *m;
 	uint32_t mode;
+	int i;
 
-	ASSERT_SERIALIZED(ifp->if_serializer);
+	ASSERT_IFNET_SERIALIZED_ALL(ifp);
 
 	/* Cancel pending I/O and flush buffers. */
 	bnx_stop(sc);
@@ -2962,7 +2994,7 @@ bnx_init(void *xsc)
 	bnx_setmulti(sc);
 
 	/* Init RX ring. */
-	if (bnx_init_rx_ring_std(sc)) {
+	if (bnx_init_rx_ring_std(&sc->bnx_rx_std_ring)) {
 		if_printf(ifp, "RX ring initialization failed\n");
 		bnx_stop(sc);
 		return;
@@ -2978,15 +3010,18 @@ bnx_init(void *xsc)
 	}
 
 	/* Init our RX return ring index */
-	sc->bnx_rx_saved_considx = 0;
+	for (i = 0; i < sc->bnx_rx_retcnt; ++i)
+		sc->bnx_rx_ret_ring[i].bnx_rx_saved_considx = 0;
 
 	/* Init TX ring. */
-	bnx_init_tx_ring(sc);
+	for (i = 0; i < sc->bnx_tx_ringcnt; ++i)
+		bnx_init_tx_ring(&sc->bnx_tx_ring[i]);
 
 	/* Enable TX MAC state machine lockup fix. */
 	mode = CSR_READ_4(sc, BGE_TX_MODE);
 	mode |= BGE_TXMODE_MBUF_LOCKUP_FIX;
-	if (sc->bnx_asicrev == BGE_ASICREV_BCM5720) {
+	if (sc->bnx_asicrev == BGE_ASICREV_BCM5720 ||
+	    sc->bnx_asicrev == BGE_ASICREV_BCM5762) {
 		mode &= ~(BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE);
 		mode |= CSR_READ_4(sc, BGE_TX_MODE) &
 		    (BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE);
@@ -3003,12 +3038,12 @@ bnx_init(void *xsc)
 	 * this number of frames, it will drop subsequent incoming
 	 * frames until the MBUF High Watermark is reached.
 	 */
-	if (sc->bnx_asicrev == BGE_ASICREV_BCM57765)
+	if (BNX_IS_57765_FAMILY(sc))
 		CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 1);
 	else
 		CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 2);
 
-	if (sc->bnx_irq_type == PCI_INTR_TYPE_MSI) {
+	if (sc->bnx_intr_type == PCI_INTR_TYPE_MSI) {
 		if (bootverbose) {
 			if_printf(ifp, "MSI_MODE: %#x\n",
 			    CSR_READ_4(sc, BGE_MSI_MODE));
@@ -3020,8 +3055,8 @@ bnx_init(void *xsc)
 	/* Enable host interrupts if polling(4) is not enabled. */
 	PCI_SETBIT(sc->bnx_dev, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA, 4);
-#ifdef DEVICE_POLLING
-	if (ifp->if_flags & IFF_POLLING)
+#ifdef IFPOLL_ENABLE
+	if (ifp->if_flags & IFF_NPOLLING)
 		bnx_disable_intr(sc);
 	else
 #endif
@@ -3030,9 +3065,10 @@
 	bnx_ifmedia_upd(ifp);
 
 	ifp->if_flags |= IFF_RUNNING;
-	ifp->if_flags &= ~IFF_OACTIVE;
+	ifq_clr_oactive(&ifp->if_snd);
 
-	callout_reset(&sc->bnx_stat_timer, hz, bnx_tick, sc);
+	callout_reset_bycpu(&sc->bnx_stat_timer, hz, bnx_tick, sc,
+	    sc->bnx_stat_cpuid);
 }
 
 /*
@@ -3137,7 +3173,7 @@ bnx_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
 	struct ifreq *ifr = (struct ifreq *)data;
 	int mask, error = 0;
 
-	ASSERT_SERIALIZED(ifp->if_serializer);
+	ASSERT_IFNET_SERIALIZED_ALL(ifp);
 
 	switch (command) {
 	case SIOCSIFMTU:
@@ -3199,10 +3235,17 @@ bnx_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
 		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
 		if (mask & IFCAP_HWCSUM) {
 			ifp->if_capenable ^= (mask & IFCAP_HWCSUM);
-			if (IFCAP_HWCSUM & ifp->if_capenable)
-				ifp->if_hwassist = BNX_CSUM_FEATURES;
+			if (ifp->if_capenable & IFCAP_TXCSUM)
+				ifp->if_hwassist |= BNX_CSUM_FEATURES;
+			else
+				ifp->if_hwassist &= ~BNX_CSUM_FEATURES;
+		}
+		if (mask & IFCAP_TSO) {
+			ifp->if_capenable ^= (mask & IFCAP_TSO);
+			if (ifp->if_capenable & IFCAP_TSO)
+				ifp->if_hwassist |= CSUM_TSO;
 			else
-				ifp->if_hwassist = 0;
+				ifp->if_hwassist &= ~CSUM_TSO;
 		}
 		break;
 	default:
@@ -3221,10 +3264,10 @@ bnx_watchdog(struct ifnet *ifp)
 
 	bnx_init(sc);
 
-	ifp->if_oerrors++;
+	IFNET_STAT_INC(ifp, oerrors, 1);
 
 	if (!ifq_is_empty(&ifp->if_snd))
-		if_devstart(ifp);
+		if_devstart_sched(ifp);
 }
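The SIOCSIFCAP handling above now keeps if_hwassist bit-accurate with if_capenable: only the bits belonging to the toggled capability are or-ed in or masked out, where the old blanket "if_hwassist = 0" would also have wiped CSUM_TSO once TSO existed. A compact sketch of the bookkeeping; the CAP_/ASSIST_ constants are stand-ins, not the real IFCAP_ or CSUM_ values:

#include <stdio.h>

#define CAP_HWCSUM	0x1
#define CAP_TSO		0x2
#define ASSIST_CSUM	0x10	/* stands in for BNX_CSUM_FEATURES */
#define ASSIST_TSO	0x20	/* stands in for CSUM_TSO */

int
main(void)
{
	int capenable = CAP_HWCSUM | CAP_TSO;
	int hwassist = ASSIST_CSUM | ASSIST_TSO;

	/* Toggle checksum offload off: only its hwassist bits go away. */
	capenable ^= CAP_HWCSUM;
	if (capenable & CAP_HWCSUM)
		hwassist |= ASSIST_CSUM;
	else
		hwassist &= ~ASSIST_CSUM;

	/* TSO assist survives, unlike with the old wholesale reset. */
	printf("capenable=%#x hwassist=%#x\n", capenable, hwassist);
	return 0;
}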
 
 /*
@@ -3235,8 +3278,9 @@ static void
 bnx_stop(struct bnx_softc *sc)
 {
 	struct ifnet *ifp = &sc->arpcom.ac_if;
+	int i;
 
-	ASSERT_SERIALIZED(ifp->if_serializer);
+	ASSERT_IFNET_SERIALIZED_ALL(ifp);
 
 	callout_stop(&sc->bnx_stat_timer);
 
@@ -3278,22 +3322,22 @@
 	BNX_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
 
 	/* Free the RX lists. */
-	bnx_free_rx_ring_std(sc);
+	bnx_free_rx_ring_std(&sc->bnx_rx_std_ring);
 
 	/* Free jumbo RX list. */
 	if (BNX_IS_JUMBO_CAPABLE(sc))
 		bnx_free_rx_ring_jumbo(sc);
 
 	/* Free TX buffers. */
-	bnx_free_tx_ring(sc);
+	for (i = 0; i < sc->bnx_tx_ringcnt; ++i)
+		bnx_free_tx_ring(&sc->bnx_tx_ring[i]);
 
 	sc->bnx_status_tag = 0;
 	sc->bnx_link = 0;
 	sc->bnx_coal_chg = 0;
 
-	sc->bnx_tx_saved_considx = BNX_TXCONS_UNSET;
-
-	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
+	ifp->if_flags &= ~IFF_RUNNING;
+	ifq_clr_oactive(&ifp->if_snd);
 	ifp->if_timer = 0;
 }
 
@@ -3307,10 +3351,10 @@ bnx_shutdown(device_t dev)
 	struct bnx_softc *sc = device_get_softc(dev);
 	struct ifnet *ifp = &sc->arpcom.ac_if;
 
-	lwkt_serialize_enter(ifp->if_serializer);
+	ifnet_serialize_all(ifp);
 	bnx_stop(sc);
 	bnx_reset(sc);
-	lwkt_serialize_exit(ifp->if_serializer);
+	ifnet_deserialize_all(ifp);
 }
 
 static int
@@ -3319,9 +3363,9 @@ bnx_suspend(device_t dev)
 {
 	struct bnx_softc *sc = device_get_softc(dev);
 	struct ifnet *ifp = &sc->arpcom.ac_if;
 
-	lwkt_serialize_enter(ifp->if_serializer);
+	ifnet_serialize_all(ifp);
 	bnx_stop(sc);
-	lwkt_serialize_exit(ifp->if_serializer);
+	ifnet_deserialize_all(ifp);
 
 	return 0;
 }
@@ -3332,16 +3376,16 @@ bnx_resume(device_t dev)
 	struct bnx_softc *sc = device_get_softc(dev);
 	struct ifnet *ifp = &sc->arpcom.ac_if;
 
-	lwkt_serialize_enter(ifp->if_serializer);
+	ifnet_serialize_all(ifp);
 
 	if (ifp->if_flags & IFF_UP) {
 		bnx_init(sc);
 
 		if (!ifq_is_empty(&ifp->if_snd))
-			if_devstart(ifp);
+			if_devstart_sched(ifp);
 	}
 
-	lwkt_serialize_exit(ifp->if_serializer);
+	ifnet_deserialize_all(ifp);
 
 	return 0;
 }
@@ -3360,50 +3404,44 @@ bnx_setpromisc(struct bnx_softc *sc)
 static void
 bnx_dma_free(struct bnx_softc *sc)
 {
+	struct bnx_rx_std_ring *std = &sc->bnx_rx_std_ring;
 	int i;
 
-	/* Destroy RX mbuf DMA stuffs. */
-	if (sc->bnx_cdata.bnx_rx_mtag != NULL) {
-		for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
-			bus_dmamap_destroy(sc->bnx_cdata.bnx_rx_mtag,
-			    sc->bnx_cdata.bnx_rx_std_dmamap[i]);
-		}
-		bus_dmamap_destroy(sc->bnx_cdata.bnx_rx_mtag,
-		    sc->bnx_cdata.bnx_rx_tmpmap);
-		bus_dma_tag_destroy(sc->bnx_cdata.bnx_rx_mtag);
+	/* Destroy RX return rings */
+	if (sc->bnx_rx_ret_ring != NULL) {
+		for (i = 0; i < sc->bnx_rx_retcnt; ++i)
+			bnx_destroy_rx_ret_ring(&sc->bnx_rx_ret_ring[i]);
+		kfree(sc->bnx_rx_ret_ring, M_DEVBUF);
 	}
 
-	/* Destroy TX mbuf DMA stuffs. */
-	if (sc->bnx_cdata.bnx_tx_mtag != NULL) {
-		for (i = 0; i < BGE_TX_RING_CNT; i++) {
-			bus_dmamap_destroy(sc->bnx_cdata.bnx_tx_mtag,
-			    sc->bnx_cdata.bnx_tx_dmamap[i]);
+	/* Destroy RX mbuf DMA stuffs. */
+	if (std->bnx_rx_mtag != NULL) {
+		for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
+			KKASSERT(std->bnx_rx_std_buf[i].bnx_rx_mbuf == NULL);
+			bus_dmamap_destroy(std->bnx_rx_mtag,
+			    std->bnx_rx_std_buf[i].bnx_rx_dmamap);
 		}
-		bus_dma_tag_destroy(sc->bnx_cdata.bnx_tx_mtag);
+		bus_dma_tag_destroy(std->bnx_rx_mtag);
 	}
 
 	/* Destroy standard RX ring */
-	bnx_dma_block_free(sc->bnx_cdata.bnx_rx_std_ring_tag,
-	    sc->bnx_cdata.bnx_rx_std_ring_map,
-	    sc->bnx_ldata.bnx_rx_std_ring);
+	bnx_dma_block_free(std->bnx_rx_std_ring_tag,
+	    std->bnx_rx_std_ring_map, std->bnx_rx_std_ring);
+
+	/* Destroy TX rings */
+	if (sc->bnx_tx_ring != NULL) {
+		for (i = 0; i < sc->bnx_tx_ringcnt; ++i)
+			bnx_destroy_tx_ring(&sc->bnx_tx_ring[i]);
+		kfree(sc->bnx_tx_ring, M_DEVBUF);
+	}
 
 	if (BNX_IS_JUMBO_CAPABLE(sc))
 		bnx_free_jumbo_mem(sc);
 
-	/* Destroy RX return ring */
-	bnx_dma_block_free(sc->bnx_cdata.bnx_rx_return_ring_tag,
-	    sc->bnx_cdata.bnx_rx_return_ring_map,
-	    sc->bnx_ldata.bnx_rx_return_ring);
-
-	/* Destroy TX ring */
-	bnx_dma_block_free(sc->bnx_cdata.bnx_tx_ring_tag,
-	    sc->bnx_cdata.bnx_tx_ring_map,
-	    sc->bnx_ldata.bnx_tx_ring);
-
 	/* Destroy status block */
 	bnx_dma_block_free(sc->bnx_cdata.bnx_status_tag,
-	    sc->bnx_cdata.bnx_status_map,
-	    sc->bnx_ldata.bnx_status_block);
+	    sc->bnx_cdata.bnx_status_map,
+	    sc->bnx_ldata.bnx_status_block);
 
 	/* Destroy the parent tag */
 	if (sc->bnx_cdata.bnx_parent_tag != NULL)
@@ -3411,10 +3449,11 @@ bnx_dma_free(struct bnx_softc *sc)
 }
 
 static int
-bnx_dma_alloc(struct bnx_softc *sc)
+bnx_dma_alloc(device_t dev)
 {
-	struct ifnet *ifp = &sc->arpcom.ac_if;
-	int i, error;
+	struct bnx_softc *sc = device_get_softc(dev);
+	struct bnx_rx_std_ring *std = &sc->bnx_rx_std_ring;
+	int i, error, mbx;
 
 	/*
 	 * Allocate the parent bus DMA tag appropriate for PCI.
@@ -3427,86 +3466,56 @@ bnx_dma_alloc(struct bnx_softc *sc)
 	 * state machine will lockup and cause the device to hang.
 	 */
 	error = bus_dma_tag_create(NULL, 1, BGE_DMA_BOUNDARY_4G,
-	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
-	    NULL, NULL,
-	    BUS_SPACE_MAXSIZE_32BIT, 0,
-	    BUS_SPACE_MAXSIZE_32BIT,
-	    0, &sc->bnx_cdata.bnx_parent_tag);
+	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
+	    BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT,
+	    0, &sc->bnx_cdata.bnx_parent_tag);
 	if (error) {
-		if_printf(ifp, "could not allocate parent dma tag\n");
+		device_printf(dev, "could not create parent DMA tag\n");
 		return error;
 	}
 
 	/*
-	 * Create DMA tag and maps for RX mbufs.
+	 * Create DMA stuffs for status block.
 	 */
-	error = bus_dma_tag_create(sc->bnx_cdata.bnx_parent_tag, 1, 0,
-	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
-	    NULL, NULL, MCLBYTES, 1, MCLBYTES,
-	    BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK,
-	    &sc->bnx_cdata.bnx_rx_mtag);
-	if (error) {
-		if_printf(ifp, "could not allocate RX mbuf dma tag\n");
-		return error;
-	}
-
-	error = bus_dmamap_create(sc->bnx_cdata.bnx_rx_mtag,
-	    BUS_DMA_WAITOK, &sc->bnx_cdata.bnx_rx_tmpmap);
+	error = bnx_dma_block_alloc(sc, BGE_STATUS_BLK_SZ,
+	    &sc->bnx_cdata.bnx_status_tag,
+	    &sc->bnx_cdata.bnx_status_map,
+	    (void *)&sc->bnx_ldata.bnx_status_block,
+	    &sc->bnx_ldata.bnx_status_block_paddr);
 	if (error) {
-		bus_dma_tag_destroy(sc->bnx_cdata.bnx_rx_mtag);
-		sc->bnx_cdata.bnx_rx_mtag = NULL;
+		device_printf(dev, "could not create status block\n");
 		return error;
 	}
 
-	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
-		error = bus_dmamap_create(sc->bnx_cdata.bnx_rx_mtag,
-		    BUS_DMA_WAITOK,
-		    &sc->bnx_cdata.bnx_rx_std_dmamap[i]);
-		if (error) {
-			int j;
-
-			for (j = 0; j < i; ++j) {
-				bus_dmamap_destroy(sc->bnx_cdata.bnx_rx_mtag,
-				    sc->bnx_cdata.bnx_rx_std_dmamap[j]);
-			}
-			bus_dma_tag_destroy(sc->bnx_cdata.bnx_rx_mtag);
-			sc->bnx_cdata.bnx_rx_mtag = NULL;
-
-			if_printf(ifp, "could not create DMA map for RX\n");
-			return error;
-		}
-	}
-
 	/*
-	 * Create DMA tag and maps for TX mbufs.
+	 * Create DMA tag and maps for RX mbufs.
 	 */
+	std->bnx_sc = sc;
+	lwkt_serialize_init(&std->bnx_rx_std_serialize);
 	error = bus_dma_tag_create(sc->bnx_cdata.bnx_parent_tag, 1, 0,
-	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
-	    NULL, NULL,
-	    BNX_JUMBO_FRAMELEN, BNX_NSEG_NEW, MCLBYTES,
-	    BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK |
-	    BUS_DMA_ONEBPAGE,
-	    &sc->bnx_cdata.bnx_tx_mtag);
+	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
+	    NULL, NULL, MCLBYTES, 1, MCLBYTES,
+	    BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK, &std->bnx_rx_mtag);
 	if (error) {
-		if_printf(ifp, "could not allocate TX mbuf dma tag\n");
+		device_printf(dev, "could not create RX mbuf DMA tag\n");
 		return error;
 	}
 
-	for (i = 0; i < BGE_TX_RING_CNT; i++) {
-		error = bus_dmamap_create(sc->bnx_cdata.bnx_tx_mtag,
-		    BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
-		    &sc->bnx_cdata.bnx_tx_dmamap[i]);
+	for (i = 0; i < BGE_STD_RX_RING_CNT; ++i) {
+		error = bus_dmamap_create(std->bnx_rx_mtag, BUS_DMA_WAITOK,
+		    &std->bnx_rx_std_buf[i].bnx_rx_dmamap);
 		if (error) {
 			int j;
 
 			for (j = 0; j < i; ++j) {
-				bus_dmamap_destroy(sc->bnx_cdata.bnx_tx_mtag,
-				    sc->bnx_cdata.bnx_tx_dmamap[j]);
+				bus_dmamap_destroy(std->bnx_rx_mtag,
+				    std->bnx_rx_std_buf[j].bnx_rx_dmamap);
 			}
-			bus_dma_tag_destroy(sc->bnx_cdata.bnx_tx_mtag);
-			sc->bnx_cdata.bnx_tx_mtag = NULL;
+			bus_dma_tag_destroy(std->bnx_rx_mtag);
+			std->bnx_rx_mtag = NULL;
 
-			if_printf(ifp, "could not create DMA map for TX\n");
+			device_printf(dev,
+			    "could not create %dth RX mbuf DMA map\n", i);
 			return error;
 		}
 	}
@@ -3515,64 +3524,79 @@
 	 * Create DMA stuffs for standard RX ring.
 	 */
 	error = bnx_dma_block_alloc(sc, BGE_STD_RX_RING_SZ,
-	    &sc->bnx_cdata.bnx_rx_std_ring_tag,
-	    &sc->bnx_cdata.bnx_rx_std_ring_map,
-	    (void *)&sc->bnx_ldata.bnx_rx_std_ring,
-	    &sc->bnx_ldata.bnx_rx_std_ring_paddr);
+	    &std->bnx_rx_std_ring_tag,
+	    &std->bnx_rx_std_ring_map,
+	    (void *)&std->bnx_rx_std_ring,
+	    &std->bnx_rx_std_ring_paddr);
 	if (error) {
-		if_printf(ifp, "could not create std RX ring\n");
+		device_printf(dev, "could not create std RX ring\n");
 		return error;
 	}
 
 	/*
-	 * Create jumbo buffer pool.
+	 * Create RX return rings
 	 */
-	if (BNX_IS_JUMBO_CAPABLE(sc)) {
-		error = bnx_alloc_jumbo_mem(sc);
+	sc->bnx_rx_ret_ring = kmalloc_cachealign(
+	    sizeof(struct bnx_rx_ret_ring) * sc->bnx_rx_retcnt, M_DEVBUF,
+	    M_WAITOK | M_ZERO);
+	for (i = 0; i < sc->bnx_rx_retcnt; ++i) {
+		struct bnx_rx_ret_ring *ret = &sc->bnx_rx_ret_ring[i];
+
+		ret->bnx_sc = sc;
+		ret->bnx_std = std;
+
+		/* XXX */
+		ret->bnx_rx_considx =
+		&sc->bnx_ldata.bnx_status_block->bge_idx[0].bge_rx_prod_idx;
+
+		error = bnx_create_rx_ret_ring(ret);
 		if (error) {
-			if_printf(ifp, "could not create jumbo buffer pool\n");
+			device_printf(dev,
+			    "could not create %dth RX ret ring\n", i);
 			return error;
 		}
 	}
 
 	/*
-	 * Create DMA stuffs for RX return ring.
+	 * Create TX rings
 	 */
-	error = bnx_dma_block_alloc(sc,
-	    BGE_RX_RTN_RING_SZ(sc->bnx_return_ring_cnt),
-	    &sc->bnx_cdata.bnx_rx_return_ring_tag,
-	    &sc->bnx_cdata.bnx_rx_return_ring_map,
-	    (void *)&sc->bnx_ldata.bnx_rx_return_ring,
-	    &sc->bnx_ldata.bnx_rx_return_ring_paddr);
-	if (error) {
-		if_printf(ifp, "could not create RX ret ring\n");
-		return error;
-	}
+	mbx = BGE_MBX_TX_HOST_PROD0_LO;
+	sc->bnx_tx_ring = kmalloc_cachealign(
+	    sizeof(struct bnx_tx_ring) * sc->bnx_tx_ringcnt, M_DEVBUF,
+	    M_WAITOK | M_ZERO);
+	for (i = 0; i < sc->bnx_tx_ringcnt; ++i) {
+		struct bnx_tx_ring *txr = &sc->bnx_tx_ring[i];
+
+		txr->bnx_sc = sc;
+		txr->bnx_tx_mbx = mbx;
+
+		if (mbx & 0x4)
+			mbx -= 0x4;
+		else
+			mbx += 0xc;
 
-	/*
-	 * Create DMA stuffs for TX ring.
-	 */
-	error = bnx_dma_block_alloc(sc, BGE_TX_RING_SZ,
-	    &sc->bnx_cdata.bnx_tx_ring_tag,
-	    &sc->bnx_cdata.bnx_tx_ring_map,
-	    (void *)&sc->bnx_ldata.bnx_tx_ring,
-	    &sc->bnx_ldata.bnx_tx_ring_paddr);
-	if (error) {
-		if_printf(ifp, "could not create TX ring\n");
-		return error;
+		/* XXX */
+		txr->bnx_tx_considx =
+		&sc->bnx_ldata.bnx_status_block->bge_idx[0].bge_tx_cons_idx;
+
+		error = bnx_create_tx_ring(txr);
+		if (error) {
+			device_printf(dev,
+			    "could not create %dth TX ring\n", i);
+			return error;
+		}
 	}
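The mailbox walk above ("mbx -= 0x4" / "mbx += 0xc") assigns each TX ring a 4-byte producer mailbox, hopping between the two dword halves of successive 8-byte mailbox slots. The demo below only prints the resulting address sequence; the base value is illustrative, not necessarily the real BGE_MBX_TX_HOST_PROD0_LO offset:

#include <stdio.h>

int
main(void)
{
	unsigned mbx = 0x304;	/* assumed TX producer mailbox base */
	int i;

	for (i = 0; i < 4; i++) {
		printf("TX ring %d -> mailbox %#x\n", i, mbx);
		if (mbx & 0x4)		/* low half: step back to high half */
			mbx -= 0x4;
		else			/* high half: jump to next slot's low half */
			mbx += 0xc;
	}
	return 0;
}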
 
 	/*
-	 * Create DMA stuffs for status block.
+	 * Create jumbo buffer pool.
 	 */
-	error = bnx_dma_block_alloc(sc, BGE_STATUS_BLK_SZ,
-	    &sc->bnx_cdata.bnx_status_tag,
-	    &sc->bnx_cdata.bnx_status_map,
-	    (void *)&sc->bnx_ldata.bnx_status_block,
-	    &sc->bnx_ldata.bnx_status_block_paddr);
-	if (error) {
-		if_printf(ifp, "could not create status block\n");
-		return error;
+	if (BNX_IS_JUMBO_CAPABLE(sc)) {
+		error = bnx_alloc_jumbo_mem(sc);
+		if (error) {
+			device_printf(dev,
+			    "could not create jumbo buffer pool\n");
+			return error;
+		}
 	}
 
 	return 0;
@@ -3786,7 +3810,7 @@ bnx_sysctl_coal_chg(SYSCTL_HANDLER_ARGS, uint32_t *coal,
 	struct ifnet *ifp = &sc->arpcom.ac_if;
 	int error = 0, v;
 
-	lwkt_serialize_enter(ifp->if_serializer);
+	ifnet_serialize_all(ifp);
 
 	v = *coal;
 	error = sysctl_handle_int(oidp, &v, 0, req);
@@ -3796,10 +3820,13 @@ bnx_sysctl_coal_chg(SYSCTL_HANDLER_ARGS, uint32_t *coal,
 		} else {
 			*coal = v;
 			sc->bnx_coal_chg |= coal_chg_mask;
+
+			/* Commit changes */
+			bnx_coal_change(sc);
 		}
 	}
 
-	lwkt_serialize_exit(ifp->if_serializer);
+	ifnet_deserialize_all(ifp);
 	return error;
 }
 
@@ -3807,15 +3834,14 @@ static void
 bnx_coal_change(struct bnx_softc *sc)
 {
 	struct ifnet *ifp = &sc->arpcom.ac_if;
-	uint32_t val;
 
-	ASSERT_SERIALIZED(ifp->if_serializer);
+	ASSERT_IFNET_SERIALIZED_ALL(ifp);
 
 	if (sc->bnx_coal_chg & BNX_RX_COAL_TICKS_CHG) {
 		CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS,
 		    sc->bnx_rx_coal_ticks);
 		DELAY(10);
-		val = CSR_READ_4(sc, BGE_HCC_RX_COAL_TICKS);
+		CSR_READ_4(sc, BGE_HCC_RX_COAL_TICKS);
 
 		if (bootverbose) {
 			if_printf(ifp, "rx_coal_ticks -> %u\n",
@@ -3827,7 +3853,7 @@ bnx_coal_change(struct bnx_softc *sc)
 		CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS,
 		    sc->bnx_tx_coal_ticks);
 		DELAY(10);
-		val = CSR_READ_4(sc, BGE_HCC_TX_COAL_TICKS);
+		CSR_READ_4(sc, BGE_HCC_TX_COAL_TICKS);
 
 		if (bootverbose) {
 			if_printf(ifp, "tx_coal_ticks -> %u\n",
@@ -3839,7 +3865,7 @@ bnx_coal_change(struct bnx_softc *sc)
 		CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS,
 		    sc->bnx_rx_coal_bds);
 		DELAY(10);
-		val = CSR_READ_4(sc, BGE_HCC_RX_MAX_COAL_BDS);
+		CSR_READ_4(sc, BGE_HCC_RX_MAX_COAL_BDS);
 
 		if (bootverbose) {
 			if_printf(ifp, "rx_coal_bds -> %u\n",
@@ -3851,10 +3877,10 @@ bnx_coal_change(struct bnx_softc *sc)
 		CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS,
 		    sc->bnx_tx_coal_bds);
 		DELAY(10);
-		val = CSR_READ_4(sc, BGE_HCC_TX_MAX_COAL_BDS);
+		CSR_READ_4(sc, BGE_HCC_TX_MAX_COAL_BDS);
 
 		if (bootverbose) {
-			if_printf(ifp, "tx_max_coal_bds -> %u\n",
+			if_printf(ifp, "tx_coal_bds -> %u\n",
 			    sc->bnx_tx_coal_bds);
 		}
 	}
@@ -3863,7 +3889,7 @@ bnx_coal_change(struct bnx_softc *sc)
 		CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT,
 		    sc->bnx_rx_coal_bds_int);
 		DELAY(10);
-		val = CSR_READ_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT);
+		CSR_READ_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT);
 
 		if (bootverbose) {
 			if_printf(ifp, "rx_coal_bds_int -> %u\n",
@@ -3875,7 +3901,7 @@ bnx_coal_change(struct bnx_softc *sc)
 		CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT,
 		    sc->bnx_tx_coal_bds_int);
 		DELAY(10);
-		val = CSR_READ_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT);
+		CSR_READ_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT);
 
 		if (bootverbose) {
 			if_printf(ifp, "tx_coal_bds_int -> %u\n",
@@ -3886,12 +3912,60 @@
 	sc->bnx_coal_chg = 0;
 }
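The dropped "val =" assignments above are deliberate: each coalescing update is CSR_WRITE_4(), DELAY(10), then a CSR_READ_4() performed purely to push the posted write out to the chip, so storing the result was dead code (and the sysctl handler now commits via bnx_coal_change() directly instead of deferring to the next interrupt). A sketch of the write/flush idiom with stub accessors; csr_write/csr_read here are stand-ins for the CSR macros, not real kernel APIs:

#include <stdio.h>
#include <stdint.h>

static volatile uint32_t csr[16];	/* models memory-mapped CSRs */

static void
csr_write(int reg, uint32_t val)
{
	csr[reg] = val;		/* may be posted on a real bus */
}

static uint32_t
csr_read(int reg)
{
	return csr[reg];	/* read-back forces the posted write out */
}

int
main(void)
{
	csr_write(3, 150);	/* update coalescing ticks */
	(void)csr_read(3);	/* flush; the returned value is unused */
	printf("reg3=%u\n", csr[3]);
	return 0;
}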
+
+static void
+bnx_check_intr(void *xintr)
+{
+	struct bnx_intr_data *intr = xintr;
+	struct bnx_rx_ret_ring *ret;
+	struct bnx_tx_ring *txr;
+	struct ifnet *ifp;
+
+	lwkt_serialize_enter(intr->bnx_intr_serialize);
+
+	KKASSERT(mycpuid == intr->bnx_intr_cpuid);
+
+	ifp = &intr->bnx_sc->arpcom.ac_if;
+	if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) != IFF_RUNNING) {
+		lwkt_serialize_exit(intr->bnx_intr_serialize);
+		return;
+	}
+
+	txr = intr->bnx_txr;
+	ret = intr->bnx_ret;
+
+	if (*ret->bnx_rx_considx != ret->bnx_rx_saved_considx ||
+	    *txr->bnx_tx_considx != txr->bnx_tx_saved_considx) {
+		if (intr->bnx_rx_check_considx == ret->bnx_rx_saved_considx &&
+		    intr->bnx_tx_check_considx == txr->bnx_tx_saved_considx) {
+			if (!intr->bnx_intr_maylose) {
+				intr->bnx_intr_maylose = TRUE;
+				goto done;
+			}
+			if (bootverbose)
+				if_printf(ifp, "lost interrupt\n");
+			intr->bnx_intr_func(intr->bnx_intr_arg);
+		}
+	}
+	intr->bnx_intr_maylose = FALSE;
+	intr->bnx_rx_check_considx = ret->bnx_rx_saved_considx;
+	intr->bnx_tx_check_considx = txr->bnx_tx_saved_considx;
+
+done:
+	callout_reset(&intr->bnx_intr_timer, BNX_INTR_CKINTVL,
+	    intr->bnx_intr_check, intr);
+	lwkt_serialize_exit(intr->bnx_intr_serialize);
+}
+
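bnx_check_intr() above polls every BNX_INTR_CKINTVL (10 ms) as a workaround for chips with the status-tag bug: if work is pending (the hardware indices differ from the driver's saved ones) yet the saved indices have not advanced since the previous check, it assumes an interrupt was lost, allowing one grace interval via bnx_intr_maylose before forcing the handler. A standalone simulation of that heuristic:

#include <stdbool.h>
#include <stdio.h>

int
main(void)
{
	int hw_considx = 7;	/* what the chip reports */
	int saved_considx = 3;	/* what the driver has processed */
	int check_considx = 0;	/* snapshot from the previous check */
	bool maylose = false;
	int tick;

	for (tick = 0; tick < 3; tick++) {
		if (hw_considx != saved_considx &&	/* work pending */
		    check_considx == saved_considx) {	/* but no progress */
			if (!maylose) {
				maylose = true;		/* one grace interval */
				continue;
			}
			printf("tick %d: lost interrupt, forcing handler\n",
			    tick);
			saved_considx = hw_considx;	/* handler catches up */
		}
		maylose = false;
		check_considx = saved_considx;		/* take new snapshot */
	}
	return 0;
}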
 static void
 bnx_enable_intr(struct bnx_softc *sc)
 {
 	struct ifnet *ifp = &sc->arpcom.ac_if;
+	int i;
 
-	lwkt_serialize_handler_enable(ifp->if_serializer);
+	for (i = 0; i < sc->bnx_intr_cnt; ++i) {
+		lwkt_serialize_handler_enable(
+		    sc->bnx_intr_data[i].bnx_intr_serialize);
+	}
 
 	/*
 	 * Enable interrupt.
@@ -3914,12 +3988,37 @@ bnx_enable_intr(struct bnx_softc *sc)
 	 * interrupt.
 	 */
 	BNX_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
+
+	if (sc->bnx_flags & BNX_FLAG_STATUSTAG_BUG) {
+		if (bootverbose)
+			if_printf(ifp, "status tag bug workaround\n");
+
+		for (i = 0; i < sc->bnx_intr_cnt; ++i) {
+			struct bnx_intr_data *intr = &sc->bnx_intr_data[i];
+
+			intr->bnx_intr_maylose = FALSE;
+			intr->bnx_rx_check_considx = 0;
+			intr->bnx_tx_check_considx = 0;
+			callout_reset_bycpu(&intr->bnx_intr_timer,
+			    BNX_INTR_CKINTVL, intr->bnx_intr_check, intr,
+			    intr->bnx_intr_cpuid);
+		}
+	}
 }
 
 static void
 bnx_disable_intr(struct bnx_softc *sc)
 {
-	struct ifnet *ifp = &sc->arpcom.ac_if;
+	int i;
+
+	for (i = 0; i < sc->bnx_intr_cnt; ++i) {
+		struct bnx_intr_data *intr = &sc->bnx_intr_data[i];
+
+		callout_stop(&intr->bnx_intr_timer);
+		intr->bnx_intr_maylose = FALSE;
+		intr->bnx_rx_check_considx = 0;
+		intr->bnx_tx_check_considx = 0;
+	}
 
 	/*
 	 * Mask the interrupt when we start polling.
@@ -3932,7 +4031,11 @@ bnx_disable_intr(struct bnx_softc *sc)
 	 */
 	bnx_writembx(sc, BGE_MBX_IRQ0_LO, 1);
 
-	lwkt_serialize_handler_disable(ifp->if_serializer);
+	sc->bnx_npoll.ifpc_stcount = 0;
+	for (i = 0; i < sc->bnx_intr_cnt; ++i) {
+		lwkt_serialize_handler_disable(
+		    sc->bnx_intr_data[i].bnx_intr_serialize);
+	}
 }
 
 static int
@@ -3960,8 +4063,15 @@ bnx_get_eaddr_nvram(struct bnx_softc *sc, uint8_t ether_addr[])
 {
 	int mac_offset = BGE_EE_MAC_OFFSET;
 
-	if (sc->bnx_asicrev == BGE_ASICREV_BCM5906)
-		mac_offset = BGE_EE_MAC_OFFSET_5906;
+	if (BNX_IS_5717_PLUS(sc)) {
+		int f;
+
+		f = pci_get_function(sc->bnx_dev);
+		if (f & 1)
+			mac_offset = BGE_EE_MAC_OFFSET_5717;
+		if (f > 1)
+			mac_offset += BGE_EE_MAC_OFFSET_5717_OFF;
+	}
 
 	return bnx_read_nvram(sc, ether_addr, mac_offset + 2, ETHER_ADDR_LEN);
 }
@@ -4083,10 +4193,436 @@ bnx_dma_swap_options(struct bnx_softc *sc)
 #if BYTE_ORDER == BIG_ENDIAN
 	dma_options |= BGE_MODECTL_BYTESWAP_NONFRAME;
 #endif
-	if (sc->bnx_asicrev == BGE_ASICREV_BCM5720) {
+	if (sc->bnx_asicrev == BGE_ASICREV_BCM5720 ||
+	    sc->bnx_asicrev == BGE_ASICREV_BCM5762) {
 		dma_options |= BGE_MODECTL_BYTESWAP_B2HRX_DATA |
 		    BGE_MODECTL_WORDSWAP_B2HRX_DATA | BGE_MODECTL_B2HRX_ENABLE |
 		    BGE_MODECTL_HTX2B_ENABLE;
 	}
 	return dma_options;
 }
+
+static int
+bnx_setup_tso(struct bnx_tx_ring *txr, struct mbuf **mp,
+    uint16_t *mss0, uint16_t *flags0)
+{
+	struct mbuf *m;
+	struct ip *ip;
+	struct tcphdr *th;
+	int thoff, iphlen, hoff, hlen;
+	uint16_t flags, mss;
+
+	m = *mp;
+	KASSERT(M_WRITABLE(m), ("TSO mbuf not writable"));
+
+	hoff = m->m_pkthdr.csum_lhlen;
+	iphlen = m->m_pkthdr.csum_iphlen;
+	thoff = m->m_pkthdr.csum_thlen;
+
+	KASSERT(hoff > 0, ("invalid ether header len"));
+	KASSERT(iphlen > 0, ("invalid ip header len"));
+	KASSERT(thoff > 0, ("invalid tcp header len"));
+
+	if (__predict_false(m->m_len < hoff + iphlen + thoff)) {
+		m = m_pullup(m, hoff + iphlen + thoff);
+		if (m == NULL) {
+			*mp = NULL;
+			return ENOBUFS;
+		}
+		*mp = m;
+	}
+	ip = mtodoff(m, struct ip *, hoff);
+	th = mtodoff(m, struct tcphdr *, hoff + iphlen);
+
+	mss = m->m_pkthdr.tso_segsz;
+	flags = BGE_TXBDFLAG_CPU_PRE_DMA | BGE_TXBDFLAG_CPU_POST_DMA;
+
+	ip->ip_len = htons(mss + iphlen + thoff);
+	th->th_sum = 0;
+
+	hlen = (iphlen + thoff) >> 2;
+	mss |= ((hlen & 0x3) << 14);
+	flags |= ((hlen & 0xf8) << 7) | ((hlen & 0x4) << 2);
+
+	*mss0 = mss;
+	*flags0 = flags;
+
+	return 0;
+}
+
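bnx_setup_tso() above rewrites ip_len to a single segment's worth (mss plus headers) and spreads the combined header length hlen (IP + TCP header bytes divided by 4) across spare descriptor bits: bits 0-1 go into mss[15:14], bit 2 into flags bit 4, and bits 3-7 into flags[14:10]. A worked, runnable example of that packing for a plain 20-byte IP + 20-byte TCP header:

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	int iphlen = 20, thoff = 20;
	uint16_t mss = 1448, flags = 0;
	int hlen = (iphlen + thoff) >> 2;	/* header len in 32-bit words */

	mss |= (uint16_t)((hlen & 0x3) << 14);		  /* hlen bits 0-1 */
	flags |= (uint16_t)(((hlen & 0xf8) << 7) |	  /* hlen bits 3-7 */
	    ((hlen & 0x4) << 2));			  /* hlen bit 2 */

	/* hlen = 10: mss gains 0x8000, flags gain 0x400 */
	printf("hlen=%d mss=%#x flags=%#x\n", hlen, mss, flags);
	return 0;
}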
+static int
+bnx_create_tx_ring(struct bnx_tx_ring *txr)
+{
+	bus_size_t txmaxsz, txmaxsegsz;
+	int i, error;
+
+	lwkt_serialize_init(&txr->bnx_tx_serialize);
+
+	/*
+	 * Create DMA tag and maps for TX mbufs.
+	 */
+	if (txr->bnx_sc->bnx_flags & BNX_FLAG_TSO)
+		txmaxsz = IP_MAXPACKET + sizeof(struct ether_vlan_header);
+	else
+		txmaxsz = BNX_JUMBO_FRAMELEN;
+	if (txr->bnx_sc->bnx_asicrev == BGE_ASICREV_BCM57766)
+		txmaxsegsz = MCLBYTES;
+	else
+		txmaxsegsz = PAGE_SIZE;
+	error = bus_dma_tag_create(txr->bnx_sc->bnx_cdata.bnx_parent_tag,
+	    1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
+	    txmaxsz, BNX_NSEG_NEW, txmaxsegsz,
+	    BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
+	    &txr->bnx_tx_mtag);
+	if (error) {
+		device_printf(txr->bnx_sc->bnx_dev,
+		    "could not create TX mbuf DMA tag\n");
+		return error;
+	}
+
+	for (i = 0; i < BGE_TX_RING_CNT; i++) {
+		error = bus_dmamap_create(txr->bnx_tx_mtag,
+		    BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
+		    &txr->bnx_tx_buf[i].bnx_tx_dmamap);
+		if (error) {
+			int j;
+
+			for (j = 0; j < i; ++j) {
+				bus_dmamap_destroy(txr->bnx_tx_mtag,
+				    txr->bnx_tx_buf[j].bnx_tx_dmamap);
+			}
+			bus_dma_tag_destroy(txr->bnx_tx_mtag);
+			txr->bnx_tx_mtag = NULL;
+
+			device_printf(txr->bnx_sc->bnx_dev,
+			    "could not create TX mbuf DMA map\n");
+			return error;
+		}
+	}
+
+	/*
+	 * Create DMA stuffs for TX ring.
+	 */
+	error = bnx_dma_block_alloc(txr->bnx_sc, BGE_TX_RING_SZ,
+	    &txr->bnx_tx_ring_tag,
+	    &txr->bnx_tx_ring_map,
+	    (void *)&txr->bnx_tx_ring,
+	    &txr->bnx_tx_ring_paddr);
+	if (error) {
+		device_printf(txr->bnx_sc->bnx_dev,
+		    "could not create TX ring\n");
+		return error;
+	}
+
+	txr->bnx_tx_flags |= BNX_TX_FLAG_SHORTDMA;
+	txr->bnx_tx_wreg = BNX_TX_WREG_NSEGS;
+
+	return 0;
+}
+
+static void
+bnx_destroy_tx_ring(struct bnx_tx_ring *txr)
+{
+	/* Destroy TX mbuf DMA stuffs. */
+	if (txr->bnx_tx_mtag != NULL) {
+		int i;
+
+		for (i = 0; i < BGE_TX_RING_CNT; i++) {
+			KKASSERT(txr->bnx_tx_buf[i].bnx_tx_mbuf == NULL);
+			bus_dmamap_destroy(txr->bnx_tx_mtag,
+			    txr->bnx_tx_buf[i].bnx_tx_dmamap);
+		}
+		bus_dma_tag_destroy(txr->bnx_tx_mtag);
+	}
+
+	/* Destroy TX ring */
+	bnx_dma_block_free(txr->bnx_tx_ring_tag,
+	    txr->bnx_tx_ring_map, txr->bnx_tx_ring);
+}
+
+static int
+bnx_sysctl_force_defrag(SYSCTL_HANDLER_ARGS)
+{
+	struct bnx_softc *sc = (void *)arg1;
+	struct ifnet *ifp = &sc->arpcom.ac_if;
+	struct bnx_tx_ring *txr = &sc->bnx_tx_ring[0];
+	int error, defrag, i;
+
+	if (txr->bnx_tx_flags & BNX_TX_FLAG_FORCE_DEFRAG)
+		defrag = 1;
+	else
+		defrag = 0;
+
+	error = sysctl_handle_int(oidp, &defrag, 0, req);
+	if (error || req->newptr == NULL)
+		return error;
+
+	ifnet_serialize_all(ifp);
+	for (i = 0; i < sc->bnx_tx_ringcnt; ++i) {
+		txr = &sc->bnx_tx_ring[i];
+		if (defrag)
+			txr->bnx_tx_flags |= BNX_TX_FLAG_FORCE_DEFRAG;
+		else
+			txr->bnx_tx_flags &= ~BNX_TX_FLAG_FORCE_DEFRAG;
+	}
+	ifnet_deserialize_all(ifp);
+
+	return 0;
+}
+
+static int
+bnx_sysctl_tx_wreg(SYSCTL_HANDLER_ARGS)
+{
+	struct bnx_softc *sc = (void *)arg1;
+	struct ifnet *ifp = &sc->arpcom.ac_if;
+	struct bnx_tx_ring *txr = &sc->bnx_tx_ring[0];
+	int error, tx_wreg, i;
+
+	tx_wreg = txr->bnx_tx_wreg;
+	error = sysctl_handle_int(oidp, &tx_wreg, 0, req);
+	if (error || req->newptr == NULL)
+		return error;
+
+	ifnet_serialize_all(ifp);
+	for (i = 0; i < sc->bnx_tx_ringcnt; ++i)
+		sc->bnx_tx_ring[i].bnx_tx_wreg = tx_wreg;
+	ifnet_deserialize_all(ifp);
+
+	return 0;
+}
+
+static int
+bnx_create_rx_ret_ring(struct bnx_rx_ret_ring *ret)
+{
+	int error;
+
+	lwkt_serialize_init(&ret->bnx_rx_ret_serialize);
+
+	/*
+	 * Create DMA stuffs for RX return ring.
+	 */
+	error = bnx_dma_block_alloc(ret->bnx_sc,
+	    BGE_RX_RTN_RING_SZ(BNX_RETURN_RING_CNT),
+	    &ret->bnx_rx_ret_ring_tag,
+	    &ret->bnx_rx_ret_ring_map,
+	    (void *)&ret->bnx_rx_ret_ring,
+	    &ret->bnx_rx_ret_ring_paddr);
+	if (error) {
+		device_printf(ret->bnx_sc->bnx_dev,
+		    "could not create RX ret ring\n");
+		return error;
+	}
+
+	/* Shadow standard ring's RX mbuf DMA tag */
+	ret->bnx_rx_mtag = ret->bnx_std->bnx_rx_mtag;
+
+	/*
+	 * Create tmp DMA map for RX mbufs.
+	 */
+	error = bus_dmamap_create(ret->bnx_rx_mtag, BUS_DMA_WAITOK,
+	    &ret->bnx_rx_tmpmap);
+	if (error) {
+		device_printf(ret->bnx_sc->bnx_dev,
+		    "could not create tmp RX mbuf DMA map\n");
+		ret->bnx_rx_mtag = NULL;
+		return error;
+	}
+	return 0;
+}
+
+static void
+bnx_destroy_rx_ret_ring(struct bnx_rx_ret_ring *ret)
+{
+	/* Destroy tmp RX mbuf DMA map */
+	if (ret->bnx_rx_mtag != NULL)
+		bus_dmamap_destroy(ret->bnx_rx_mtag, ret->bnx_rx_tmpmap);
+
+	/* Destroy RX return ring */
+	bnx_dma_block_free(ret->bnx_rx_ret_ring_tag,
+	    ret->bnx_rx_ret_ring_map, ret->bnx_rx_ret_ring);
+}
+
+static int
+bnx_alloc_intr(struct bnx_softc *sc)
+{
+	struct bnx_intr_data *intr;
+	u_int intr_flags;
+
+	sc->bnx_intr_cnt = 1;
+
+	intr = &sc->bnx_intr_data[0];
+	intr->bnx_sc = sc;
+	intr->bnx_ret = &sc->bnx_rx_ret_ring[0];
+	intr->bnx_txr = &sc->bnx_tx_ring[0];
+	intr->bnx_intr_serialize = &sc->bnx_main_serialize;
+	callout_init_mp(&intr->bnx_intr_timer);
+	intr->bnx_intr_check = bnx_check_intr;
+
+	sc->bnx_intr_type = pci_alloc_1intr(sc->bnx_dev, bnx_msi_enable,
+	    &intr->bnx_intr_rid, &intr_flags);
+
+	intr->bnx_intr_res = bus_alloc_resource_any(sc->bnx_dev, SYS_RES_IRQ,
+	    &intr->bnx_intr_rid, intr_flags);
+	if (intr->bnx_intr_res == NULL) {
+		device_printf(sc->bnx_dev, "could not alloc interrupt\n");
+		return ENXIO;
+	}
+
+	if (sc->bnx_intr_type == PCI_INTR_TYPE_MSI) {
+		sc->bnx_flags |= BNX_FLAG_ONESHOT_MSI;
+		bnx_enable_msi(sc);
+
+		if (sc->bnx_flags & BNX_FLAG_ONESHOT_MSI) {
+			intr->bnx_intr_func = bnx_msi_oneshot;
+			if (bootverbose)
+				device_printf(sc->bnx_dev, "oneshot MSI\n");
+		} else {
+			intr->bnx_intr_func = bnx_msi;
+		}
+	} else {
+		intr->bnx_intr_func = bnx_intr_legacy;
+	}
+	intr->bnx_intr_arg = sc;
+	intr->bnx_intr_cpuid = rman_get_cpuid(intr->bnx_intr_res);
+
+	intr->bnx_txr->bnx_tx_cpuid = intr->bnx_intr_cpuid;
+
+	return 0;
+}
+
+static int
+bnx_setup_intr(struct bnx_softc *sc)
+{
+	int error, i;
+
+	for (i = 0; i < sc->bnx_intr_cnt; ++i) {
+		struct bnx_intr_data *intr = &sc->bnx_intr_data[i];
+
+		error = bus_setup_intr_descr(sc->bnx_dev, intr->bnx_intr_res,
+		    INTR_MPSAFE, intr->bnx_intr_func, intr->bnx_intr_arg,
+		    &intr->bnx_intr_hand, intr->bnx_intr_serialize,
+		    intr->bnx_intr_desc);
+		if (error) {
+			device_printf(sc->bnx_dev,
+			    "could not set up %dth intr\n", i);
+			bnx_teardown_intr(sc, i);
+			return error;
+		}
+	}
+	return 0;
+}
+
+static void
+bnx_teardown_intr(struct bnx_softc *sc, int cnt)
+{
+	int i;
+
+	for (i = 0; i < cnt; ++i) {
+		struct bnx_intr_data *intr = &sc->bnx_intr_data[i];
+
+		bus_teardown_intr(sc->bnx_dev, intr->bnx_intr_res,
+		    intr->bnx_intr_hand);
+	}
+}
+
+static void
+bnx_free_intr(struct bnx_softc *sc)
+{
+	struct bnx_intr_data *intr;
+
+	KKASSERT(sc->bnx_intr_cnt <= 1);
+	intr = &sc->bnx_intr_data[0];
+
+	if (intr->bnx_intr_res != NULL) {
+		bus_release_resource(sc->bnx_dev, SYS_RES_IRQ,
+		    intr->bnx_intr_rid, intr->bnx_intr_res);
+	}
+	if (sc->bnx_intr_type == PCI_INTR_TYPE_MSI)
+		pci_release_msi(sc->bnx_dev);
+}
+
+static void
+bnx_setup_serialize(struct bnx_softc *sc)
+{
+	int i, j;
+
+	/*
+	 * Allocate serializer array
+	 */
+
+	/* Main + RX STD + TX + RX RET */
+	sc->bnx_serialize_cnt = 1 + 1 + sc->bnx_tx_ringcnt + sc->bnx_rx_retcnt;
+
+	sc->bnx_serialize =
+	    kmalloc(sc->bnx_serialize_cnt * sizeof(struct lwkt_serialize *),
+	        M_DEVBUF, M_WAITOK | M_ZERO);
+
+	/*
+	 * Setup serializers
+	 *
+	 * NOTE: Order is critical
+	 */
+
+	i = 0;
+
+	KKASSERT(i < sc->bnx_serialize_cnt);
+	sc->bnx_serialize[i++] = &sc->bnx_main_serialize;
+
+	KKASSERT(i < sc->bnx_serialize_cnt);
+	sc->bnx_serialize[i++] = &sc->bnx_rx_std_ring.bnx_rx_std_serialize;
+
+	for (j = 0; j < sc->bnx_rx_retcnt; ++j) {
+		KKASSERT(i < sc->bnx_serialize_cnt);
+		sc->bnx_serialize[i++] =
+		    &sc->bnx_rx_ret_ring[j].bnx_rx_ret_serialize;
+	}
+
+	for (j = 0; j < sc->bnx_tx_ringcnt; ++j) {
+		KKASSERT(i < sc->bnx_serialize_cnt);
+		sc->bnx_serialize[i++] =
+		    &sc->bnx_tx_ring[j].bnx_tx_serialize;
+	}
+
+	KKASSERT(i == sc->bnx_serialize_cnt);
+}
+
+static void
+bnx_serialize(struct ifnet *ifp, enum ifnet_serialize slz)
+{
+	struct bnx_softc *sc = ifp->if_softc;
+
+	ifnet_serialize_array_enter(sc->bnx_serialize,
+	    sc->bnx_serialize_cnt, slz);
+}
+
+static void
+bnx_deserialize(struct ifnet *ifp, enum ifnet_serialize slz)
+{
+	struct bnx_softc *sc = ifp->if_softc;
+
+	ifnet_serialize_array_exit(sc->bnx_serialize,
+	    sc->bnx_serialize_cnt, slz);
+}
+
+static int
+bnx_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz)
+{
+	struct bnx_softc *sc = ifp->if_softc;
+
+	return ifnet_serialize_array_try(sc->bnx_serialize,
+	    sc->bnx_serialize_cnt, slz);
+}
+
+#ifdef INVARIANTS
+
+static void
+bnx_serialize_assert(struct ifnet *ifp, enum ifnet_serialize slz,
+    boolean_t serialized)
+{
+	struct bnx_softc *sc = ifp->if_softc;
+
+	ifnet_serialize_array_assert(sc->bnx_serialize, sc->bnx_serialize_cnt,
+	    slz, serialized);
+}
+
+#endif	/* INVARIANTS */
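bnx_serialize() and friends delegate to the ifnet_serialize_array_* helpers over the array built in bnx_setup_serialize(); the "Order is critical" note holds because deadlock freedom depends on every serialize-all path acquiring the serializers in one global order (main, RX std, RX return rings, TX rings) and releasing in reverse. A minimal standalone illustration of that discipline, with prints standing in for lwkt serializers:

#include <stdio.h>

#define NSERIALIZE	4

static const char *names[NSERIALIZE] = {
	"main", "rx_std", "rx_ret0", "tx0"	/* one fixed global order */
};

static void
serialize_array_enter(int n)
{
	int i;

	for (i = 0; i < n; i++)		/* always acquire 0 .. n-1 */
		printf("enter %s\n", names[i]);
}

static void
serialize_array_exit(int n)
{
	int i;

	for (i = n - 1; i >= 0; i--)	/* release in reverse */
		printf("exit  %s\n", names[i]);
}

int
main(void)
{
	serialize_array_enter(NSERIALIZE);
	/* ...critical section over all rings... */
	serialize_array_exit(NSERIALIZE);
	return 0;
}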