2 * Copyright (c) 2001 Wind River Systems
3 * Copyright (c) 1997, 1998, 1999, 2001
4 * Bill Paul <wpaul@windriver.com>. All rights reserved.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 * must display the following acknowledgement:
16 * This product includes software developed by Bill Paul.
17 * 4. Neither the name of the author nor the names of any co-contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31 * THE POSSIBILITY OF SUCH DAMAGE.
33 * $FreeBSD: src/sys/dev/bge/if_bge.c,v 1.3.2.39 2005/07/03 03:41:18 silby Exp $
37 #include "opt_ifpoll.h"
39 #include <sys/param.h>
41 #include <sys/endian.h>
42 #include <sys/kernel.h>
43 #include <sys/interrupt.h>
45 #include <sys/malloc.h>
46 #include <sys/queue.h>
48 #include <sys/serialize.h>
49 #include <sys/socket.h>
50 #include <sys/sockio.h>
51 #include <sys/sysctl.h>
53 #include <netinet/ip.h>
54 #include <netinet/tcp.h>
57 #include <net/ethernet.h>
59 #include <net/if_arp.h>
60 #include <net/if_dl.h>
61 #include <net/if_media.h>
62 #include <net/if_poll.h>
63 #include <net/if_types.h>
64 #include <net/ifq_var.h>
65 #include <net/vlan/if_vlan_var.h>
66 #include <net/vlan/if_vlan_ether.h>
68 #include <dev/netif/mii_layer/mii.h>
69 #include <dev/netif/mii_layer/miivar.h>
70 #include <dev/netif/mii_layer/brgphyreg.h>
72 #include <bus/pci/pcidevs.h>
73 #include <bus/pci/pcireg.h>
74 #include <bus/pci/pcivar.h>
76 #include <dev/netif/bge/if_bgereg.h>
77 #include <dev/netif/bnx/if_bnxvar.h>
79 /* "device miibus" required. See GENERIC if you get errors here. */
80 #include "miibus_if.h"
82 #define BNX_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP)
84 #define BNX_INTR_CKINTVL ((10 * hz) / 1000) /* 10ms */
86 static const struct bnx_type {
91 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5717,
92 "Broadcom BCM5717 Gigabit Ethernet" },
93 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5717C,
94 "Broadcom BCM5717C Gigabit Ethernet" },
95 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5718,
96 "Broadcom BCM5718 Gigabit Ethernet" },
97 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5719,
98 "Broadcom BCM5719 Gigabit Ethernet" },
99 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5720_ALT,
100 "Broadcom BCM5720 Gigabit Ethernet" },
102 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5725,
103 "Broadcom BCM5725 Gigabit Ethernet" },
104 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5727,
105 "Broadcom BCM5727 Gigabit Ethernet" },
106 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5762,
107 "Broadcom BCM5762 Gigabit Ethernet" },
109 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57761,
110 "Broadcom BCM57761 Gigabit Ethernet" },
111 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57762,
112 "Broadcom BCM57762 Gigabit Ethernet" },
113 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57765,
114 "Broadcom BCM57765 Gigabit Ethernet" },
115 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57766,
116 "Broadcom BCM57766 Gigabit Ethernet" },
117 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57781,
118 "Broadcom BCM57781 Gigabit Ethernet" },
119 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57782,
120 "Broadcom BCM57782 Gigabit Ethernet" },
121 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57785,
122 "Broadcom BCM57785 Gigabit Ethernet" },
123 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57786,
124 "Broadcom BCM57786 Gigabit Ethernet" },
125 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57791,
126 "Broadcom BCM57791 Fast Ethernet" },
127 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57795,
128 "Broadcom BCM57795 Fast Ethernet" },
133 #define BNX_IS_JUMBO_CAPABLE(sc) ((sc)->bnx_flags & BNX_FLAG_JUMBO)
134 #define BNX_IS_5717_PLUS(sc) ((sc)->bnx_flags & BNX_FLAG_5717_PLUS)
135 #define BNX_IS_57765_PLUS(sc) ((sc)->bnx_flags & BNX_FLAG_57765_PLUS)
136 #define BNX_IS_57765_FAMILY(sc) \
137 ((sc)->bnx_flags & BNX_FLAG_57765_FAMILY)
139 typedef int (*bnx_eaddr_fcn_t)(struct bnx_softc *, uint8_t[]);
141 static int bnx_probe(device_t);
142 static int bnx_attach(device_t);
143 static int bnx_detach(device_t);
144 static void bnx_shutdown(device_t);
145 static int bnx_suspend(device_t);
146 static int bnx_resume(device_t);
147 static int bnx_miibus_readreg(device_t, int, int);
148 static int bnx_miibus_writereg(device_t, int, int, int);
149 static void bnx_miibus_statchg(device_t);
152 static void bnx_npoll(struct ifnet *, struct ifpoll_info *);
153 static void bnx_npoll_compat(struct ifnet *, void *, int);
155 static void bnx_intr_legacy(void *);
156 static void bnx_msi(void *);
157 static void bnx_msi_oneshot(void *);
158 static void bnx_intr(struct bnx_softc *);
159 static void bnx_enable_intr(struct bnx_softc *);
160 static void bnx_disable_intr(struct bnx_softc *);
161 static void bnx_txeof(struct bnx_tx_ring *, uint16_t);
162 static void bnx_rxeof(struct bnx_softc *, uint16_t, int);
164 static void bnx_start(struct ifnet *, struct ifaltq_subque *);
165 static int bnx_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
166 static void bnx_init(void *);
167 static void bnx_stop(struct bnx_softc *);
168 static void bnx_watchdog(struct ifnet *);
169 static int bnx_ifmedia_upd(struct ifnet *);
170 static void bnx_ifmedia_sts(struct ifnet *, struct ifmediareq *);
171 static void bnx_tick(void *);
173 static int bnx_alloc_jumbo_mem(struct bnx_softc *);
174 static void bnx_free_jumbo_mem(struct bnx_softc *);
175 static struct bnx_jslot
176 *bnx_jalloc(struct bnx_softc *);
177 static void bnx_jfree(void *);
178 static void bnx_jref(void *);
179 static int bnx_newbuf_std(struct bnx_softc *, int, int);
180 static int bnx_newbuf_jumbo(struct bnx_softc *, int, int);
181 static void bnx_setup_rxdesc_std(struct bnx_softc *, int);
182 static void bnx_setup_rxdesc_jumbo(struct bnx_softc *, int);
183 static int bnx_init_rx_ring_std(struct bnx_softc *);
184 static void bnx_free_rx_ring_std(struct bnx_softc *);
185 static int bnx_init_rx_ring_jumbo(struct bnx_softc *);
186 static void bnx_free_rx_ring_jumbo(struct bnx_softc *);
187 static void bnx_free_tx_ring(struct bnx_tx_ring *);
188 static int bnx_init_tx_ring(struct bnx_tx_ring *);
189 static int bnx_create_tx_ring(struct bnx_tx_ring *);
190 static void bnx_destroy_tx_ring(struct bnx_tx_ring *);
191 static int bnx_dma_alloc(struct bnx_softc *);
192 static void bnx_dma_free(struct bnx_softc *);
193 static int bnx_dma_block_alloc(struct bnx_softc *, bus_size_t,
194 bus_dma_tag_t *, bus_dmamap_t *, void **, bus_addr_t *);
195 static void bnx_dma_block_free(bus_dma_tag_t, bus_dmamap_t, void *);
197 bnx_defrag_shortdma(struct mbuf *);
198 static int bnx_encap(struct bnx_tx_ring *, struct mbuf **,
200 static int bnx_setup_tso(struct bnx_tx_ring *, struct mbuf **,
201 uint16_t *, uint16_t *);
203 static void bnx_reset(struct bnx_softc *);
204 static int bnx_chipinit(struct bnx_softc *);
205 static int bnx_blockinit(struct bnx_softc *);
206 static void bnx_stop_block(struct bnx_softc *, bus_size_t, uint32_t);
207 static void bnx_enable_msi(struct bnx_softc *sc);
208 static void bnx_setmulti(struct bnx_softc *);
209 static void bnx_setpromisc(struct bnx_softc *);
210 static void bnx_stats_update_regs(struct bnx_softc *);
211 static uint32_t bnx_dma_swap_options(struct bnx_softc *);
213 static uint32_t bnx_readmem_ind(struct bnx_softc *, uint32_t);
214 static void bnx_writemem_ind(struct bnx_softc *, uint32_t, uint32_t);
216 static uint32_t bnx_readreg_ind(struct bnx_softc *, uint32_t);
218 static void bnx_writemem_direct(struct bnx_softc *, uint32_t, uint32_t);
219 static void bnx_writembx(struct bnx_softc *, int, int);
220 static int bnx_read_nvram(struct bnx_softc *, caddr_t, int, int);
221 static uint8_t bnx_eeprom_getbyte(struct bnx_softc *, uint32_t, uint8_t *);
222 static int bnx_read_eeprom(struct bnx_softc *, caddr_t, uint32_t, size_t);
224 static void bnx_tbi_link_upd(struct bnx_softc *, uint32_t);
225 static void bnx_copper_link_upd(struct bnx_softc *, uint32_t);
226 static void bnx_autopoll_link_upd(struct bnx_softc *, uint32_t);
227 static void bnx_link_poll(struct bnx_softc *);
229 static int bnx_get_eaddr_mem(struct bnx_softc *, uint8_t[]);
230 static int bnx_get_eaddr_nvram(struct bnx_softc *, uint8_t[]);
231 static int bnx_get_eaddr_eeprom(struct bnx_softc *, uint8_t[]);
232 static int bnx_get_eaddr(struct bnx_softc *, uint8_t[]);
234 static void bnx_coal_change(struct bnx_softc *);
235 static int bnx_sysctl_rx_coal_ticks(SYSCTL_HANDLER_ARGS);
236 static int bnx_sysctl_tx_coal_ticks(SYSCTL_HANDLER_ARGS);
237 static int bnx_sysctl_rx_coal_bds(SYSCTL_HANDLER_ARGS);
238 static int bnx_sysctl_tx_coal_bds(SYSCTL_HANDLER_ARGS);
239 static int bnx_sysctl_rx_coal_bds_int(SYSCTL_HANDLER_ARGS);
240 static int bnx_sysctl_tx_coal_bds_int(SYSCTL_HANDLER_ARGS);
241 static int bnx_sysctl_coal_chg(SYSCTL_HANDLER_ARGS, uint32_t *,
244 static int bnx_msi_enable = 1;
245 TUNABLE_INT("hw.bnx.msi.enable", &bnx_msi_enable);
247 static device_method_t bnx_methods[] = {
248 /* Device interface */
249 DEVMETHOD(device_probe, bnx_probe),
250 DEVMETHOD(device_attach, bnx_attach),
251 DEVMETHOD(device_detach, bnx_detach),
252 DEVMETHOD(device_shutdown, bnx_shutdown),
253 DEVMETHOD(device_suspend, bnx_suspend),
254 DEVMETHOD(device_resume, bnx_resume),
257 DEVMETHOD(bus_print_child, bus_generic_print_child),
258 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
261 DEVMETHOD(miibus_readreg, bnx_miibus_readreg),
262 DEVMETHOD(miibus_writereg, bnx_miibus_writereg),
263 DEVMETHOD(miibus_statchg, bnx_miibus_statchg),
268 static DEFINE_CLASS_0(bnx, bnx_driver, bnx_methods, sizeof(struct bnx_softc));
269 static devclass_t bnx_devclass;
271 DECLARE_DUMMY_MODULE(if_bnx);
272 DRIVER_MODULE(if_bnx, pci, bnx_driver, bnx_devclass, NULL, NULL);
273 DRIVER_MODULE(miibus, bnx, miibus_driver, miibus_devclass, NULL, NULL);
276 bnx_readmem_ind(struct bnx_softc *sc, uint32_t off)
278 device_t dev = sc->bnx_dev;
281 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
282 val = pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4);
283 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
288 bnx_writemem_ind(struct bnx_softc *sc, uint32_t off, uint32_t val)
290 device_t dev = sc->bnx_dev;
292 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
293 pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
294 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
298 bnx_writemem_direct(struct bnx_softc *sc, uint32_t off, uint32_t val)
300 CSR_WRITE_4(sc, off, val);
304 bnx_writembx(struct bnx_softc *sc, int off, int val)
306 CSR_WRITE_4(sc, off, val);
310 * Read a sequence of bytes from NVRAM.
313 bnx_read_nvram(struct bnx_softc *sc, caddr_t dest, int off, int cnt)
319 * Read a byte of data stored in the EEPROM at address 'addr.' The
320 * BCM570x supports both the traditional bitbang interface and an
321 * auto access interface for reading the EEPROM. We use the auto
325 bnx_eeprom_getbyte(struct bnx_softc *sc, uint32_t addr, uint8_t *dest)
331 * Enable use of auto EEPROM access so we can avoid
332 * having to use the bitbang method.
334 BNX_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);
336 /* Reset the EEPROM, load the clock period. */
337 CSR_WRITE_4(sc, BGE_EE_ADDR,
338 BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
341 /* Issue the read EEPROM command. */
342 CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);
344 /* Wait for completion */
345 for(i = 0; i < BNX_TIMEOUT * 10; i++) {
347 if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
351 if (i == BNX_TIMEOUT) {
352 if_printf(&sc->arpcom.ac_if, "eeprom read timed out\n");
357 byte = CSR_READ_4(sc, BGE_EE_DATA);
359 *dest = (byte >> ((addr % 4) * 8)) & 0xFF;
365 * Read a sequence of bytes from the EEPROM.
368 bnx_read_eeprom(struct bnx_softc *sc, caddr_t dest, uint32_t off, size_t len)
374 for (byte = 0, err = 0, i = 0; i < len; i++) {
375 err = bnx_eeprom_getbyte(sc, off + i, &byte);
385 bnx_miibus_readreg(device_t dev, int phy, int reg)
387 struct bnx_softc *sc = device_get_softc(dev);
391 KASSERT(phy == sc->bnx_phyno,
392 ("invalid phyno %d, should be %d", phy, sc->bnx_phyno));
394 /* Clear the autopoll bit if set, otherwise may trigger PCI errors. */
395 if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) {
396 CSR_WRITE_4(sc, BGE_MI_MODE,
397 sc->bnx_mi_mode & ~BGE_MIMODE_AUTOPOLL);
401 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ | BGE_MICOMM_BUSY |
402 BGE_MIPHY(phy) | BGE_MIREG(reg));
404 /* Poll for the PHY register access to complete. */
405 for (i = 0; i < BNX_TIMEOUT; i++) {
407 val = CSR_READ_4(sc, BGE_MI_COMM);
408 if ((val & BGE_MICOMM_BUSY) == 0) {
410 val = CSR_READ_4(sc, BGE_MI_COMM);
414 if (i == BNX_TIMEOUT) {
415 if_printf(&sc->arpcom.ac_if, "PHY read timed out "
416 "(phy %d, reg %d, val 0x%08x)\n", phy, reg, val);
420 /* Restore the autopoll bit if necessary. */
421 if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) {
422 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bnx_mi_mode);
426 if (val & BGE_MICOMM_READFAIL)
429 return (val & 0xFFFF);
433 bnx_miibus_writereg(device_t dev, int phy, int reg, int val)
435 struct bnx_softc *sc = device_get_softc(dev);
438 KASSERT(phy == sc->bnx_phyno,
439 ("invalid phyno %d, should be %d", phy, sc->bnx_phyno));
441 /* Clear the autopoll bit if set, otherwise may trigger PCI errors. */
442 if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) {
443 CSR_WRITE_4(sc, BGE_MI_MODE,
444 sc->bnx_mi_mode & ~BGE_MIMODE_AUTOPOLL);
448 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE | BGE_MICOMM_BUSY |
449 BGE_MIPHY(phy) | BGE_MIREG(reg) | val);
451 for (i = 0; i < BNX_TIMEOUT; i++) {
453 if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) {
455 CSR_READ_4(sc, BGE_MI_COMM); /* dummy read */
459 if (i == BNX_TIMEOUT) {
460 if_printf(&sc->arpcom.ac_if, "PHY write timed out "
461 "(phy %d, reg %d, val %d)\n", phy, reg, val);
464 /* Restore the autopoll bit if necessary. */
465 if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) {
466 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bnx_mi_mode);
474 bnx_miibus_statchg(device_t dev)
476 struct bnx_softc *sc;
477 struct mii_data *mii;
479 sc = device_get_softc(dev);
480 mii = device_get_softc(sc->bnx_miibus);
482 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
483 (IFM_ACTIVE | IFM_AVALID)) {
484 switch (IFM_SUBTYPE(mii->mii_media_active)) {
501 if (sc->bnx_link == 0)
504 BNX_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
505 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
506 IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) {
507 BNX_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
509 BNX_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
512 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
513 BNX_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
515 BNX_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
520 * Memory management for jumbo frames.
523 bnx_alloc_jumbo_mem(struct bnx_softc *sc)
525 struct ifnet *ifp = &sc->arpcom.ac_if;
526 struct bnx_jslot *entry;
532 * Create tag for jumbo mbufs.
533 * This is really a bit of a kludge. We allocate a special
534 * jumbo buffer pool which (thanks to the way our DMA
535 * memory allocation works) will consist of contiguous
536 * pages. This means that even though a jumbo buffer might
537 * be larger than a page size, we don't really need to
538 * map it into more than one DMA segment. However, the
539 * default mbuf tag will result in multi-segment mappings,
540 * so we have to create a special jumbo mbuf tag that
541 * lets us get away with mapping the jumbo buffers as
542 * a single segment. I think eventually the driver should
543 * be changed so that it uses ordinary mbufs and cluster
544 * buffers, i.e. jumbo frames can span multiple DMA
545 * descriptors. But that's a project for another day.
549 * Create DMA stuffs for jumbo RX ring.
551 error = bnx_dma_block_alloc(sc, BGE_JUMBO_RX_RING_SZ,
552 &sc->bnx_cdata.bnx_rx_jumbo_ring_tag,
553 &sc->bnx_cdata.bnx_rx_jumbo_ring_map,
554 (void *)&sc->bnx_ldata.bnx_rx_jumbo_ring,
555 &sc->bnx_ldata.bnx_rx_jumbo_ring_paddr);
557 if_printf(ifp, "could not create jumbo RX ring\n");
562 * Create DMA stuffs for jumbo buffer block.
564 error = bnx_dma_block_alloc(sc, BNX_JMEM,
565 &sc->bnx_cdata.bnx_jumbo_tag,
566 &sc->bnx_cdata.bnx_jumbo_map,
567 (void **)&sc->bnx_ldata.bnx_jumbo_buf,
570 if_printf(ifp, "could not create jumbo buffer\n");
574 SLIST_INIT(&sc->bnx_jfree_listhead);
577 * Now divide it up into 9K pieces and save the addresses
578 * in an array. Note that we play an evil trick here by using
579 * the first few bytes in the buffer to hold the the address
580 * of the softc structure for this interface. This is because
581 * bnx_jfree() needs it, but it is called by the mbuf management
582 * code which will not pass it to us explicitly.
584 for (i = 0, ptr = sc->bnx_ldata.bnx_jumbo_buf; i < BNX_JSLOTS; i++) {
585 entry = &sc->bnx_cdata.bnx_jslots[i];
587 entry->bnx_buf = ptr;
588 entry->bnx_paddr = paddr;
589 entry->bnx_inuse = 0;
591 SLIST_INSERT_HEAD(&sc->bnx_jfree_listhead, entry, jslot_link);
600 bnx_free_jumbo_mem(struct bnx_softc *sc)
602 /* Destroy jumbo RX ring. */
603 bnx_dma_block_free(sc->bnx_cdata.bnx_rx_jumbo_ring_tag,
604 sc->bnx_cdata.bnx_rx_jumbo_ring_map,
605 sc->bnx_ldata.bnx_rx_jumbo_ring);
607 /* Destroy jumbo buffer block. */
608 bnx_dma_block_free(sc->bnx_cdata.bnx_jumbo_tag,
609 sc->bnx_cdata.bnx_jumbo_map,
610 sc->bnx_ldata.bnx_jumbo_buf);
614 * Allocate a jumbo buffer.
616 static struct bnx_jslot *
617 bnx_jalloc(struct bnx_softc *sc)
619 struct bnx_jslot *entry;
621 lwkt_serialize_enter(&sc->bnx_jslot_serializer);
622 entry = SLIST_FIRST(&sc->bnx_jfree_listhead);
624 SLIST_REMOVE_HEAD(&sc->bnx_jfree_listhead, jslot_link);
625 entry->bnx_inuse = 1;
627 if_printf(&sc->arpcom.ac_if, "no free jumbo buffers\n");
629 lwkt_serialize_exit(&sc->bnx_jslot_serializer);
634 * Adjust usage count on a jumbo buffer.
639 struct bnx_jslot *entry = (struct bnx_jslot *)arg;
640 struct bnx_softc *sc = entry->bnx_sc;
643 panic("bnx_jref: can't find softc pointer!");
645 if (&sc->bnx_cdata.bnx_jslots[entry->bnx_slot] != entry) {
646 panic("bnx_jref: asked to reference buffer "
647 "that we don't manage!");
648 } else if (entry->bnx_inuse == 0) {
649 panic("bnx_jref: buffer already free!");
651 atomic_add_int(&entry->bnx_inuse, 1);
656 * Release a jumbo buffer.
661 struct bnx_jslot *entry = (struct bnx_jslot *)arg;
662 struct bnx_softc *sc = entry->bnx_sc;
665 panic("bnx_jfree: can't find softc pointer!");
667 if (&sc->bnx_cdata.bnx_jslots[entry->bnx_slot] != entry) {
668 panic("bnx_jfree: asked to free buffer that we don't manage!");
669 } else if (entry->bnx_inuse == 0) {
670 panic("bnx_jfree: buffer already free!");
673 * Possible MP race to 0, use the serializer. The atomic insn
674 * is still needed for races against bnx_jref().
676 lwkt_serialize_enter(&sc->bnx_jslot_serializer);
677 atomic_subtract_int(&entry->bnx_inuse, 1);
678 if (entry->bnx_inuse == 0) {
679 SLIST_INSERT_HEAD(&sc->bnx_jfree_listhead,
682 lwkt_serialize_exit(&sc->bnx_jslot_serializer);
688 * Intialize a standard receive ring descriptor.
691 bnx_newbuf_std(struct bnx_softc *sc, int i, int init)
693 struct mbuf *m_new = NULL;
694 bus_dma_segment_t seg;
698 m_new = m_getcl(init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
701 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
702 m_adj(m_new, ETHER_ALIGN);
704 error = bus_dmamap_load_mbuf_segment(sc->bnx_cdata.bnx_rx_mtag,
705 sc->bnx_cdata.bnx_rx_tmpmap, m_new,
706 &seg, 1, &nsegs, BUS_DMA_NOWAIT);
713 bus_dmamap_sync(sc->bnx_cdata.bnx_rx_mtag,
714 sc->bnx_cdata.bnx_rx_std_dmamap[i],
715 BUS_DMASYNC_POSTREAD);
716 bus_dmamap_unload(sc->bnx_cdata.bnx_rx_mtag,
717 sc->bnx_cdata.bnx_rx_std_dmamap[i]);
720 map = sc->bnx_cdata.bnx_rx_tmpmap;
721 sc->bnx_cdata.bnx_rx_tmpmap = sc->bnx_cdata.bnx_rx_std_dmamap[i];
722 sc->bnx_cdata.bnx_rx_std_dmamap[i] = map;
724 sc->bnx_cdata.bnx_rx_std_chain[i].bnx_mbuf = m_new;
725 sc->bnx_cdata.bnx_rx_std_chain[i].bnx_paddr = seg.ds_addr;
727 bnx_setup_rxdesc_std(sc, i);
732 bnx_setup_rxdesc_std(struct bnx_softc *sc, int i)
734 struct bnx_rxchain *rc;
737 rc = &sc->bnx_cdata.bnx_rx_std_chain[i];
738 r = &sc->bnx_ldata.bnx_rx_std_ring[i];
740 r->bge_addr.bge_addr_lo = BGE_ADDR_LO(rc->bnx_paddr);
741 r->bge_addr.bge_addr_hi = BGE_ADDR_HI(rc->bnx_paddr);
742 r->bge_len = rc->bnx_mbuf->m_len;
744 r->bge_flags = BGE_RXBDFLAG_END;
748 * Initialize a jumbo receive ring descriptor. This allocates
749 * a jumbo buffer from the pool managed internally by the driver.
752 bnx_newbuf_jumbo(struct bnx_softc *sc, int i, int init)
754 struct mbuf *m_new = NULL;
755 struct bnx_jslot *buf;
758 /* Allocate the mbuf. */
759 MGETHDR(m_new, init ? MB_WAIT : MB_DONTWAIT, MT_DATA);
763 /* Allocate the jumbo buffer */
764 buf = bnx_jalloc(sc);
770 /* Attach the buffer to the mbuf. */
771 m_new->m_ext.ext_arg = buf;
772 m_new->m_ext.ext_buf = buf->bnx_buf;
773 m_new->m_ext.ext_free = bnx_jfree;
774 m_new->m_ext.ext_ref = bnx_jref;
775 m_new->m_ext.ext_size = BNX_JUMBO_FRAMELEN;
777 m_new->m_flags |= M_EXT;
779 m_new->m_data = m_new->m_ext.ext_buf;
780 m_new->m_len = m_new->m_pkthdr.len = m_new->m_ext.ext_size;
782 paddr = buf->bnx_paddr;
783 m_adj(m_new, ETHER_ALIGN);
784 paddr += ETHER_ALIGN;
786 /* Save necessary information */
787 sc->bnx_cdata.bnx_rx_jumbo_chain[i].bnx_mbuf = m_new;
788 sc->bnx_cdata.bnx_rx_jumbo_chain[i].bnx_paddr = paddr;
790 /* Set up the descriptor. */
791 bnx_setup_rxdesc_jumbo(sc, i);
796 bnx_setup_rxdesc_jumbo(struct bnx_softc *sc, int i)
799 struct bnx_rxchain *rc;
801 r = &sc->bnx_ldata.bnx_rx_jumbo_ring[i];
802 rc = &sc->bnx_cdata.bnx_rx_jumbo_chain[i];
804 r->bge_addr.bge_addr_lo = BGE_ADDR_LO(rc->bnx_paddr);
805 r->bge_addr.bge_addr_hi = BGE_ADDR_HI(rc->bnx_paddr);
806 r->bge_len = rc->bnx_mbuf->m_len;
808 r->bge_flags = BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING;
812 bnx_init_rx_ring_std(struct bnx_softc *sc)
816 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
817 error = bnx_newbuf_std(sc, i, 1);
822 sc->bnx_std = BGE_STD_RX_RING_CNT - 1;
823 bnx_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bnx_std);
829 bnx_free_rx_ring_std(struct bnx_softc *sc)
833 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
834 struct bnx_rxchain *rc = &sc->bnx_cdata.bnx_rx_std_chain[i];
836 if (rc->bnx_mbuf != NULL) {
837 bus_dmamap_unload(sc->bnx_cdata.bnx_rx_mtag,
838 sc->bnx_cdata.bnx_rx_std_dmamap[i]);
839 m_freem(rc->bnx_mbuf);
842 bzero(&sc->bnx_ldata.bnx_rx_std_ring[i],
843 sizeof(struct bge_rx_bd));
848 bnx_init_rx_ring_jumbo(struct bnx_softc *sc)
853 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
854 error = bnx_newbuf_jumbo(sc, i, 1);
859 sc->bnx_jumbo = BGE_JUMBO_RX_RING_CNT - 1;
861 rcb = &sc->bnx_ldata.bnx_info.bnx_jumbo_rx_rcb;
862 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 0);
863 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
865 bnx_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bnx_jumbo);
871 bnx_free_rx_ring_jumbo(struct bnx_softc *sc)
875 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
876 struct bnx_rxchain *rc = &sc->bnx_cdata.bnx_rx_jumbo_chain[i];
878 if (rc->bnx_mbuf != NULL) {
879 m_freem(rc->bnx_mbuf);
882 bzero(&sc->bnx_ldata.bnx_rx_jumbo_ring[i],
883 sizeof(struct bge_rx_bd));
888 bnx_free_tx_ring(struct bnx_tx_ring *txr)
892 for (i = 0; i < BGE_TX_RING_CNT; i++) {
893 if (txr->bnx_tx_chain[i] != NULL) {
894 bus_dmamap_unload(txr->bnx_tx_mtag,
895 txr->bnx_tx_dmamap[i]);
896 m_freem(txr->bnx_tx_chain[i]);
897 txr->bnx_tx_chain[i] = NULL;
899 bzero(&txr->bnx_tx_ring[i], sizeof(struct bge_tx_bd));
901 txr->bnx_tx_saved_considx = BNX_TXCONS_UNSET;
905 bnx_init_tx_ring(struct bnx_tx_ring *txr)
908 txr->bnx_tx_saved_considx = 0;
909 txr->bnx_tx_prodidx = 0;
911 /* Initialize transmit producer index for host-memory send ring. */
912 bnx_writembx(txr->bnx_sc, BGE_MBX_TX_HOST_PROD0_LO,
913 txr->bnx_tx_prodidx);
919 bnx_setmulti(struct bnx_softc *sc)
922 struct ifmultiaddr *ifma;
923 uint32_t hashes[4] = { 0, 0, 0, 0 };
926 ifp = &sc->arpcom.ac_if;
928 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
929 for (i = 0; i < 4; i++)
930 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
934 /* First, zot all the existing filters. */
935 for (i = 0; i < 4; i++)
936 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);
938 /* Now program new ones. */
939 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
940 if (ifma->ifma_addr->sa_family != AF_LINK)
943 LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
944 ETHER_ADDR_LEN) & 0x7f;
945 hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
948 for (i = 0; i < 4; i++)
949 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
953 * Do endian, PCI and DMA initialization. Also check the on-board ROM
957 bnx_chipinit(struct bnx_softc *sc)
959 uint32_t dma_rw_ctl, mode_ctl;
962 /* Set endian type before we access any non-PCI registers. */
963 pci_write_config(sc->bnx_dev, BGE_PCI_MISC_CTL,
964 BGE_INIT | BGE_PCIMISCCTL_TAGGED_STATUS, 4);
966 /* Clear the MAC control register */
967 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
970 * Clear the MAC statistics block in the NIC's
973 for (i = BGE_STATS_BLOCK;
974 i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t))
975 BNX_MEMWIN_WRITE(sc, i, 0);
977 for (i = BGE_STATUS_BLOCK;
978 i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t))
979 BNX_MEMWIN_WRITE(sc, i, 0);
981 if (BNX_IS_57765_FAMILY(sc)) {
984 if (sc->bnx_chipid == BGE_CHIPID_BCM57765_A0) {
985 mode_ctl = CSR_READ_4(sc, BGE_MODE_CTL);
986 val = mode_ctl & ~BGE_MODECTL_PCIE_PORTS;
988 /* Access the lower 1K of PL PCI-E block registers. */
989 CSR_WRITE_4(sc, BGE_MODE_CTL,
990 val | BGE_MODECTL_PCIE_PL_SEL);
992 val = CSR_READ_4(sc, BGE_PCIE_PL_LO_PHYCTL5);
993 val |= BGE_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ;
994 CSR_WRITE_4(sc, BGE_PCIE_PL_LO_PHYCTL5, val);
996 CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl);
998 if (sc->bnx_chiprev != BGE_CHIPREV_57765_AX) {
999 /* Fix transmit hangs */
1000 val = CSR_READ_4(sc, BGE_CPMU_PADRNG_CTL);
1001 val |= BGE_CPMU_PADRNG_CTL_RDIV2;
1002 CSR_WRITE_4(sc, BGE_CPMU_PADRNG_CTL, val);
1004 mode_ctl = CSR_READ_4(sc, BGE_MODE_CTL);
1005 val = mode_ctl & ~BGE_MODECTL_PCIE_PORTS;
1007 /* Access the lower 1K of DL PCI-E block registers. */
1008 CSR_WRITE_4(sc, BGE_MODE_CTL,
1009 val | BGE_MODECTL_PCIE_DL_SEL);
1011 val = CSR_READ_4(sc, BGE_PCIE_DL_LO_FTSMAX);
1012 val &= ~BGE_PCIE_DL_LO_FTSMAX_MASK;
1013 val |= BGE_PCIE_DL_LO_FTSMAX_VAL;
1014 CSR_WRITE_4(sc, BGE_PCIE_DL_LO_FTSMAX, val);
1016 CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl);
1019 val = CSR_READ_4(sc, BGE_CPMU_LSPD_10MB_CLK);
1020 val &= ~BGE_CPMU_LSPD_10MB_MACCLK_MASK;
1021 val |= BGE_CPMU_LSPD_10MB_MACCLK_6_25;
1022 CSR_WRITE_4(sc, BGE_CPMU_LSPD_10MB_CLK, val);
1026 * Set up the PCI DMA control register.
1028 dma_rw_ctl = pci_read_config(sc->bnx_dev, BGE_PCI_DMA_RW_CTL, 4);
1030 * Disable 32bytes cache alignment for DMA write to host memory
1033 * 64bytes cache alignment for DMA write to host memory is still
1036 dma_rw_ctl |= BGE_PCIDMARWCTL_DIS_CACHE_ALIGNMENT;
1037 if (sc->bnx_chipid == BGE_CHIPID_BCM57765_A0)
1038 dma_rw_ctl &= ~BGE_PCIDMARWCTL_CRDRDR_RDMA_MRRS_MSK;
1040 * Enable HW workaround for controllers that misinterpret
1041 * a status tag update and leave interrupts permanently
1044 if (sc->bnx_asicrev != BGE_ASICREV_BCM5717 &&
1045 sc->bnx_asicrev != BGE_ASICREV_BCM5762 &&
1046 !BNX_IS_57765_FAMILY(sc))
1047 dma_rw_ctl |= BGE_PCIDMARWCTL_TAGGED_STATUS_WA;
1049 if_printf(&sc->arpcom.ac_if, "DMA read/write %#x\n",
1052 pci_write_config(sc->bnx_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);
1055 * Set up general mode register.
1057 mode_ctl = bnx_dma_swap_options(sc) | BGE_MODECTL_MAC_ATTN_INTR |
1058 BGE_MODECTL_HOST_SEND_BDS | BGE_MODECTL_TX_NO_PHDR_CSUM;
1059 CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl);
1062 * Disable memory write invalidate. Apparently it is not supported
1063 * properly by these devices. Also ensure that INTx isn't disabled,
1064 * as these chips need it even when using MSI.
1066 PCI_CLRBIT(sc->bnx_dev, BGE_PCI_CMD,
1067 (PCIM_CMD_MWRICEN | PCIM_CMD_INTxDIS), 4);
1069 /* Set the timer prescaler (always 66Mhz) */
1070 CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/);
1076 bnx_blockinit(struct bnx_softc *sc)
1078 struct bnx_tx_ring *txr = &sc->bnx_tx_ring[0];
1079 struct bge_rcb *rcb;
1086 * Initialize the memory window pointer register so that
1087 * we can access the first 32K of internal NIC RAM. This will
1088 * allow us to set up the TX send ring RCBs and the RX return
1089 * ring RCBs, plus other things which live in NIC memory.
1091 CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);
1093 /* Configure mbuf pool watermarks */
1094 if (BNX_IS_57765_PLUS(sc)) {
1095 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1096 if (sc->arpcom.ac_if.if_mtu > ETHERMTU) {
1097 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x7e);
1098 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xea);
1100 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x2a);
1101 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xa0);
1104 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1105 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
1106 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1109 /* Configure DMA resource watermarks */
1110 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
1111 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);
1113 /* Enable buffer manager */
1114 val = BGE_BMANMODE_ENABLE | BGE_BMANMODE_LOMBUF_ATTN;
1116 * Change the arbitration algorithm of TXMBUF read request to
1117 * round-robin instead of priority based for BCM5719. When
1118 * TXFIFO is almost empty, RDMA will hold its request until
1119 * TXFIFO is not almost empty.
1121 if (sc->bnx_asicrev == BGE_ASICREV_BCM5719)
1122 val |= BGE_BMANMODE_NO_TX_UNDERRUN;
1123 if (sc->bnx_asicrev == BGE_ASICREV_BCM5717 ||
1124 sc->bnx_chipid == BGE_CHIPID_BCM5719_A0 ||
1125 sc->bnx_chipid == BGE_CHIPID_BCM5720_A0)
1126 val |= BGE_BMANMODE_LOMBUF_ATTN;
1127 CSR_WRITE_4(sc, BGE_BMAN_MODE, val);
1129 /* Poll for buffer manager start indication */
1130 for (i = 0; i < BNX_TIMEOUT; i++) {
1131 if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
1136 if (i == BNX_TIMEOUT) {
1137 if_printf(&sc->arpcom.ac_if,
1138 "buffer manager failed to start\n");
1142 /* Enable flow-through queues */
1143 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
1144 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
1146 /* Wait until queue initialization is complete */
1147 for (i = 0; i < BNX_TIMEOUT; i++) {
1148 if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
1153 if (i == BNX_TIMEOUT) {
1154 if_printf(&sc->arpcom.ac_if,
1155 "flow-through queue init failed\n");
1160 * Summary of rings supported by the controller:
1162 * Standard Receive Producer Ring
1163 * - This ring is used to feed receive buffers for "standard"
1164 * sized frames (typically 1536 bytes) to the controller.
1166 * Jumbo Receive Producer Ring
1167 * - This ring is used to feed receive buffers for jumbo sized
1168 * frames (i.e. anything bigger than the "standard" frames)
1169 * to the controller.
1171 * Mini Receive Producer Ring
1172 * - This ring is used to feed receive buffers for "mini"
1173 * sized frames to the controller.
1174 * - This feature required external memory for the controller
1175 * but was never used in a production system. Should always
1178 * Receive Return Ring
1179 * - After the controller has placed an incoming frame into a
1180 * receive buffer that buffer is moved into a receive return
1181 * ring. The driver is then responsible to passing the
1182 * buffer up to the stack. Many versions of the controller
1183 * support multiple RR rings.
1186 * - This ring is used for outgoing frames. Many versions of
1187 * the controller support multiple send rings.
1190 /* Initialize the standard receive producer ring control block. */
1191 rcb = &sc->bnx_ldata.bnx_info.bnx_std_rx_rcb;
1192 rcb->bge_hostaddr.bge_addr_lo =
1193 BGE_ADDR_LO(sc->bnx_ldata.bnx_rx_std_ring_paddr);
1194 rcb->bge_hostaddr.bge_addr_hi =
1195 BGE_ADDR_HI(sc->bnx_ldata.bnx_rx_std_ring_paddr);
1196 if (BNX_IS_57765_PLUS(sc)) {
1198 * Bits 31-16: Programmable ring size (2048, 1024, 512, .., 32)
1199 * Bits 15-2 : Maximum RX frame size
1200 * Bit 1 : 1 = Ring Disabled, 0 = Ring ENabled
1203 rcb->bge_maxlen_flags =
1204 BGE_RCB_MAXLEN_FLAGS(512, BNX_MAX_FRAMELEN << 2);
1207 * Bits 31-16: Programmable ring size (512, 256, 128, 64, 32)
1208 * Bits 15-2 : Reserved (should be 0)
1209 * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled
1212 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
1214 if (BNX_IS_5717_PLUS(sc))
1215 rcb->bge_nicaddr = BGE_STD_RX_RINGS_5717;
1217 rcb->bge_nicaddr = BGE_STD_RX_RINGS;
1218 /* Write the standard receive producer ring control block. */
1219 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
1220 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
1221 CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1222 CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
1223 /* Reset the standard receive producer ring producer index. */
1224 bnx_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0);
1227 * Initialize the jumbo RX producer ring control
1228 * block. We set the 'ring disabled' bit in the
1229 * flags field until we're actually ready to start
1230 * using this ring (i.e. once we set the MTU
1231 * high enough to require it).
1233 if (BNX_IS_JUMBO_CAPABLE(sc)) {
1234 rcb = &sc->bnx_ldata.bnx_info.bnx_jumbo_rx_rcb;
1235 /* Get the jumbo receive producer ring RCB parameters. */
1236 rcb->bge_hostaddr.bge_addr_lo =
1237 BGE_ADDR_LO(sc->bnx_ldata.bnx_rx_jumbo_ring_paddr);
1238 rcb->bge_hostaddr.bge_addr_hi =
1239 BGE_ADDR_HI(sc->bnx_ldata.bnx_rx_jumbo_ring_paddr);
1240 rcb->bge_maxlen_flags =
1241 BGE_RCB_MAXLEN_FLAGS(BNX_MAX_FRAMELEN,
1242 BGE_RCB_FLAG_RING_DISABLED);
1243 if (BNX_IS_5717_PLUS(sc))
1244 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS_5717;
1246 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
1247 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
1248 rcb->bge_hostaddr.bge_addr_hi);
1249 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
1250 rcb->bge_hostaddr.bge_addr_lo);
1251 /* Program the jumbo receive producer ring RCB parameters. */
1252 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
1253 rcb->bge_maxlen_flags);
1254 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
1255 /* Reset the jumbo receive producer ring producer index. */
1256 bnx_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
1260 * The BD ring replenish thresholds control how often the
1261 * hardware fetches new BD's from the producer rings in host
1262 * memory. Setting the value too low on a busy system can
1263 * starve the hardware and recue the throughpout.
1265 * Set the BD ring replentish thresholds. The recommended
1266 * values are 1/8th the number of descriptors allocated to
1270 CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, val);
1271 if (BNX_IS_JUMBO_CAPABLE(sc)) {
1272 CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH,
1273 BGE_JUMBO_RX_RING_CNT/8);
1275 if (BNX_IS_57765_PLUS(sc)) {
1276 CSR_WRITE_4(sc, BGE_STD_REPLENISH_LWM, 32);
1277 CSR_WRITE_4(sc, BGE_JMB_REPLENISH_LWM, 16);
1281 * Disable all send rings by setting the 'ring disabled' bit
1282 * in the flags field of all the TX send ring control blocks,
1283 * located in NIC memory.
1285 if (BNX_IS_5717_PLUS(sc))
1287 else if (BNX_IS_57765_FAMILY(sc) ||
1288 sc->bnx_asicrev == BGE_ASICREV_BCM5762)
1292 vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1293 for (i = 0; i < limit; i++) {
1294 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1295 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
1296 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1297 vrcb += sizeof(struct bge_rcb);
1300 /* Configure send ring RCB 0 (we use only the first ring) */
1301 vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1302 BGE_HOSTADDR(taddr, txr->bnx_tx_ring_paddr);
1303 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1304 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1305 if (BNX_IS_5717_PLUS(sc)) {
1306 RCB_WRITE_4(sc, vrcb, bge_nicaddr, BGE_SEND_RING_5717);
1308 RCB_WRITE_4(sc, vrcb, bge_nicaddr,
1309 BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
1311 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1312 BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
1315 * Disable all receive return rings by setting the
1316 * 'ring disabled' bit in the flags field of all the receive
1317 * return ring control blocks, located in NIC memory.
1319 if (BNX_IS_5717_PLUS(sc)) {
1320 /* Should be 17, use 16 until we get an SRAM map. */
1322 } else if (BNX_IS_57765_FAMILY(sc) ||
1323 sc->bnx_asicrev == BGE_ASICREV_BCM5762) {
1328 /* Disable all receive return rings. */
1329 vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1330 for (i = 0; i < limit; i++) {
1331 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0);
1332 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0);
1333 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1334 BGE_RCB_FLAG_RING_DISABLED);
1335 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1336 bnx_writembx(sc, BGE_MBX_RX_CONS0_LO +
1337 (i * (sizeof(uint64_t))), 0);
1338 vrcb += sizeof(struct bge_rcb);
1342 * Set up receive return ring 0. Note that the NIC address
1343 * for RX return rings is 0x0. The return rings live entirely
1344 * within the host, so the nicaddr field in the RCB isn't used.
1346 vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1347 BGE_HOSTADDR(taddr, sc->bnx_ldata.bnx_rx_return_ring_paddr);
1348 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1349 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1350 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1351 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1352 BGE_RCB_MAXLEN_FLAGS(BNX_RETURN_RING_CNT, 0));
1354 /* Set random backoff seed for TX */
1355 CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
1356 sc->arpcom.ac_enaddr[0] + sc->arpcom.ac_enaddr[1] +
1357 sc->arpcom.ac_enaddr[2] + sc->arpcom.ac_enaddr[3] +
1358 sc->arpcom.ac_enaddr[4] + sc->arpcom.ac_enaddr[5] +
1359 BGE_TX_BACKOFF_SEED_MASK);
1361 /* Set inter-packet gap */
1363 if (sc->bnx_asicrev == BGE_ASICREV_BCM5720 ||
1364 sc->bnx_asicrev == BGE_ASICREV_BCM5762) {
1365 val |= CSR_READ_4(sc, BGE_TX_LENGTHS) &
1366 (BGE_TXLEN_JMB_FRM_LEN_MSK | BGE_TXLEN_CNT_DN_VAL_MSK);
1368 CSR_WRITE_4(sc, BGE_TX_LENGTHS, val);
1371 * Specify which ring to use for packets that don't match
1374 CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
1377 * Configure number of RX lists. One interrupt distribution
1378 * list, sixteen active lists, one bad frames class.
1380 CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
1382 /* Inialize RX list placement stats mask. */
1383 CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
1384 CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
1386 /* Disable host coalescing until we get it set up */
1387 CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
1389 /* Poll to make sure it's shut down. */
1390 for (i = 0; i < BNX_TIMEOUT; i++) {
1391 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
1396 if (i == BNX_TIMEOUT) {
1397 if_printf(&sc->arpcom.ac_if,
1398 "host coalescing engine failed to idle\n");
1402 /* Set up host coalescing defaults */
1403 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bnx_rx_coal_ticks);
1404 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bnx_tx_coal_ticks);
1405 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bnx_rx_coal_bds);
1406 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bnx_tx_coal_bds);
1407 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, sc->bnx_rx_coal_bds_int);
1408 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, sc->bnx_tx_coal_bds_int);
1410 /* Set up address of status block */
1411 bzero(sc->bnx_ldata.bnx_status_block, BGE_STATUS_BLK_SZ);
1412 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI,
1413 BGE_ADDR_HI(sc->bnx_ldata.bnx_status_block_paddr));
1414 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
1415 BGE_ADDR_LO(sc->bnx_ldata.bnx_status_block_paddr));
1417 /* Set up status block partail update size. */
1418 val = BGE_STATBLKSZ_32BYTE;
1421 * Does not seem to have visible effect in both
1422 * bulk data (1472B UDP datagram) and tiny data
1423 * (18B UDP datagram) TX tests.
1425 val |= BGE_HCCMODE_CLRTICK_TX;
1427 /* Turn on host coalescing state machine */
1428 CSR_WRITE_4(sc, BGE_HCC_MODE, val | BGE_HCCMODE_ENABLE);
1430 /* Turn on RX BD completion state machine and enable attentions */
1431 CSR_WRITE_4(sc, BGE_RBDC_MODE,
1432 BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);
1434 /* Turn on RX list placement state machine */
1435 CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
1437 val = BGE_MACMODE_TXDMA_ENB | BGE_MACMODE_RXDMA_ENB |
1438 BGE_MACMODE_RX_STATS_CLEAR | BGE_MACMODE_TX_STATS_CLEAR |
1439 BGE_MACMODE_RX_STATS_ENB | BGE_MACMODE_TX_STATS_ENB |
1440 BGE_MACMODE_FRMHDR_DMA_ENB;
1442 if (sc->bnx_flags & BNX_FLAG_TBI)
1443 val |= BGE_PORTMODE_TBI;
1444 else if (sc->bnx_flags & BNX_FLAG_MII_SERDES)
1445 val |= BGE_PORTMODE_GMII;
1447 val |= BGE_PORTMODE_MII;
1449 /* Turn on DMA, clear stats */
1450 CSR_WRITE_4(sc, BGE_MAC_MODE, val);
1452 /* Set misc. local control, enable interrupts on attentions */
1453 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);
1456 /* Assert GPIO pins for PHY reset */
1457 BNX_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
1458 BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
1459 BNX_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
1460 BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
1463 /* Turn on write DMA state machine */
1464 val = BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS;
1465 /* Enable host coalescing bug fix. */
1466 val |= BGE_WDMAMODE_STATUS_TAG_FIX;
1467 if (sc->bnx_asicrev == BGE_ASICREV_BCM5785) {
1468 /* Request larger DMA burst size to get better performance. */
1469 val |= BGE_WDMAMODE_BURST_ALL_DATA;
1471 CSR_WRITE_4(sc, BGE_WDMA_MODE, val);
1474 if (BNX_IS_57765_PLUS(sc)) {
1475 uint32_t dmactl, dmactl_reg;
1477 if (sc->bnx_asicrev == BGE_ASICREV_BCM5762)
1478 dmactl_reg = BGE_RDMA_RSRVCTRL2;
1480 dmactl_reg = BGE_RDMA_RSRVCTRL;
1482 dmactl = CSR_READ_4(sc, dmactl_reg);
1484 * Adjust tx margin to prevent TX data corruption and
1485 * fix internal FIFO overflow.
1487 if (sc->bnx_asicrev == BGE_ASICREV_BCM5719 ||
1488 sc->bnx_asicrev == BGE_ASICREV_BCM5720 ||
1489 sc->bnx_asicrev == BGE_ASICREV_BCM5762) {
1490 dmactl &= ~(BGE_RDMA_RSRVCTRL_FIFO_LWM_MASK |
1491 BGE_RDMA_RSRVCTRL_FIFO_HWM_MASK |
1492 BGE_RDMA_RSRVCTRL_TXMRGN_MASK);
1493 dmactl |= BGE_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
1494 BGE_RDMA_RSRVCTRL_FIFO_HWM_1_5K |
1495 BGE_RDMA_RSRVCTRL_TXMRGN_320B;
1498 * Enable fix for read DMA FIFO overruns.
1499 * The fix is to limit the number of RX BDs
1500 * the hardware would fetch at a fime.
1502 CSR_WRITE_4(sc, dmactl_reg,
1503 dmactl | BGE_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
1506 if (sc->bnx_asicrev == BGE_ASICREV_BCM5719) {
1507 CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL,
1508 CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) |
1509 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_4K |
1510 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K);
1511 } else if (sc->bnx_asicrev == BGE_ASICREV_BCM5720 ||
1512 sc->bnx_asicrev == BGE_ASICREV_BCM5762) {
1515 if (sc->bnx_asicrev == BGE_ASICREV_BCM5762)
1516 ctrl_reg = BGE_RDMA_LSO_CRPTEN_CTRL2;
1518 ctrl_reg = BGE_RDMA_LSO_CRPTEN_CTRL;
1521 * Allow 4KB burst length reads for non-LSO frames.
1522 * Enable 512B burst length reads for buffer descriptors.
1524 CSR_WRITE_4(sc, ctrl_reg,
1525 CSR_READ_4(sc, ctrl_reg) |
1526 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_512 |
1527 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K);
1530 /* Turn on read DMA state machine */
1531 val = BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS;
1532 if (sc->bnx_asicrev == BGE_ASICREV_BCM5717)
1533 val |= BGE_RDMAMODE_MULT_DMA_RD_DIS;
1534 if (sc->bnx_asicrev == BGE_ASICREV_BCM5784 ||
1535 sc->bnx_asicrev == BGE_ASICREV_BCM5785 ||
1536 sc->bnx_asicrev == BGE_ASICREV_BCM57780) {
1537 val |= BGE_RDMAMODE_BD_SBD_CRPT_ATTN |
1538 BGE_RDMAMODE_MBUF_RBD_CRPT_ATTN |
1539 BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN;
1541 if (sc->bnx_asicrev == BGE_ASICREV_BCM5720 ||
1542 sc->bnx_asicrev == BGE_ASICREV_BCM5762) {
1543 val |= CSR_READ_4(sc, BGE_RDMA_MODE) &
1544 BGE_RDMAMODE_H2BNC_VLAN_DET;
1546 * Allow multiple outstanding read requests from
1547 * non-LSO read DMA engine.
1549 val &= ~BGE_RDMAMODE_MULT_DMA_RD_DIS;
1551 if (sc->bnx_asicrev == BGE_ASICREV_BCM57766)
1552 val |= BGE_RDMAMODE_JMB_2K_MMRR;
1553 if (sc->bnx_flags & BNX_FLAG_TSO)
1554 val |= BGE_RDMAMODE_TSO4_ENABLE;
1555 val |= BGE_RDMAMODE_FIFO_LONG_BURST;
1556 CSR_WRITE_4(sc, BGE_RDMA_MODE, val);
1559 /* Turn on RX data completion state machine */
1560 CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
1562 /* Turn on RX BD initiator state machine */
1563 CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
1565 /* Turn on RX data and RX BD initiator state machine */
1566 CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
1568 /* Turn on send BD completion state machine */
1569 CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
1571 /* Turn on send data completion state machine */
1572 val = BGE_SDCMODE_ENABLE;
1573 if (sc->bnx_asicrev == BGE_ASICREV_BCM5761)
1574 val |= BGE_SDCMODE_CDELAY;
1575 CSR_WRITE_4(sc, BGE_SDC_MODE, val);
1577 /* Turn on send data initiator state machine */
1578 if (sc->bnx_flags & BNX_FLAG_TSO) {
1579 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE |
1580 BGE_SDIMODE_HW_LSO_PRE_DMA);
1582 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
1585 /* Turn on send BD initiator state machine */
1586 CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
1588 /* Turn on send BD selector state machine */
1589 CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
1591 CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
1592 CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
1593 BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);
1595 /* ack/clear link change events */
1596 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
1597 BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
1598 BGE_MACSTAT_LINK_CHANGED);
1599 CSR_WRITE_4(sc, BGE_MI_STS, 0);
1602 * Enable attention when the link has changed state for
1603 * devices that use auto polling.
1605 if (sc->bnx_flags & BNX_FLAG_TBI) {
1606 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
1608 if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) {
1609 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bnx_mi_mode);
1615 * Clear any pending link state attention.
1616 * Otherwise some link state change events may be lost until attention
1617 * is cleared by bnx_intr() -> bnx_softc.bnx_link_upd() sequence.
1618 * It's not necessary on newer BCM chips - perhaps enabling link
1619 * state change attentions implies clearing pending attention.
1621 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
1622 BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
1623 BGE_MACSTAT_LINK_CHANGED);
1625 /* Enable link state change attentions. */
1626 BNX_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
1632 * Probe for a Broadcom chip. Check the PCI vendor and device IDs
1633 * against our list and return its name if we find a match. Note
1634 * that since the Broadcom controller contains VPD support, we
1635 * can get the device name string from the controller itself instead
1636 * of the compiled-in string. This is a little slow, but it guarantees
1637 * we'll always announce the right product name.
1640 bnx_probe(device_t dev)
1642 const struct bnx_type *t;
1643 uint16_t product, vendor;
1645 if (!pci_is_pcie(dev))
1648 product = pci_get_device(dev);
1649 vendor = pci_get_vendor(dev);
1651 for (t = bnx_devs; t->bnx_name != NULL; t++) {
1652 if (vendor == t->bnx_vid && product == t->bnx_did)
1655 if (t->bnx_name == NULL)
1658 device_set_desc(dev, t->bnx_name);
1663 bnx_attach(device_t dev)
1666 struct bnx_softc *sc;
1668 int error = 0, rid, capmask;
1669 uint8_t ether_addr[ETHER_ADDR_LEN];
1671 driver_intr_t *intr_func;
1672 uintptr_t mii_priv = 0;
1674 #ifdef BNX_TSO_DEBUG
1679 sc = device_get_softc(dev);
1681 callout_init_mp(&sc->bnx_stat_timer);
1682 callout_init_mp(&sc->bnx_intr_timer);
1683 lwkt_serialize_init(&sc->bnx_jslot_serializer);
1685 product = pci_get_device(dev);
1687 #ifndef BURN_BRIDGES
1688 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
1691 irq = pci_read_config(dev, PCIR_INTLINE, 4);
1692 mem = pci_read_config(dev, BGE_PCI_BAR0, 4);
1694 device_printf(dev, "chip is in D%d power mode "
1695 "-- setting to D0\n", pci_get_powerstate(dev));
1697 pci_set_powerstate(dev, PCI_POWERSTATE_D0);
1699 pci_write_config(dev, PCIR_INTLINE, irq, 4);
1700 pci_write_config(dev, BGE_PCI_BAR0, mem, 4);
1702 #endif /* !BURN_BRIDGE */
1705 * Map control/status registers.
1707 pci_enable_busmaster(dev);
1710 sc->bnx_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
1713 if (sc->bnx_res == NULL) {
1714 device_printf(dev, "couldn't map memory\n");
1718 sc->bnx_btag = rman_get_bustag(sc->bnx_res);
1719 sc->bnx_bhandle = rman_get_bushandle(sc->bnx_res);
1721 /* Save various chip information */
1723 pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >>
1724 BGE_PCIMISCCTL_ASICREV_SHIFT;
1725 if (BGE_ASICREV(sc->bnx_chipid) == BGE_ASICREV_USE_PRODID_REG) {
1726 /* All chips having dedicated ASICREV register have CPMU */
1727 sc->bnx_flags |= BNX_FLAG_CPMU;
1730 case PCI_PRODUCT_BROADCOM_BCM5717:
1731 case PCI_PRODUCT_BROADCOM_BCM5717C:
1732 case PCI_PRODUCT_BROADCOM_BCM5718:
1733 case PCI_PRODUCT_BROADCOM_BCM5719:
1734 case PCI_PRODUCT_BROADCOM_BCM5720_ALT:
1735 case PCI_PRODUCT_BROADCOM_BCM5725:
1736 case PCI_PRODUCT_BROADCOM_BCM5727:
1737 case PCI_PRODUCT_BROADCOM_BCM5762:
1738 sc->bnx_chipid = pci_read_config(dev,
1739 BGE_PCI_GEN2_PRODID_ASICREV, 4);
1742 case PCI_PRODUCT_BROADCOM_BCM57761:
1743 case PCI_PRODUCT_BROADCOM_BCM57762:
1744 case PCI_PRODUCT_BROADCOM_BCM57765:
1745 case PCI_PRODUCT_BROADCOM_BCM57766:
1746 case PCI_PRODUCT_BROADCOM_BCM57781:
1747 case PCI_PRODUCT_BROADCOM_BCM57782:
1748 case PCI_PRODUCT_BROADCOM_BCM57785:
1749 case PCI_PRODUCT_BROADCOM_BCM57786:
1750 case PCI_PRODUCT_BROADCOM_BCM57791:
1751 case PCI_PRODUCT_BROADCOM_BCM57795:
1752 sc->bnx_chipid = pci_read_config(dev,
1753 BGE_PCI_GEN15_PRODID_ASICREV, 4);
1757 sc->bnx_chipid = pci_read_config(dev,
1758 BGE_PCI_PRODID_ASICREV, 4);
1762 if (sc->bnx_chipid == BGE_CHIPID_BCM5717_C0)
1763 sc->bnx_chipid = BGE_CHIPID_BCM5720_A0;
1765 sc->bnx_asicrev = BGE_ASICREV(sc->bnx_chipid);
1766 sc->bnx_chiprev = BGE_CHIPREV(sc->bnx_chipid);
1768 switch (sc->bnx_asicrev) {
1769 case BGE_ASICREV_BCM5717:
1770 case BGE_ASICREV_BCM5719:
1771 case BGE_ASICREV_BCM5720:
1772 sc->bnx_flags |= BNX_FLAG_5717_PLUS | BNX_FLAG_57765_PLUS;
1775 case BGE_ASICREV_BCM5762:
1776 sc->bnx_flags |= BNX_FLAG_57765_PLUS;
1779 case BGE_ASICREV_BCM57765:
1780 case BGE_ASICREV_BCM57766:
1781 sc->bnx_flags |= BNX_FLAG_57765_FAMILY | BNX_FLAG_57765_PLUS;
1784 sc->bnx_flags |= BNX_FLAG_SHORTDMA;
1786 sc->bnx_flags |= BNX_FLAG_TSO;
1787 if (sc->bnx_asicrev == BGE_ASICREV_BCM5719 &&
1788 sc->bnx_chipid == BGE_CHIPID_BCM5719_A0)
1789 sc->bnx_flags &= ~BNX_FLAG_TSO;
1791 if (sc->bnx_asicrev == BGE_ASICREV_BCM5717 ||
1792 BNX_IS_57765_FAMILY(sc)) {
1794 * All BCM57785 and BCM5718 families chips have a bug that
1795 * under certain situation interrupt will not be enabled
1796 * even if status tag is written to BGE_MBX_IRQ0_LO mailbox.
1798 * While BCM5719 and BCM5720 have a hardware workaround
1799 * which could fix the above bug.
1800 * See the comment near BGE_PCIDMARWCTL_TAGGED_STATUS_WA in
1803 * For the rest of the chips in these two families, we will
1804 * have to poll the status block at high rate (10ms currently)
1805 * to check whether the interrupt is hosed or not.
1806 * See bnx_intr_check() for details.
1808 sc->bnx_flags |= BNX_FLAG_STATUSTAG_BUG;
1811 sc->bnx_pciecap = pci_get_pciecap_ptr(sc->bnx_dev);
1812 if (sc->bnx_asicrev == BGE_ASICREV_BCM5719 ||
1813 sc->bnx_asicrev == BGE_ASICREV_BCM5720)
1814 pcie_set_max_readrq(dev, PCIEM_DEVCTL_MAX_READRQ_2048);
1816 pcie_set_max_readrq(dev, PCIEM_DEVCTL_MAX_READRQ_4096);
1817 device_printf(dev, "CHIP ID 0x%08x; "
1818 "ASIC REV 0x%02x; CHIP REV 0x%02x\n",
1819 sc->bnx_chipid, sc->bnx_asicrev, sc->bnx_chiprev);
1822 * Set various PHY quirk flags.
1825 capmask = MII_CAPMASK_DEFAULT;
1826 if (product == PCI_PRODUCT_BROADCOM_BCM57791 ||
1827 product == PCI_PRODUCT_BROADCOM_BCM57795) {
1829 capmask &= ~BMSR_EXTSTAT;
1832 mii_priv |= BRGPHY_FLAG_WIRESPEED;
1833 if (sc->bnx_chipid == BGE_CHIPID_BCM5762_A0)
1834 mii_priv |= BRGPHY_FLAG_5762_A0;
1836 /* Initialize if_name earlier, so if_printf could be used */
1837 ifp = &sc->arpcom.ac_if;
1838 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1840 /* Try to reset the chip. */
1843 if (bnx_chipinit(sc)) {
1844 device_printf(dev, "chip initialization failed\n");
1850 * Get station address
1852 error = bnx_get_eaddr(sc, ether_addr);
1854 device_printf(dev, "failed to read station address\n");
1859 sc->bnx_tx_ringcnt = 1;
1861 error = bnx_dma_alloc(sc);
1866 * Allocate interrupt
1868 sc->bnx_irq_type = pci_alloc_1intr(dev, bnx_msi_enable, &sc->bnx_irq_rid,
1871 sc->bnx_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->bnx_irq_rid,
1873 if (sc->bnx_irq == NULL) {
1874 device_printf(dev, "couldn't map interrupt\n");
1879 if (sc->bnx_irq_type == PCI_INTR_TYPE_MSI) {
1880 sc->bnx_flags |= BNX_FLAG_ONESHOT_MSI;
1884 /* Set default tuneable values. */
1885 sc->bnx_rx_coal_ticks = BNX_RX_COAL_TICKS_DEF;
1886 sc->bnx_tx_coal_ticks = BNX_TX_COAL_TICKS_DEF;
1887 sc->bnx_rx_coal_bds = BNX_RX_COAL_BDS_DEF;
1888 sc->bnx_tx_coal_bds = BNX_TX_COAL_BDS_DEF;
1889 sc->bnx_rx_coal_bds_int = BNX_RX_COAL_BDS_INT_DEF;
1890 sc->bnx_tx_coal_bds_int = BNX_TX_COAL_BDS_INT_DEF;
1892 /* Set up ifnet structure */
1894 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1895 ifp->if_ioctl = bnx_ioctl;
1896 ifp->if_start = bnx_start;
1897 #ifdef IFPOLL_ENABLE
1898 ifp->if_npoll = bnx_npoll;
1900 ifp->if_watchdog = bnx_watchdog;
1901 ifp->if_init = bnx_init;
1902 ifp->if_mtu = ETHERMTU;
1903 ifp->if_capabilities = IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
1904 ifq_set_maxlen(&ifp->if_snd, BGE_TX_RING_CNT - 1);
1905 ifq_set_ready(&ifp->if_snd);
1907 ifp->if_capabilities |= IFCAP_HWCSUM;
1908 ifp->if_hwassist = BNX_CSUM_FEATURES;
1909 if (sc->bnx_flags & BNX_FLAG_TSO) {
1910 ifp->if_capabilities |= IFCAP_TSO;
1911 ifp->if_hwassist |= CSUM_TSO;
1913 ifp->if_capenable = ifp->if_capabilities;
1916 * Figure out what sort of media we have by checking the
1917 * hardware config word in the first 32k of NIC internal memory,
1918 * or fall back to examining the EEPROM if necessary.
1919 * Note: on some BCM5700 cards, this value appears to be unset.
1920 * If that's the case, we have to rely on identifying the NIC
1921 * by its PCI subsystem ID, as we do below for the SysKonnect
1924 if (bnx_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER) {
1925 hwcfg = bnx_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
1927 if (bnx_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
1929 device_printf(dev, "failed to read EEPROM\n");
1933 hwcfg = ntohl(hwcfg);
1936 /* The SysKonnect SK-9D41 is a 1000baseSX card. */
1937 if (pci_get_subvendor(dev) == PCI_PRODUCT_SCHNEIDERKOCH_SK_9D41 ||
1938 (hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER)
1939 sc->bnx_flags |= BNX_FLAG_TBI;
1942 if (sc->bnx_flags & BNX_FLAG_CPMU)
1943 sc->bnx_mi_mode = BGE_MIMODE_500KHZ_CONST;
1945 sc->bnx_mi_mode = BGE_MIMODE_BASE;
1947 /* Setup link status update stuffs */
1948 if (sc->bnx_flags & BNX_FLAG_TBI) {
1949 sc->bnx_link_upd = bnx_tbi_link_upd;
1950 sc->bnx_link_chg = BGE_MACSTAT_LINK_CHANGED;
1951 } else if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) {
1952 sc->bnx_link_upd = bnx_autopoll_link_upd;
1953 sc->bnx_link_chg = BGE_MACSTAT_LINK_CHANGED;
1955 sc->bnx_link_upd = bnx_copper_link_upd;
1956 sc->bnx_link_chg = BGE_MACSTAT_LINK_CHANGED;
1959 /* Set default PHY address */
1963 * PHY address mapping for various devices.
1965 * | F0 Cu | F0 Sr | F1 Cu | F1 Sr |
1966 * ---------+-------+-------+-------+-------+
1967 * BCM57XX | 1 | X | X | X |
1968 * BCM5704 | 1 | X | 1 | X |
1969 * BCM5717 | 1 | 8 | 2 | 9 |
1970 * BCM5719 | 1 | 8 | 2 | 9 |
1971 * BCM5720 | 1 | 8 | 2 | 9 |
1973 * Other addresses may respond but they are not
1974 * IEEE compliant PHYs and should be ignored.
1976 if (BNX_IS_5717_PLUS(sc)) {
1979 f = pci_get_function(dev);
1980 if (sc->bnx_chipid == BGE_CHIPID_BCM5717_A0) {
1981 if (CSR_READ_4(sc, BGE_SGDIG_STS) &
1982 BGE_SGDIGSTS_IS_SERDES)
1983 sc->bnx_phyno = f + 8;
1985 sc->bnx_phyno = f + 1;
1987 if (CSR_READ_4(sc, BGE_CPMU_PHY_STRAP) &
1988 BGE_CPMU_PHY_STRAP_IS_SERDES)
1989 sc->bnx_phyno = f + 8;
1991 sc->bnx_phyno = f + 1;
1995 if (sc->bnx_flags & BNX_FLAG_TBI) {
1996 ifmedia_init(&sc->bnx_ifmedia, IFM_IMASK,
1997 bnx_ifmedia_upd, bnx_ifmedia_sts);
1998 ifmedia_add(&sc->bnx_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
1999 ifmedia_add(&sc->bnx_ifmedia,
2000 IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL);
2001 ifmedia_add(&sc->bnx_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
2002 ifmedia_set(&sc->bnx_ifmedia, IFM_ETHER|IFM_AUTO);
2003 sc->bnx_ifmedia.ifm_media = sc->bnx_ifmedia.ifm_cur->ifm_media;
2005 struct mii_probe_args mii_args;
2007 mii_probe_args_init(&mii_args, bnx_ifmedia_upd, bnx_ifmedia_sts);
2008 mii_args.mii_probemask = 1 << sc->bnx_phyno;
2009 mii_args.mii_capmask = capmask;
2010 mii_args.mii_privtag = MII_PRIVTAG_BRGPHY;
2011 mii_args.mii_priv = mii_priv;
2013 error = mii_probe(dev, &sc->bnx_miibus, &mii_args);
2015 device_printf(dev, "MII without any PHY!\n");
2021 * Create sysctl nodes.
2023 sysctl_ctx_init(&sc->bnx_sysctl_ctx);
2024 sc->bnx_sysctl_tree = SYSCTL_ADD_NODE(&sc->bnx_sysctl_ctx,
2025 SYSCTL_STATIC_CHILDREN(_hw),
2027 device_get_nameunit(dev),
2029 if (sc->bnx_sysctl_tree == NULL) {
2030 device_printf(dev, "can't add sysctl node\n");
2035 SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
2036 SYSCTL_CHILDREN(sc->bnx_sysctl_tree),
2037 OID_AUTO, "rx_coal_ticks",
2038 CTLTYPE_INT | CTLFLAG_RW,
2039 sc, 0, bnx_sysctl_rx_coal_ticks, "I",
2040 "Receive coalescing ticks (usec).");
2041 SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
2042 SYSCTL_CHILDREN(sc->bnx_sysctl_tree),
2043 OID_AUTO, "tx_coal_ticks",
2044 CTLTYPE_INT | CTLFLAG_RW,
2045 sc, 0, bnx_sysctl_tx_coal_ticks, "I",
2046 "Transmit coalescing ticks (usec).");
2047 SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
2048 SYSCTL_CHILDREN(sc->bnx_sysctl_tree),
2049 OID_AUTO, "rx_coal_bds",
2050 CTLTYPE_INT | CTLFLAG_RW,
2051 sc, 0, bnx_sysctl_rx_coal_bds, "I",
2052 "Receive max coalesced BD count.");
2053 SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
2054 SYSCTL_CHILDREN(sc->bnx_sysctl_tree),
2055 OID_AUTO, "tx_coal_bds",
2056 CTLTYPE_INT | CTLFLAG_RW,
2057 sc, 0, bnx_sysctl_tx_coal_bds, "I",
2058 "Transmit max coalesced BD count.");
2060 * A common design characteristic for many Broadcom
2061 * client controllers is that they only support a
2062 * single outstanding DMA read operation on the PCIe
2063 * bus. This means that it will take twice as long to
2064 * fetch a TX frame that is split into header and
2065 * payload buffers as it does to fetch a single,
2066 * contiguous TX frame (2 reads vs. 1 read). For these
2067 * controllers, coalescing buffers to reduce the number
2068 * of memory reads is effective way to get maximum
2069 * performance(about 940Mbps). Without collapsing TX
2070 * buffers the maximum TCP bulk transfer performance
2071 * is about 850Mbps. However forcing coalescing mbufs
2072 * consumes a lot of CPU cycles, so leave it off by
2075 SYSCTL_ADD_INT(&sc->bnx_sysctl_ctx,
2076 SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
2077 "force_defrag", CTLFLAG_RW, &sc->bnx_force_defrag, 0,
2078 "Force defragment on TX path");
2080 SYSCTL_ADD_INT(&sc->bnx_sysctl_ctx,
2081 SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
2082 "tx_wreg", CTLFLAG_RW, &sc->bnx_tx_ring[0].bnx_tx_wreg, 0,
2083 "# of segments before writing to hardware register");
2085 SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
2086 SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
2087 "rx_coal_bds_int", CTLTYPE_INT | CTLFLAG_RW,
2088 sc, 0, bnx_sysctl_rx_coal_bds_int, "I",
2089 "Receive max coalesced BD count during interrupt.");
2090 SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
2091 SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
2092 "tx_coal_bds_int", CTLTYPE_INT | CTLFLAG_RW,
2093 sc, 0, bnx_sysctl_tx_coal_bds_int, "I",
2094 "Transmit max coalesced BD count during interrupt.");
2096 #ifdef BNX_TSO_DEBUG
2097 for (i = 0; i < BNX_TSO_NSTATS; ++i) {
2098 ksnprintf(desc, sizeof(desc), "tso%d", i + 1);
2099 SYSCTL_ADD_ULONG(&sc->bnx_sysctl_ctx,
2100 SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
2101 desc, CTLFLAG_RW, &sc->bnx_tsosegs[i], "");
2106 * Call MI attach routine.
2108 ether_ifattach(ifp, ether_addr, NULL);
2110 ifq_set_cpuid(&ifp->if_snd, sc->bnx_intr_cpuid);
2112 #ifdef IFPOLL_ENABLE
2113 ifpoll_compat_setup(&sc->bnx_npoll,
2114 &sc->bnx_sysctl_ctx, sc->bnx_sysctl_tree,
2115 device_get_unit(dev), ifp->if_serializer);
2118 if (sc->bnx_irq_type == PCI_INTR_TYPE_MSI) {
2119 if (sc->bnx_flags & BNX_FLAG_ONESHOT_MSI) {
2120 intr_func = bnx_msi_oneshot;
2122 device_printf(dev, "oneshot MSI\n");
2124 intr_func = bnx_msi;
2127 intr_func = bnx_intr_legacy;
2129 error = bus_setup_intr(dev, sc->bnx_irq, INTR_MPSAFE, intr_func, sc,
2130 &sc->bnx_intrhand, ifp->if_serializer);
2132 ether_ifdetach(ifp);
2133 device_printf(dev, "couldn't set up irq\n");
2137 sc->bnx_intr_cpuid = rman_get_cpuid(sc->bnx_irq);
2138 sc->bnx_stat_cpuid = sc->bnx_intr_cpuid;
2147 bnx_detach(device_t dev)
2149 struct bnx_softc *sc = device_get_softc(dev);
2151 if (device_is_attached(dev)) {
2152 struct ifnet *ifp = &sc->arpcom.ac_if;
2154 lwkt_serialize_enter(ifp->if_serializer);
2157 bus_teardown_intr(dev, sc->bnx_irq, sc->bnx_intrhand);
2158 lwkt_serialize_exit(ifp->if_serializer);
2160 ether_ifdetach(ifp);
2163 if (sc->bnx_flags & BNX_FLAG_TBI)
2164 ifmedia_removeall(&sc->bnx_ifmedia);
2166 device_delete_child(dev, sc->bnx_miibus);
2167 bus_generic_detach(dev);
2169 if (sc->bnx_irq != NULL) {
2170 bus_release_resource(dev, SYS_RES_IRQ, sc->bnx_irq_rid,
2173 if (sc->bnx_irq_type == PCI_INTR_TYPE_MSI)
2174 pci_release_msi(dev);
2176 if (sc->bnx_res != NULL) {
2177 bus_release_resource(dev, SYS_RES_MEMORY,
2178 BGE_PCI_BAR0, sc->bnx_res);
2181 if (sc->bnx_sysctl_tree != NULL)
2182 sysctl_ctx_free(&sc->bnx_sysctl_ctx);
2190 bnx_reset(struct bnx_softc *sc)
2193 uint32_t cachesize, command, pcistate, reset;
2194 void (*write_op)(struct bnx_softc *, uint32_t, uint32_t);
2200 write_op = bnx_writemem_direct;
2202 /* Save some important PCI state. */
2203 cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
2204 command = pci_read_config(dev, BGE_PCI_CMD, 4);
2205 pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);
2207 pci_write_config(dev, BGE_PCI_MISC_CTL,
2208 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2209 BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW|
2210 BGE_PCIMISCCTL_TAGGED_STATUS, 4);
2212 /* Disable fastboot on controllers that support it. */
2214 if_printf(&sc->arpcom.ac_if, "Disabling fastboot\n");
2215 CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0x0);
2218 * Write the magic number to SRAM at offset 0xB50.
2219 * When firmware finishes its initialization it will
2220 * write ~BGE_MAGIC_NUMBER to the same location.
2222 bnx_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
2224 reset = BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1);
2226 /* XXX: Broadcom Linux driver. */
2227 /* Force PCI-E 1.0a mode */
2228 if (!BNX_IS_57765_PLUS(sc) &&
2229 CSR_READ_4(sc, BGE_PCIE_PHY_TSTCTL) ==
2230 (BGE_PCIE_PHY_TSTCTL_PSCRAM |
2231 BGE_PCIE_PHY_TSTCTL_PCIE10)) {
2232 CSR_WRITE_4(sc, BGE_PCIE_PHY_TSTCTL,
2233 BGE_PCIE_PHY_TSTCTL_PSCRAM);
2235 if (sc->bnx_chipid != BGE_CHIPID_BCM5750_A0) {
2236 /* Prevent PCIE link training during global reset */
2237 CSR_WRITE_4(sc, BGE_MISC_CFG, (1<<29));
2242 * Set GPHY Power Down Override to leave GPHY
2243 * powered up in D0 uninitialized.
2245 if ((sc->bnx_flags & BNX_FLAG_CPMU) == 0)
2246 reset |= BGE_MISCCFG_GPHY_PD_OVERRIDE;
2248 /* Issue global reset */
2249 write_op(sc, BGE_MISC_CFG, reset);
2253 /* XXX: Broadcom Linux driver. */
2254 if (sc->bnx_chipid == BGE_CHIPID_BCM5750_A0) {
2257 DELAY(500000); /* wait for link training to complete */
2258 v = pci_read_config(dev, 0xc4, 4);
2259 pci_write_config(dev, 0xc4, v | (1<<15), 4);
2262 devctl = pci_read_config(dev, sc->bnx_pciecap + PCIER_DEVCTRL, 2);
2264 /* Disable no snoop and disable relaxed ordering. */
2265 devctl &= ~(PCIEM_DEVCTL_RELAX_ORDER | PCIEM_DEVCTL_NOSNOOP);
2267 /* Old PCI-E chips only support 128 bytes Max PayLoad Size. */
2268 if ((sc->bnx_flags & BNX_FLAG_CPMU) == 0) {
2269 devctl &= ~PCIEM_DEVCTL_MAX_PAYLOAD_MASK;
2270 devctl |= PCIEM_DEVCTL_MAX_PAYLOAD_128;
2273 pci_write_config(dev, sc->bnx_pciecap + PCIER_DEVCTRL,
2276 /* Clear error status. */
2277 pci_write_config(dev, sc->bnx_pciecap + PCIER_DEVSTS,
2278 PCIEM_DEVSTS_CORR_ERR |
2279 PCIEM_DEVSTS_NFATAL_ERR |
2280 PCIEM_DEVSTS_FATAL_ERR |
2281 PCIEM_DEVSTS_UNSUPP_REQ, 2);
2283 /* Reset some of the PCI state that got zapped by reset */
2284 pci_write_config(dev, BGE_PCI_MISC_CTL,
2285 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2286 BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW|
2287 BGE_PCIMISCCTL_TAGGED_STATUS, 4);
2288 pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
2289 pci_write_config(dev, BGE_PCI_CMD, command, 4);
2290 write_op(sc, BGE_MISC_CFG, (65 << 1));
2292 /* Enable memory arbiter */
2293 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
2296 * Poll until we see the 1's complement of the magic number.
2297 * This indicates that the firmware initialization is complete.
2299 for (i = 0; i < BNX_FIRMWARE_TIMEOUT; i++) {
2300 val = bnx_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
2301 if (val == ~BGE_MAGIC_NUMBER)
2305 if (i == BNX_FIRMWARE_TIMEOUT) {
2306 if_printf(&sc->arpcom.ac_if, "firmware handshake "
2307 "timed out, found 0x%08x\n", val);
2310 /* BCM57765 A0 needs additional time before accessing. */
2311 if (sc->bnx_chipid == BGE_CHIPID_BCM57765_A0)
2315 * XXX Wait for the value of the PCISTATE register to
2316 * return to its original pre-reset state. This is a
2317 * fairly good indicator of reset completion. If we don't
2318 * wait for the reset to fully complete, trying to read
2319 * from the device's non-PCI registers may yield garbage
2322 for (i = 0; i < BNX_TIMEOUT; i++) {
2323 if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate)
2328 /* Fix up byte swapping */
2329 CSR_WRITE_4(sc, BGE_MODE_CTL, bnx_dma_swap_options(sc));
2331 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
2334 * The 5704 in TBI mode apparently needs some special
2335 * adjustment to insure the SERDES drive level is set
2338 if (sc->bnx_asicrev == BGE_ASICREV_BCM5704 &&
2339 (sc->bnx_flags & BNX_FLAG_TBI)) {
2342 serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG);
2343 serdescfg = (serdescfg & ~0xFFF) | 0x880;
2344 CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg);
2347 CSR_WRITE_4(sc, BGE_MI_MODE,
2348 sc->bnx_mi_mode & ~BGE_MIMODE_AUTOPOLL);
2351 /* XXX: Broadcom Linux driver. */
2352 if (!BNX_IS_57765_PLUS(sc)) {
2355 /* Enable Data FIFO protection. */
2356 v = CSR_READ_4(sc, BGE_PCIE_TLDLPL_PORT);
2357 CSR_WRITE_4(sc, BGE_PCIE_TLDLPL_PORT, v | (1 << 25));
2362 if (sc->bnx_asicrev == BGE_ASICREV_BCM5720) {
2363 BNX_CLRBIT(sc, BGE_CPMU_CLCK_ORIDE,
2364 CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
2369 * Frame reception handling. This is called if there's a frame
2370 * on the receive return list.
2372 * Note: we have to be able to handle two possibilities here:
2373 * 1) the frame is from the jumbo recieve ring
2374 * 2) the frame is from the standard receive ring
2378 bnx_rxeof(struct bnx_softc *sc, uint16_t rx_prod, int count)
2381 int stdcnt = 0, jumbocnt = 0;
2383 ifp = &sc->arpcom.ac_if;
2385 while (sc->bnx_rx_saved_considx != rx_prod && count != 0) {
2386 struct bge_rx_bd *cur_rx;
2388 struct mbuf *m = NULL;
2389 uint16_t vlan_tag = 0;
2395 &sc->bnx_ldata.bnx_rx_return_ring[sc->bnx_rx_saved_considx];
2397 rxidx = cur_rx->bge_idx;
2398 BNX_INC(sc->bnx_rx_saved_considx, BNX_RETURN_RING_CNT);
2400 if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
2402 vlan_tag = cur_rx->bge_vlan_tag;
2405 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
2406 BNX_INC(sc->bnx_jumbo, BGE_JUMBO_RX_RING_CNT);
2409 if (rxidx != sc->bnx_jumbo) {
2410 IFNET_STAT_INC(ifp, ierrors, 1);
2411 if_printf(ifp, "sw jumbo index(%d) "
2412 "and hw jumbo index(%d) mismatch, drop!\n",
2413 sc->bnx_jumbo, rxidx);
2414 bnx_setup_rxdesc_jumbo(sc, rxidx);
2418 m = sc->bnx_cdata.bnx_rx_jumbo_chain[rxidx].bnx_mbuf;
2419 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2420 IFNET_STAT_INC(ifp, ierrors, 1);
2421 bnx_setup_rxdesc_jumbo(sc, sc->bnx_jumbo);
2424 if (bnx_newbuf_jumbo(sc, sc->bnx_jumbo, 0)) {
2425 IFNET_STAT_INC(ifp, ierrors, 1);
2426 bnx_setup_rxdesc_jumbo(sc, sc->bnx_jumbo);
2430 BNX_INC(sc->bnx_std, BGE_STD_RX_RING_CNT);
2433 if (rxidx != sc->bnx_std) {
2434 IFNET_STAT_INC(ifp, ierrors, 1);
2435 if_printf(ifp, "sw std index(%d) "
2436 "and hw std index(%d) mismatch, drop!\n",
2437 sc->bnx_std, rxidx);
2438 bnx_setup_rxdesc_std(sc, rxidx);
2442 m = sc->bnx_cdata.bnx_rx_std_chain[rxidx].bnx_mbuf;
2443 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2444 IFNET_STAT_INC(ifp, ierrors, 1);
2445 bnx_setup_rxdesc_std(sc, sc->bnx_std);
2448 if (bnx_newbuf_std(sc, sc->bnx_std, 0)) {
2449 IFNET_STAT_INC(ifp, ierrors, 1);
2450 bnx_setup_rxdesc_std(sc, sc->bnx_std);
2455 IFNET_STAT_INC(ifp, ipackets, 1);
2456 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
2457 m->m_pkthdr.rcvif = ifp;
2459 if ((ifp->if_capenable & IFCAP_RXCSUM) &&
2460 (cur_rx->bge_flags & BGE_RXBDFLAG_IPV6) == 0) {
2461 if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
2462 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2463 if ((cur_rx->bge_error_flag &
2464 BGE_RXERRFLAG_IP_CSUM_NOK) == 0)
2465 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2467 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) {
2468 m->m_pkthdr.csum_data =
2469 cur_rx->bge_tcp_udp_csum;
2470 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
2476 * If we received a packet with a vlan tag, pass it
2477 * to vlan_input() instead of ether_input().
2480 m->m_flags |= M_VLANTAG;
2481 m->m_pkthdr.ether_vlantag = vlan_tag;
2483 ifp->if_input(ifp, m);
2486 bnx_writembx(sc, BGE_MBX_RX_CONS0_LO, sc->bnx_rx_saved_considx);
2488 bnx_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bnx_std);
2490 bnx_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bnx_jumbo);
2494 bnx_txeof(struct bnx_tx_ring *txr, uint16_t tx_cons)
2496 struct ifnet *ifp = &txr->bnx_sc->arpcom.ac_if;
2499 * Go through our tx ring and free mbufs for those
2500 * frames that have been sent.
2502 while (txr->bnx_tx_saved_considx != tx_cons) {
2505 idx = txr->bnx_tx_saved_considx;
2506 if (txr->bnx_tx_chain[idx] != NULL) {
2507 IFNET_STAT_INC(ifp, opackets, 1);
2508 bus_dmamap_unload(txr->bnx_tx_mtag,
2509 txr->bnx_tx_dmamap[idx]);
2510 m_freem(txr->bnx_tx_chain[idx]);
2511 txr->bnx_tx_chain[idx] = NULL;
2514 BNX_INC(txr->bnx_tx_saved_considx, BGE_TX_RING_CNT);
2517 if ((BGE_TX_RING_CNT - txr->bnx_txcnt) >=
2518 (BNX_NSEG_RSVD + BNX_NSEG_SPARE))
2519 ifq_clr_oactive(&ifp->if_snd);
2521 if (txr->bnx_txcnt == 0)
2524 if (!ifq_is_empty(&ifp->if_snd))
2528 #ifdef IFPOLL_ENABLE
2531 bnx_npoll(struct ifnet *ifp, struct ifpoll_info *info)
2533 struct bnx_softc *sc = ifp->if_softc;
2535 ASSERT_SERIALIZED(ifp->if_serializer);
2538 int cpuid = sc->bnx_npoll.ifpc_cpuid;
2540 info->ifpi_rx[cpuid].poll_func = bnx_npoll_compat;
2541 info->ifpi_rx[cpuid].arg = NULL;
2542 info->ifpi_rx[cpuid].serializer = ifp->if_serializer;
2544 if (ifp->if_flags & IFF_RUNNING)
2545 bnx_disable_intr(sc);
2546 ifq_set_cpuid(&ifp->if_snd, cpuid);
2548 if (ifp->if_flags & IFF_RUNNING)
2549 bnx_enable_intr(sc);
2550 ifq_set_cpuid(&ifp->if_snd, sc->bnx_intr_cpuid);
2555 bnx_npoll_compat(struct ifnet *ifp, void *arg __unused, int cycle)
2557 struct bnx_softc *sc = ifp->if_softc;
2558 struct bnx_tx_ring *txr = &sc->bnx_tx_ring[0]; /* XXX */
2559 struct bge_status_block *sblk = sc->bnx_ldata.bnx_status_block;
2560 uint16_t rx_prod, tx_cons;
2562 ASSERT_SERIALIZED(ifp->if_serializer);
2564 if (sc->bnx_npoll.ifpc_stcount-- == 0) {
2565 sc->bnx_npoll.ifpc_stcount = sc->bnx_npoll.ifpc_stfrac;
2567 * Process link state changes.
2572 sc->bnx_status_tag = sblk->bge_status_tag;
2575 * Use a load fence to ensure that status_tag is saved
2576 * before rx_prod and tx_cons.
2580 rx_prod = sblk->bge_idx[0].bge_rx_prod_idx;
2581 tx_cons = sblk->bge_idx[0].bge_tx_cons_idx;
2583 if (sc->bnx_rx_saved_considx != rx_prod)
2584 bnx_rxeof(sc, rx_prod, cycle);
2586 if (txr->bnx_tx_saved_considx != tx_cons)
2587 bnx_txeof(txr, tx_cons);
2589 if (sc->bnx_coal_chg)
2590 bnx_coal_change(sc);
2593 #endif /* IFPOLL_ENABLE */
2596 bnx_intr_legacy(void *xsc)
2598 struct bnx_softc *sc = xsc;
2599 struct bge_status_block *sblk = sc->bnx_ldata.bnx_status_block;
2601 if (sc->bnx_status_tag == sblk->bge_status_tag) {
2604 val = pci_read_config(sc->bnx_dev, BGE_PCI_PCISTATE, 4);
2605 if (val & BGE_PCISTAT_INTR_NOTACT)
2611 * Interrupt will have to be disabled if tagged status
2612 * is used, else interrupt will always be asserted on
2613 * certain chips (at least on BCM5750 AX/BX).
2615 bnx_writembx(sc, BGE_MBX_IRQ0_LO, 1);
2623 struct bnx_softc *sc = xsc;
2625 /* Disable interrupt first */
2626 bnx_writembx(sc, BGE_MBX_IRQ0_LO, 1);
2631 bnx_msi_oneshot(void *xsc)
2637 bnx_intr(struct bnx_softc *sc)
2639 struct ifnet *ifp = &sc->arpcom.ac_if;
2640 struct bge_status_block *sblk = sc->bnx_ldata.bnx_status_block;
2641 uint16_t rx_prod, tx_cons;
2644 sc->bnx_status_tag = sblk->bge_status_tag;
2646 * Use a load fence to ensure that status_tag is saved
2647 * before rx_prod, tx_cons and status.
2651 rx_prod = sblk->bge_idx[0].bge_rx_prod_idx;
2652 tx_cons = sblk->bge_idx[0].bge_tx_cons_idx;
2653 status = sblk->bge_status;
2655 if ((status & BGE_STATFLAG_LINKSTATE_CHANGED) || sc->bnx_link_evt)
2658 if (ifp->if_flags & IFF_RUNNING) {
2659 struct bnx_tx_ring *txr = &sc->bnx_tx_ring[0]; /* XXX */
2661 if (sc->bnx_rx_saved_considx != rx_prod)
2662 bnx_rxeof(sc, rx_prod, -1);
2664 if (txr->bnx_tx_saved_considx != tx_cons)
2665 bnx_txeof(txr, tx_cons);
2668 bnx_writembx(sc, BGE_MBX_IRQ0_LO, sc->bnx_status_tag << 24);
2670 if (sc->bnx_coal_chg)
2671 bnx_coal_change(sc);
2677 struct bnx_softc *sc = xsc;
2678 struct ifnet *ifp = &sc->arpcom.ac_if;
2680 lwkt_serialize_enter(ifp->if_serializer);
2682 KKASSERT(mycpuid == sc->bnx_stat_cpuid);
2684 bnx_stats_update_regs(sc);
2686 if (sc->bnx_flags & BNX_FLAG_TBI) {
2688 * Since in TBI mode auto-polling can't be used we should poll
2689 * link status manually. Here we register pending link event
2690 * and trigger interrupt.
2693 BNX_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
2694 } else if (!sc->bnx_link) {
2695 mii_tick(device_get_softc(sc->bnx_miibus));
2698 callout_reset(&sc->bnx_stat_timer, hz, bnx_tick, sc);
2700 lwkt_serialize_exit(ifp->if_serializer);
2704 bnx_stats_update_regs(struct bnx_softc *sc)
2706 struct ifnet *ifp = &sc->arpcom.ac_if;
2707 struct bge_mac_stats_regs stats;
2711 s = (uint32_t *)&stats;
2712 for (i = 0; i < sizeof(struct bge_mac_stats_regs); i += 4) {
2713 *s = CSR_READ_4(sc, BGE_RX_STATS + i);
2717 IFNET_STAT_SET(ifp, collisions,
2718 (stats.dot3StatsSingleCollisionFrames +
2719 stats.dot3StatsMultipleCollisionFrames +
2720 stats.dot3StatsExcessiveCollisions +
2721 stats.dot3StatsLateCollisions));
2725 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
2726 * pointers to descriptors.
2729 bnx_encap(struct bnx_tx_ring *txr, struct mbuf **m_head0, uint32_t *txidx,
2732 struct bge_tx_bd *d = NULL;
2733 uint16_t csum_flags = 0, vlan_tag = 0, mss = 0;
2734 bus_dma_segment_t segs[BNX_NSEG_NEW];
2736 int error, maxsegs, nsegs, idx, i;
2737 struct mbuf *m_head = *m_head0, *m_new;
2739 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
2740 #ifdef BNX_TSO_DEBUG
2744 error = bnx_setup_tso(txr, m_head0, &mss, &csum_flags);
2749 #ifdef BNX_TSO_DEBUG
2750 tso_nsegs = (m_head->m_pkthdr.len /
2751 m_head->m_pkthdr.tso_segsz) - 1;
2752 if (tso_nsegs > (BNX_TSO_NSTATS - 1))
2753 tso_nsegs = BNX_TSO_NSTATS - 1;
2754 else if (tso_nsegs < 0)
2756 txr->sc->bnx_tsosegs[tso_nsegs]++;
2758 } else if (m_head->m_pkthdr.csum_flags & BNX_CSUM_FEATURES) {
2759 if (m_head->m_pkthdr.csum_flags & CSUM_IP)
2760 csum_flags |= BGE_TXBDFLAG_IP_CSUM;
2761 if (m_head->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
2762 csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
2763 if (m_head->m_flags & M_LASTFRAG)
2764 csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
2765 else if (m_head->m_flags & M_FRAG)
2766 csum_flags |= BGE_TXBDFLAG_IP_FRAG;
2768 if (m_head->m_flags & M_VLANTAG) {
2769 csum_flags |= BGE_TXBDFLAG_VLAN_TAG;
2770 vlan_tag = m_head->m_pkthdr.ether_vlantag;
2774 map = txr->bnx_tx_dmamap[idx];
2776 maxsegs = (BGE_TX_RING_CNT - txr->bnx_txcnt) - BNX_NSEG_RSVD;
2777 KASSERT(maxsegs >= BNX_NSEG_SPARE,
2778 ("not enough segments %d", maxsegs));
2780 if (maxsegs > BNX_NSEG_NEW)
2781 maxsegs = BNX_NSEG_NEW;
2784 * Pad outbound frame to BGE_MIN_FRAMELEN for an unusual reason.
2785 * The bge hardware will pad out Tx runts to BGE_MIN_FRAMELEN,
2786 * but when such padded frames employ the bge IP/TCP checksum
2787 * offload, the hardware checksum assist gives incorrect results
2788 * (possibly from incorporating its own padding into the UDP/TCP
2789 * checksum; who knows). If we pad such runts with zeros, the
2790 * onboard checksum comes out correct.
2792 if ((csum_flags & BGE_TXBDFLAG_TCP_UDP_CSUM) &&
2793 m_head->m_pkthdr.len < BNX_MIN_FRAMELEN) {
2794 error = m_devpad(m_head, BNX_MIN_FRAMELEN);
2799 if ((txr->bnx_sc->bnx_flags & BNX_FLAG_SHORTDMA) &&
2800 m_head->m_next != NULL) {
2801 m_new = bnx_defrag_shortdma(m_head);
2802 if (m_new == NULL) {
2806 *m_head0 = m_head = m_new;
2808 if ((m_head->m_pkthdr.csum_flags & CSUM_TSO) == 0 &&
2809 txr->bnx_sc->bnx_force_defrag && m_head->m_next != NULL) {
2811 * Forcefully defragment mbuf chain to overcome hardware
2812 * limitation which only support a single outstanding
2813 * DMA read operation. If it fails, keep moving on using
2814 * the original mbuf chain.
2816 m_new = m_defrag(m_head, MB_DONTWAIT);
2818 *m_head0 = m_head = m_new;
2821 error = bus_dmamap_load_mbuf_defrag(txr->bnx_tx_mtag, map,
2822 m_head0, segs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
2825 *segs_used += nsegs;
2828 bus_dmamap_sync(txr->bnx_tx_mtag, map, BUS_DMASYNC_PREWRITE);
2830 for (i = 0; ; i++) {
2831 d = &txr->bnx_tx_ring[idx];
2833 d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr);
2834 d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr);
2835 d->bge_len = segs[i].ds_len;
2836 d->bge_flags = csum_flags;
2837 d->bge_vlan_tag = vlan_tag;
2842 BNX_INC(idx, BGE_TX_RING_CNT);
2844 /* Mark the last segment as end of packet... */
2845 d->bge_flags |= BGE_TXBDFLAG_END;
2848 * Insure that the map for this transmission is placed at
2849 * the array index of the last descriptor in this chain.
2851 txr->bnx_tx_dmamap[*txidx] = txr->bnx_tx_dmamap[idx];
2852 txr->bnx_tx_dmamap[idx] = map;
2853 txr->bnx_tx_chain[idx] = m_head;
2854 txr->bnx_txcnt += nsegs;
2856 BNX_INC(idx, BGE_TX_RING_CNT);
2867 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
2868 * to the mbuf data regions directly in the transmit descriptors.
2871 bnx_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
2873 struct bnx_softc *sc = ifp->if_softc;
2874 struct bnx_tx_ring *txr = &sc->bnx_tx_ring[0]; /* XXX */
2875 struct mbuf *m_head = NULL;
2879 ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq);
2881 if ((ifp->if_flags & IFF_RUNNING) == 0 || ifq_is_oactive(&ifp->if_snd))
2884 prodidx = txr->bnx_tx_prodidx;
2886 while (txr->bnx_tx_chain[prodidx] == NULL) {
2888 * Sanity check: avoid coming within BGE_NSEG_RSVD
2889 * descriptors of the end of the ring. Also make
2890 * sure there are BGE_NSEG_SPARE descriptors for
2891 * jumbo buffers' or TSO segments' defragmentation.
2893 if ((BGE_TX_RING_CNT - txr->bnx_txcnt) <
2894 (BNX_NSEG_RSVD + BNX_NSEG_SPARE)) {
2895 ifq_set_oactive(&ifp->if_snd);
2899 m_head = ifq_dequeue(&ifp->if_snd, NULL);
2904 * Pack the data into the transmit ring. If we
2905 * don't have room, set the OACTIVE flag and wait
2906 * for the NIC to drain the ring.
2908 if (bnx_encap(txr, &m_head, &prodidx, &nsegs)) {
2909 ifq_set_oactive(&ifp->if_snd);
2910 IFNET_STAT_INC(ifp, oerrors, 1);
2914 if (nsegs >= txr->bnx_tx_wreg) {
2916 bnx_writembx(txr->bnx_sc, BGE_MBX_TX_HOST_PROD0_LO,
2921 ETHER_BPF_MTAP(ifp, m_head);
2924 * Set a timeout in case the chip goes out to lunch.
2931 bnx_writembx(txr->bnx_sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
2933 txr->bnx_tx_prodidx = prodidx;
2939 struct bnx_softc *sc = xsc;
2940 struct ifnet *ifp = &sc->arpcom.ac_if;
2945 ASSERT_SERIALIZED(ifp->if_serializer);
2947 /* Cancel pending I/O and flush buffers. */
2953 * Init the various state machines, ring
2954 * control blocks and firmware.
2956 if (bnx_blockinit(sc)) {
2957 if_printf(ifp, "initialization failure\n");
2963 CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
2964 ETHER_HDR_LEN + ETHER_CRC_LEN + EVL_ENCAPLEN);
2966 /* Load our MAC address. */
2967 m = (uint16_t *)&sc->arpcom.ac_enaddr[0];
2968 CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
2969 CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
2971 /* Enable or disable promiscuous mode as needed. */
2974 /* Program multicast filter. */
2978 if (bnx_init_rx_ring_std(sc)) {
2979 if_printf(ifp, "RX ring initialization failed\n");
2984 /* Init jumbo RX ring. */
2985 if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN)) {
2986 if (bnx_init_rx_ring_jumbo(sc)) {
2987 if_printf(ifp, "Jumbo RX ring initialization failed\n");
2993 /* Init our RX return ring index */
2994 sc->bnx_rx_saved_considx = 0;
2997 for (i = 0; i < sc->bnx_tx_ringcnt; ++i)
2998 bnx_init_tx_ring(&sc->bnx_tx_ring[i]);
3000 /* Enable TX MAC state machine lockup fix. */
3001 mode = CSR_READ_4(sc, BGE_TX_MODE);
3002 mode |= BGE_TXMODE_MBUF_LOCKUP_FIX;
3003 if (sc->bnx_asicrev == BGE_ASICREV_BCM5720 ||
3004 sc->bnx_asicrev == BGE_ASICREV_BCM5762) {
3005 mode &= ~(BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE);
3006 mode |= CSR_READ_4(sc, BGE_TX_MODE) &
3007 (BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE);
3009 /* Turn on transmitter */
3010 CSR_WRITE_4(sc, BGE_TX_MODE, mode | BGE_TXMODE_ENABLE);
3012 /* Turn on receiver */
3013 BNX_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
3016 * Set the number of good frames to receive after RX MBUF
3017 * Low Watermark has been reached. After the RX MAC receives
3018 * this number of frames, it will drop subsequent incoming
3019 * frames until the MBUF High Watermark is reached.
3021 if (BNX_IS_57765_FAMILY(sc))
3022 CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 1);
3024 CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 2);
3026 if (sc->bnx_irq_type == PCI_INTR_TYPE_MSI) {
3028 if_printf(ifp, "MSI_MODE: %#x\n",
3029 CSR_READ_4(sc, BGE_MSI_MODE));
3033 /* Tell firmware we're alive. */
3034 BNX_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3036 /* Enable host interrupts if polling(4) is not enabled. */
3037 PCI_SETBIT(sc->bnx_dev, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA, 4);
3038 #ifdef IFPOLL_ENABLE
3039 if (ifp->if_flags & IFF_NPOLLING)
3040 bnx_disable_intr(sc);
3043 bnx_enable_intr(sc);
3045 bnx_ifmedia_upd(ifp);
3047 ifp->if_flags |= IFF_RUNNING;
3048 ifq_clr_oactive(&ifp->if_snd);
3050 callout_reset_bycpu(&sc->bnx_stat_timer, hz, bnx_tick, sc,
3051 sc->bnx_stat_cpuid);
3055 * Set media options.
3058 bnx_ifmedia_upd(struct ifnet *ifp)
3060 struct bnx_softc *sc = ifp->if_softc;
3062 /* If this is a 1000baseX NIC, enable the TBI port. */
3063 if (sc->bnx_flags & BNX_FLAG_TBI) {
3064 struct ifmedia *ifm = &sc->bnx_ifmedia;
3066 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
3069 switch(IFM_SUBTYPE(ifm->ifm_media)) {
3074 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
3075 BNX_CLRBIT(sc, BGE_MAC_MODE,
3076 BGE_MACMODE_HALF_DUPLEX);
3078 BNX_SETBIT(sc, BGE_MAC_MODE,
3079 BGE_MACMODE_HALF_DUPLEX);
3086 struct mii_data *mii = device_get_softc(sc->bnx_miibus);
3090 if (mii->mii_instance) {
3091 struct mii_softc *miisc;
3093 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
3094 mii_phy_reset(miisc);
3099 * Force an interrupt so that we will call bnx_link_upd
3100 * if needed and clear any pending link state attention.
3101 * Without this we are not getting any further interrupts
3102 * for link state changes and thus will not UP the link and
3103 * not be able to send in bnx_start. The only way to get
3104 * things working was to receive a packet and get an RX
3107 * bnx_tick should help for fiber cards and we might not
3108 * need to do this here if BNX_FLAG_TBI is set but as
3109 * we poll for fiber anyway it should not harm.
3111 BNX_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
3117 * Report current media status.
3120 bnx_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
3122 struct bnx_softc *sc = ifp->if_softc;
3124 if (sc->bnx_flags & BNX_FLAG_TBI) {
3125 ifmr->ifm_status = IFM_AVALID;
3126 ifmr->ifm_active = IFM_ETHER;
3127 if (CSR_READ_4(sc, BGE_MAC_STS) &
3128 BGE_MACSTAT_TBI_PCS_SYNCHED) {
3129 ifmr->ifm_status |= IFM_ACTIVE;
3131 ifmr->ifm_active |= IFM_NONE;
3135 ifmr->ifm_active |= IFM_1000_SX;
3136 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
3137 ifmr->ifm_active |= IFM_HDX;
3139 ifmr->ifm_active |= IFM_FDX;
3141 struct mii_data *mii = device_get_softc(sc->bnx_miibus);
3144 ifmr->ifm_active = mii->mii_media_active;
3145 ifmr->ifm_status = mii->mii_media_status;
3150 bnx_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
3152 struct bnx_softc *sc = ifp->if_softc;
3153 struct ifreq *ifr = (struct ifreq *)data;
3154 int mask, error = 0;
3156 ASSERT_SERIALIZED(ifp->if_serializer);
3160 if ((!BNX_IS_JUMBO_CAPABLE(sc) && ifr->ifr_mtu > ETHERMTU) ||
3161 (BNX_IS_JUMBO_CAPABLE(sc) &&
3162 ifr->ifr_mtu > BNX_JUMBO_MTU)) {
3164 } else if (ifp->if_mtu != ifr->ifr_mtu) {
3165 ifp->if_mtu = ifr->ifr_mtu;
3166 if (ifp->if_flags & IFF_RUNNING)
3171 if (ifp->if_flags & IFF_UP) {
3172 if (ifp->if_flags & IFF_RUNNING) {
3173 mask = ifp->if_flags ^ sc->bnx_if_flags;
3176 * If only the state of the PROMISC flag
3177 * changed, then just use the 'set promisc
3178 * mode' command instead of reinitializing
3179 * the entire NIC. Doing a full re-init
3180 * means reloading the firmware and waiting
3181 * for it to start up, which may take a
3182 * second or two. Similarly for ALLMULTI.
3184 if (mask & IFF_PROMISC)
3186 if (mask & IFF_ALLMULTI)
3191 } else if (ifp->if_flags & IFF_RUNNING) {
3194 sc->bnx_if_flags = ifp->if_flags;
3198 if (ifp->if_flags & IFF_RUNNING)
3203 if (sc->bnx_flags & BNX_FLAG_TBI) {
3204 error = ifmedia_ioctl(ifp, ifr,
3205 &sc->bnx_ifmedia, command);
3207 struct mii_data *mii;
3209 mii = device_get_softc(sc->bnx_miibus);
3210 error = ifmedia_ioctl(ifp, ifr,
3211 &mii->mii_media, command);
3215 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
3216 if (mask & IFCAP_HWCSUM) {
3217 ifp->if_capenable ^= (mask & IFCAP_HWCSUM);
3218 if (ifp->if_capenable & IFCAP_TXCSUM)
3219 ifp->if_hwassist |= BNX_CSUM_FEATURES;
3221 ifp->if_hwassist &= ~BNX_CSUM_FEATURES;
3223 if (mask & IFCAP_TSO) {
3224 ifp->if_capenable ^= (mask & IFCAP_TSO);
3225 if (ifp->if_capenable & IFCAP_TSO)
3226 ifp->if_hwassist |= CSUM_TSO;
3228 ifp->if_hwassist &= ~CSUM_TSO;
3232 error = ether_ioctl(ifp, command, data);
3239 bnx_watchdog(struct ifnet *ifp)
3241 struct bnx_softc *sc = ifp->if_softc;
3243 if_printf(ifp, "watchdog timeout -- resetting\n");
3247 IFNET_STAT_INC(ifp, oerrors, 1);
3249 if (!ifq_is_empty(&ifp->if_snd))
3254 * Stop the adapter and free any mbufs allocated to the
3258 bnx_stop(struct bnx_softc *sc)
3260 struct ifnet *ifp = &sc->arpcom.ac_if;
3263 ASSERT_SERIALIZED(ifp->if_serializer);
3265 callout_stop(&sc->bnx_stat_timer);
3268 * Disable all of the receiver blocks
3270 bnx_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
3271 bnx_stop_block(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
3272 bnx_stop_block(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
3273 bnx_stop_block(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
3274 bnx_stop_block(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
3275 bnx_stop_block(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
3278 * Disable all of the transmit blocks
3280 bnx_stop_block(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
3281 bnx_stop_block(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
3282 bnx_stop_block(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
3283 bnx_stop_block(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
3284 bnx_stop_block(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
3285 bnx_stop_block(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
3288 * Shut down all of the memory managers and related
3291 bnx_stop_block(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
3292 bnx_stop_block(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
3293 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
3294 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
3296 /* Disable host interrupts. */
3297 bnx_disable_intr(sc);
3300 * Tell firmware we're shutting down.
3302 BNX_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3304 /* Free the RX lists. */
3305 bnx_free_rx_ring_std(sc);
3307 /* Free jumbo RX list. */
3308 if (BNX_IS_JUMBO_CAPABLE(sc))
3309 bnx_free_rx_ring_jumbo(sc);
3311 /* Free TX buffers. */
3312 for (i = 0; i < sc->bnx_tx_ringcnt; ++i)
3313 bnx_free_tx_ring(&sc->bnx_tx_ring[i]);
3315 sc->bnx_status_tag = 0;
3317 sc->bnx_coal_chg = 0;
3319 ifp->if_flags &= ~IFF_RUNNING;
3320 ifq_clr_oactive(&ifp->if_snd);
3325 * Stop all chip I/O so that the kernel's probe routines don't
3326 * get confused by errant DMAs when rebooting.
3329 bnx_shutdown(device_t dev)
3331 struct bnx_softc *sc = device_get_softc(dev);
3332 struct ifnet *ifp = &sc->arpcom.ac_if;
3334 lwkt_serialize_enter(ifp->if_serializer);
3337 lwkt_serialize_exit(ifp->if_serializer);
3341 bnx_suspend(device_t dev)
3343 struct bnx_softc *sc = device_get_softc(dev);
3344 struct ifnet *ifp = &sc->arpcom.ac_if;
3346 lwkt_serialize_enter(ifp->if_serializer);
3348 lwkt_serialize_exit(ifp->if_serializer);
3354 bnx_resume(device_t dev)
3356 struct bnx_softc *sc = device_get_softc(dev);
3357 struct ifnet *ifp = &sc->arpcom.ac_if;
3359 lwkt_serialize_enter(ifp->if_serializer);
3361 if (ifp->if_flags & IFF_UP) {
3364 if (!ifq_is_empty(&ifp->if_snd))
3368 lwkt_serialize_exit(ifp->if_serializer);
3374 bnx_setpromisc(struct bnx_softc *sc)
3376 struct ifnet *ifp = &sc->arpcom.ac_if;
3378 if (ifp->if_flags & IFF_PROMISC)
3379 BNX_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
3381 BNX_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
3385 bnx_dma_free(struct bnx_softc *sc)
3389 /* Destroy RX mbuf DMA stuffs. */
3390 if (sc->bnx_cdata.bnx_rx_mtag != NULL) {
3391 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
3392 bus_dmamap_destroy(sc->bnx_cdata.bnx_rx_mtag,
3393 sc->bnx_cdata.bnx_rx_std_dmamap[i]);
3395 bus_dmamap_destroy(sc->bnx_cdata.bnx_rx_mtag,
3396 sc->bnx_cdata.bnx_rx_tmpmap);
3397 bus_dma_tag_destroy(sc->bnx_cdata.bnx_rx_mtag);
3400 /* Destroy TX rings */
3401 if (sc->bnx_tx_ring != NULL) {
3402 for (i = 0; i < sc->bnx_tx_ringcnt; ++i)
3403 bnx_destroy_tx_ring(&sc->bnx_tx_ring[i]);
3404 kfree(sc->bnx_tx_ring, M_DEVBUF);
3407 /* Destroy standard RX ring */
3408 bnx_dma_block_free(sc->bnx_cdata.bnx_rx_std_ring_tag,
3409 sc->bnx_cdata.bnx_rx_std_ring_map,
3410 sc->bnx_ldata.bnx_rx_std_ring);
3412 if (BNX_IS_JUMBO_CAPABLE(sc))
3413 bnx_free_jumbo_mem(sc);
3415 /* Destroy RX return ring */
3416 bnx_dma_block_free(sc->bnx_cdata.bnx_rx_return_ring_tag,
3417 sc->bnx_cdata.bnx_rx_return_ring_map,
3418 sc->bnx_ldata.bnx_rx_return_ring);
3420 /* Destroy status block */
3421 bnx_dma_block_free(sc->bnx_cdata.bnx_status_tag,
3422 sc->bnx_cdata.bnx_status_map,
3423 sc->bnx_ldata.bnx_status_block);
3425 /* Destroy the parent tag */
3426 if (sc->bnx_cdata.bnx_parent_tag != NULL)
3427 bus_dma_tag_destroy(sc->bnx_cdata.bnx_parent_tag);
3431 bnx_dma_alloc(struct bnx_softc *sc)
3433 struct ifnet *ifp = &sc->arpcom.ac_if;
3437 * Allocate the parent bus DMA tag appropriate for PCI.
3439 * All of the NetExtreme/NetLink controllers have 4GB boundary
3441 * Whenever an address crosses a multiple of the 4GB boundary
3442 * (including 4GB, 8Gb, 12Gb, etc.) and makes the transition
3443 * from 0xX_FFFF_FFFF to 0x(X+1)_0000_0000 an internal DMA
3444 * state machine will lockup and cause the device to hang.
3446 error = bus_dma_tag_create(NULL, 1, BGE_DMA_BOUNDARY_4G,
3447 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3449 BUS_SPACE_MAXSIZE_32BIT, 0,
3450 BUS_SPACE_MAXSIZE_32BIT,
3451 0, &sc->bnx_cdata.bnx_parent_tag);
3453 if_printf(ifp, "could not allocate parent dma tag\n");
3458 * Create DMA tag and maps for RX mbufs.
3460 error = bus_dma_tag_create(sc->bnx_cdata.bnx_parent_tag, 1, 0,
3461 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3462 NULL, NULL, MCLBYTES, 1, MCLBYTES,
3463 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK,
3464 &sc->bnx_cdata.bnx_rx_mtag);
3466 if_printf(ifp, "could not allocate RX mbuf dma tag\n");
3470 error = bus_dmamap_create(sc->bnx_cdata.bnx_rx_mtag,
3471 BUS_DMA_WAITOK, &sc->bnx_cdata.bnx_rx_tmpmap);
3473 bus_dma_tag_destroy(sc->bnx_cdata.bnx_rx_mtag);
3474 sc->bnx_cdata.bnx_rx_mtag = NULL;
3478 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
3479 error = bus_dmamap_create(sc->bnx_cdata.bnx_rx_mtag,
3481 &sc->bnx_cdata.bnx_rx_std_dmamap[i]);
3485 for (j = 0; j < i; ++j) {
3486 bus_dmamap_destroy(sc->bnx_cdata.bnx_rx_mtag,
3487 sc->bnx_cdata.bnx_rx_std_dmamap[j]);
3489 bus_dma_tag_destroy(sc->bnx_cdata.bnx_rx_mtag);
3490 sc->bnx_cdata.bnx_rx_mtag = NULL;
3492 if_printf(ifp, "could not create DMA map for RX\n");
3498 * Create DMA stuffs for standard RX ring.
3500 error = bnx_dma_block_alloc(sc, BGE_STD_RX_RING_SZ,
3501 &sc->bnx_cdata.bnx_rx_std_ring_tag,
3502 &sc->bnx_cdata.bnx_rx_std_ring_map,
3503 (void *)&sc->bnx_ldata.bnx_rx_std_ring,
3504 &sc->bnx_ldata.bnx_rx_std_ring_paddr);
3506 if_printf(ifp, "could not create std RX ring\n");
3511 * Create jumbo buffer pool.
3513 if (BNX_IS_JUMBO_CAPABLE(sc)) {
3514 error = bnx_alloc_jumbo_mem(sc);
3516 if_printf(ifp, "could not create jumbo buffer pool\n");
3522 * Create DMA stuffs for RX return ring.
3524 error = bnx_dma_block_alloc(sc,
3525 BGE_RX_RTN_RING_SZ(BNX_RETURN_RING_CNT),
3526 &sc->bnx_cdata.bnx_rx_return_ring_tag,
3527 &sc->bnx_cdata.bnx_rx_return_ring_map,
3528 (void *)&sc->bnx_ldata.bnx_rx_return_ring,
3529 &sc->bnx_ldata.bnx_rx_return_ring_paddr);
3531 if_printf(ifp, "could not create RX ret ring\n");
3536 * Create DMA stuffs for status block.
3538 error = bnx_dma_block_alloc(sc, BGE_STATUS_BLK_SZ,
3539 &sc->bnx_cdata.bnx_status_tag,
3540 &sc->bnx_cdata.bnx_status_map,
3541 (void *)&sc->bnx_ldata.bnx_status_block,
3542 &sc->bnx_ldata.bnx_status_block_paddr);
3544 if_printf(ifp, "could not create status block\n");
3548 sc->bnx_tx_ring = kmalloc_cachealign(
3549 sizeof(struct bnx_tx_ring) * sc->bnx_tx_ringcnt, M_DEVBUF,
3551 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) {
3552 struct bnx_tx_ring *txr = &sc->bnx_tx_ring[i];
3555 error = bnx_create_tx_ring(txr);
3557 device_printf(sc->bnx_dev,
3558 "can't create %dth tx ring\n", i);
3567 bnx_dma_block_alloc(struct bnx_softc *sc, bus_size_t size, bus_dma_tag_t *tag,
3568 bus_dmamap_t *map, void **addr, bus_addr_t *paddr)
3573 error = bus_dmamem_coherent(sc->bnx_cdata.bnx_parent_tag, PAGE_SIZE, 0,
3574 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3575 size, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
3579 *tag = dmem.dmem_tag;
3580 *map = dmem.dmem_map;
3581 *addr = dmem.dmem_addr;
3582 *paddr = dmem.dmem_busaddr;
3588 bnx_dma_block_free(bus_dma_tag_t tag, bus_dmamap_t map, void *addr)
3591 bus_dmamap_unload(tag, map);
3592 bus_dmamem_free(tag, addr, map);
3593 bus_dma_tag_destroy(tag);
3598 bnx_tbi_link_upd(struct bnx_softc *sc, uint32_t status)
3600 struct ifnet *ifp = &sc->arpcom.ac_if;
3602 #define PCS_ENCODE_ERR (BGE_MACSTAT_PORT_DECODE_ERROR|BGE_MACSTAT_MI_COMPLETE)
3605 * Sometimes PCS encoding errors are detected in
3606 * TBI mode (on fiber NICs), and for some reason
3607 * the chip will signal them as link changes.
3608 * If we get a link change event, but the 'PCS
3609 * encoding error' bit in the MAC status register
3610 * is set, don't bother doing a link check.
3611 * This avoids spurious "gigabit link up" messages
3612 * that sometimes appear on fiber NICs during
3613 * periods of heavy traffic.
3615 if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
3616 if (!sc->bnx_link) {
3618 if (sc->bnx_asicrev == BGE_ASICREV_BCM5704) {
3619 BNX_CLRBIT(sc, BGE_MAC_MODE,
3620 BGE_MACMODE_TBI_SEND_CFGS);
3622 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
3625 if_printf(ifp, "link UP\n");
3627 ifp->if_link_state = LINK_STATE_UP;
3628 if_link_state_change(ifp);
3630 } else if ((status & PCS_ENCODE_ERR) != PCS_ENCODE_ERR) {
3635 if_printf(ifp, "link DOWN\n");
3637 ifp->if_link_state = LINK_STATE_DOWN;
3638 if_link_state_change(ifp);
3642 #undef PCS_ENCODE_ERR
3644 /* Clear the attention. */
3645 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
3646 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
3647 BGE_MACSTAT_LINK_CHANGED);
3651 bnx_copper_link_upd(struct bnx_softc *sc, uint32_t status __unused)
3653 struct ifnet *ifp = &sc->arpcom.ac_if;
3654 struct mii_data *mii = device_get_softc(sc->bnx_miibus);
3657 bnx_miibus_statchg(sc->bnx_dev);
3661 if_printf(ifp, "link UP\n");
3663 if_printf(ifp, "link DOWN\n");
3666 /* Clear the attention. */
3667 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
3668 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
3669 BGE_MACSTAT_LINK_CHANGED);
3673 bnx_autopoll_link_upd(struct bnx_softc *sc, uint32_t status __unused)
3675 struct ifnet *ifp = &sc->arpcom.ac_if;
3676 struct mii_data *mii = device_get_softc(sc->bnx_miibus);
3680 if (!sc->bnx_link &&
3681 (mii->mii_media_status & IFM_ACTIVE) &&
3682 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
3685 if_printf(ifp, "link UP\n");
3686 } else if (sc->bnx_link &&
3687 (!(mii->mii_media_status & IFM_ACTIVE) ||
3688 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
3691 if_printf(ifp, "link DOWN\n");
3694 /* Clear the attention. */
3695 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
3696 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
3697 BGE_MACSTAT_LINK_CHANGED);
3701 bnx_sysctl_rx_coal_ticks(SYSCTL_HANDLER_ARGS)
3703 struct bnx_softc *sc = arg1;
3705 return bnx_sysctl_coal_chg(oidp, arg1, arg2, req,
3706 &sc->bnx_rx_coal_ticks,
3707 BNX_RX_COAL_TICKS_MIN, BNX_RX_COAL_TICKS_MAX,
3708 BNX_RX_COAL_TICKS_CHG);
3712 bnx_sysctl_tx_coal_ticks(SYSCTL_HANDLER_ARGS)
3714 struct bnx_softc *sc = arg1;
3716 return bnx_sysctl_coal_chg(oidp, arg1, arg2, req,
3717 &sc->bnx_tx_coal_ticks,
3718 BNX_TX_COAL_TICKS_MIN, BNX_TX_COAL_TICKS_MAX,
3719 BNX_TX_COAL_TICKS_CHG);
3723 bnx_sysctl_rx_coal_bds(SYSCTL_HANDLER_ARGS)
3725 struct bnx_softc *sc = arg1;
3727 return bnx_sysctl_coal_chg(oidp, arg1, arg2, req,
3728 &sc->bnx_rx_coal_bds,
3729 BNX_RX_COAL_BDS_MIN, BNX_RX_COAL_BDS_MAX,
3730 BNX_RX_COAL_BDS_CHG);
3734 bnx_sysctl_tx_coal_bds(SYSCTL_HANDLER_ARGS)
3736 struct bnx_softc *sc = arg1;
3738 return bnx_sysctl_coal_chg(oidp, arg1, arg2, req,
3739 &sc->bnx_tx_coal_bds,
3740 BNX_TX_COAL_BDS_MIN, BNX_TX_COAL_BDS_MAX,
3741 BNX_TX_COAL_BDS_CHG);
3745 bnx_sysctl_rx_coal_bds_int(SYSCTL_HANDLER_ARGS)
3747 struct bnx_softc *sc = arg1;
3749 return bnx_sysctl_coal_chg(oidp, arg1, arg2, req,
3750 &sc->bnx_rx_coal_bds_int,
3751 BNX_RX_COAL_BDS_MIN, BNX_RX_COAL_BDS_MAX,
3752 BNX_RX_COAL_BDS_INT_CHG);
3756 bnx_sysctl_tx_coal_bds_int(SYSCTL_HANDLER_ARGS)
3758 struct bnx_softc *sc = arg1;
3760 return bnx_sysctl_coal_chg(oidp, arg1, arg2, req,
3761 &sc->bnx_tx_coal_bds_int,
3762 BNX_TX_COAL_BDS_MIN, BNX_TX_COAL_BDS_MAX,
3763 BNX_TX_COAL_BDS_INT_CHG);
3767 bnx_sysctl_coal_chg(SYSCTL_HANDLER_ARGS, uint32_t *coal,
3768 int coal_min, int coal_max, uint32_t coal_chg_mask)
3770 struct bnx_softc *sc = arg1;
3771 struct ifnet *ifp = &sc->arpcom.ac_if;
3774 lwkt_serialize_enter(ifp->if_serializer);
3777 error = sysctl_handle_int(oidp, &v, 0, req);
3778 if (!error && req->newptr != NULL) {
3779 if (v < coal_min || v > coal_max) {
3783 sc->bnx_coal_chg |= coal_chg_mask;
3787 lwkt_serialize_exit(ifp->if_serializer);
3792 bnx_coal_change(struct bnx_softc *sc)
3794 struct ifnet *ifp = &sc->arpcom.ac_if;
3796 ASSERT_SERIALIZED(ifp->if_serializer);
3798 if (sc->bnx_coal_chg & BNX_RX_COAL_TICKS_CHG) {
3799 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS,
3800 sc->bnx_rx_coal_ticks);
3802 CSR_READ_4(sc, BGE_HCC_RX_COAL_TICKS);
3805 if_printf(ifp, "rx_coal_ticks -> %u\n",
3806 sc->bnx_rx_coal_ticks);
3810 if (sc->bnx_coal_chg & BNX_TX_COAL_TICKS_CHG) {
3811 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS,
3812 sc->bnx_tx_coal_ticks);
3814 CSR_READ_4(sc, BGE_HCC_TX_COAL_TICKS);
3817 if_printf(ifp, "tx_coal_ticks -> %u\n",
3818 sc->bnx_tx_coal_ticks);
3822 if (sc->bnx_coal_chg & BNX_RX_COAL_BDS_CHG) {
3823 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS,
3824 sc->bnx_rx_coal_bds);
3826 CSR_READ_4(sc, BGE_HCC_RX_MAX_COAL_BDS);
3829 if_printf(ifp, "rx_coal_bds -> %u\n",
3830 sc->bnx_rx_coal_bds);
3834 if (sc->bnx_coal_chg & BNX_TX_COAL_BDS_CHG) {
3835 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS,
3836 sc->bnx_tx_coal_bds);
3838 CSR_READ_4(sc, BGE_HCC_TX_MAX_COAL_BDS);
3841 if_printf(ifp, "tx_coal_bds -> %u\n",
3842 sc->bnx_tx_coal_bds);
3846 if (sc->bnx_coal_chg & BNX_RX_COAL_BDS_INT_CHG) {
3847 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT,
3848 sc->bnx_rx_coal_bds_int);
3850 CSR_READ_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT);
3853 if_printf(ifp, "rx_coal_bds_int -> %u\n",
3854 sc->bnx_rx_coal_bds_int);
3858 if (sc->bnx_coal_chg & BNX_TX_COAL_BDS_INT_CHG) {
3859 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT,
3860 sc->bnx_tx_coal_bds_int);
3862 CSR_READ_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT);
3865 if_printf(ifp, "tx_coal_bds_int -> %u\n",
3866 sc->bnx_tx_coal_bds_int);
3870 sc->bnx_coal_chg = 0;
3874 bnx_intr_check(void *xsc)
3876 struct bnx_softc *sc = xsc;
3877 struct bnx_tx_ring *txr = &sc->bnx_tx_ring[0]; /* XXX */
3878 struct ifnet *ifp = &sc->arpcom.ac_if;
3879 struct bge_status_block *sblk = sc->bnx_ldata.bnx_status_block;
3881 lwkt_serialize_enter(ifp->if_serializer);
3883 KKASSERT(mycpuid == sc->bnx_intr_cpuid);
3885 if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) != IFF_RUNNING) {
3886 lwkt_serialize_exit(ifp->if_serializer);
3890 if (sblk->bge_idx[0].bge_rx_prod_idx != sc->bnx_rx_saved_considx ||
3891 sblk->bge_idx[0].bge_tx_cons_idx != txr->bnx_tx_saved_considx) {
3892 if (sc->bnx_rx_check_considx == sc->bnx_rx_saved_considx &&
3893 sc->bnx_tx_check_considx == txr->bnx_tx_saved_considx) {
3894 if (!sc->bnx_intr_maylose) {
3895 sc->bnx_intr_maylose = TRUE;
3899 if_printf(ifp, "lost interrupt\n");
3903 sc->bnx_intr_maylose = FALSE;
3904 sc->bnx_rx_check_considx = sc->bnx_rx_saved_considx;
3905 sc->bnx_tx_check_considx = txr->bnx_tx_saved_considx;
3908 callout_reset(&sc->bnx_intr_timer, BNX_INTR_CKINTVL,
3909 bnx_intr_check, sc);
3910 lwkt_serialize_exit(ifp->if_serializer);
3914 bnx_enable_intr(struct bnx_softc *sc)
3916 struct ifnet *ifp = &sc->arpcom.ac_if;
3918 lwkt_serialize_handler_enable(ifp->if_serializer);
3923 bnx_writembx(sc, BGE_MBX_IRQ0_LO, sc->bnx_status_tag << 24);
3924 if (sc->bnx_flags & BNX_FLAG_ONESHOT_MSI) {
3925 /* XXX Linux driver */
3926 bnx_writembx(sc, BGE_MBX_IRQ0_LO, sc->bnx_status_tag << 24);
3930 * Unmask the interrupt when we stop polling.
3932 PCI_CLRBIT(sc->bnx_dev, BGE_PCI_MISC_CTL,
3933 BGE_PCIMISCCTL_MASK_PCI_INTR, 4);
3936 * Trigger another interrupt, since above writing
3937 * to interrupt mailbox0 may acknowledge pending
3940 BNX_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
3942 if (sc->bnx_flags & BNX_FLAG_STATUSTAG_BUG) {
3943 sc->bnx_intr_maylose = FALSE;
3944 sc->bnx_rx_check_considx = 0;
3945 sc->bnx_tx_check_considx = 0;
3948 if_printf(ifp, "status tag bug workaround\n");
3950 /* 10ms check interval */
3951 callout_reset_bycpu(&sc->bnx_intr_timer, BNX_INTR_CKINTVL,
3952 bnx_intr_check, sc, sc->bnx_intr_cpuid);
3957 bnx_disable_intr(struct bnx_softc *sc)
3959 struct ifnet *ifp = &sc->arpcom.ac_if;
3962 * Mask the interrupt when we start polling.
3964 PCI_SETBIT(sc->bnx_dev, BGE_PCI_MISC_CTL,
3965 BGE_PCIMISCCTL_MASK_PCI_INTR, 4);
3968 * Acknowledge possible asserted interrupt.
3970 bnx_writembx(sc, BGE_MBX_IRQ0_LO, 1);
3972 callout_stop(&sc->bnx_intr_timer);
3973 sc->bnx_intr_maylose = FALSE;
3974 sc->bnx_rx_check_considx = 0;
3975 sc->bnx_tx_check_considx = 0;
3977 sc->bnx_npoll.ifpc_stcount = 0;
3979 lwkt_serialize_handler_disable(ifp->if_serializer);
3983 bnx_get_eaddr_mem(struct bnx_softc *sc, uint8_t ether_addr[])
3988 mac_addr = bnx_readmem_ind(sc, 0x0c14);
3989 if ((mac_addr >> 16) == 0x484b) {
3990 ether_addr[0] = (uint8_t)(mac_addr >> 8);
3991 ether_addr[1] = (uint8_t)mac_addr;
3992 mac_addr = bnx_readmem_ind(sc, 0x0c18);
3993 ether_addr[2] = (uint8_t)(mac_addr >> 24);
3994 ether_addr[3] = (uint8_t)(mac_addr >> 16);
3995 ether_addr[4] = (uint8_t)(mac_addr >> 8);
3996 ether_addr[5] = (uint8_t)mac_addr;
4003 bnx_get_eaddr_nvram(struct bnx_softc *sc, uint8_t ether_addr[])
4005 int mac_offset = BGE_EE_MAC_OFFSET;
4007 if (BNX_IS_5717_PLUS(sc)) {
4010 f = pci_get_function(sc->bnx_dev);
4012 mac_offset = BGE_EE_MAC_OFFSET_5717;
4014 mac_offset += BGE_EE_MAC_OFFSET_5717_OFF;
4017 return bnx_read_nvram(sc, ether_addr, mac_offset + 2, ETHER_ADDR_LEN);
4021 bnx_get_eaddr_eeprom(struct bnx_softc *sc, uint8_t ether_addr[])
4023 if (sc->bnx_flags & BNX_FLAG_NO_EEPROM)
4026 return bnx_read_eeprom(sc, ether_addr, BGE_EE_MAC_OFFSET + 2,
4031 bnx_get_eaddr(struct bnx_softc *sc, uint8_t eaddr[])
4033 static const bnx_eaddr_fcn_t bnx_eaddr_funcs[] = {
4034 /* NOTE: Order is critical */
4036 bnx_get_eaddr_nvram,
4037 bnx_get_eaddr_eeprom,
4040 const bnx_eaddr_fcn_t *func;
4042 for (func = bnx_eaddr_funcs; *func != NULL; ++func) {
4043 if ((*func)(sc, eaddr) == 0)
4046 return (*func == NULL ? ENXIO : 0);
4050 * NOTE: 'm' is not freed upon failure
4053 bnx_defrag_shortdma(struct mbuf *m)
4059 * If device receive two back-to-back send BDs with less than
4060 * or equal to 8 total bytes then the device may hang. The two
4061 * back-to-back send BDs must in the same frame for this failure
4062 * to occur. Scan mbuf chains and see whether two back-to-back
4063 * send BDs are there. If this is the case, allocate new mbuf
4064 * and copy the frame to workaround the silicon bug.
4066 for (n = m, found = 0; n != NULL; n = n->m_next) {
4077 n = m_defrag(m, MB_DONTWAIT);
4084 bnx_stop_block(struct bnx_softc *sc, bus_size_t reg, uint32_t bit)
4088 BNX_CLRBIT(sc, reg, bit);
4089 for (i = 0; i < BNX_TIMEOUT; i++) {
4090 if ((CSR_READ_4(sc, reg) & bit) == 0)
4097 bnx_link_poll(struct bnx_softc *sc)
4101 status = CSR_READ_4(sc, BGE_MAC_STS);
4102 if ((status & sc->bnx_link_chg) || sc->bnx_link_evt) {
4103 sc->bnx_link_evt = 0;
4104 sc->bnx_link_upd(sc, status);
4109 bnx_enable_msi(struct bnx_softc *sc)
4113 msi_mode = CSR_READ_4(sc, BGE_MSI_MODE);
4114 msi_mode |= BGE_MSIMODE_ENABLE;
4115 if (sc->bnx_flags & BNX_FLAG_ONESHOT_MSI) {
4118 * 5718-PG105-R says that "one shot" mode
4119 * does not work if MSI is used, however,
4120 * it obviously works.
4122 msi_mode &= ~BGE_MSIMODE_ONESHOT_DISABLE;
4124 CSR_WRITE_4(sc, BGE_MSI_MODE, msi_mode);
4128 bnx_dma_swap_options(struct bnx_softc *sc)
4130 uint32_t dma_options;
4132 dma_options = BGE_MODECTL_WORDSWAP_NONFRAME |
4133 BGE_MODECTL_BYTESWAP_DATA | BGE_MODECTL_WORDSWAP_DATA;
4134 #if BYTE_ORDER == BIG_ENDIAN
4135 dma_options |= BGE_MODECTL_BYTESWAP_NONFRAME;
4137 if (sc->bnx_asicrev == BGE_ASICREV_BCM5720 ||
4138 sc->bnx_asicrev == BGE_ASICREV_BCM5762) {
4139 dma_options |= BGE_MODECTL_BYTESWAP_B2HRX_DATA |
4140 BGE_MODECTL_WORDSWAP_B2HRX_DATA | BGE_MODECTL_B2HRX_ENABLE |
4141 BGE_MODECTL_HTX2B_ENABLE;
4147 bnx_setup_tso(struct bnx_tx_ring *txr, struct mbuf **mp,
4148 uint16_t *mss0, uint16_t *flags0)
4153 int thoff, iphlen, hoff, hlen;
4154 uint16_t flags, mss;
4157 KASSERT(M_WRITABLE(m), ("TSO mbuf not writable"));
4159 hoff = m->m_pkthdr.csum_lhlen;
4160 iphlen = m->m_pkthdr.csum_iphlen;
4161 thoff = m->m_pkthdr.csum_thlen;
4163 KASSERT(hoff > 0, ("invalid ether header len"));
4164 KASSERT(iphlen > 0, ("invalid ip header len"));
4165 KASSERT(thoff > 0, ("invalid tcp header len"));
4167 if (__predict_false(m->m_len < hoff + iphlen + thoff)) {
4168 m = m_pullup(m, hoff + iphlen + thoff);
4175 ip = mtodoff(m, struct ip *, hoff);
4176 th = mtodoff(m, struct tcphdr *, hoff + iphlen);
4178 mss = m->m_pkthdr.tso_segsz;
4179 flags = BGE_TXBDFLAG_CPU_PRE_DMA | BGE_TXBDFLAG_CPU_POST_DMA;
4181 ip->ip_len = htons(mss + iphlen + thoff);
4184 hlen = (iphlen + thoff) >> 2;
4185 mss |= ((hlen & 0x3) << 14);
4186 flags |= ((hlen & 0xf8) << 7) | ((hlen & 0x4) << 2);
4195 bnx_create_tx_ring(struct bnx_tx_ring *txr)
4197 bus_size_t txmaxsz, txmaxsegsz;
4201 * Create DMA tag and maps for TX mbufs.
4203 if (txr->bnx_sc->bnx_flags & BNX_FLAG_TSO)
4204 txmaxsz = IP_MAXPACKET + sizeof(struct ether_vlan_header);
4206 txmaxsz = BNX_JUMBO_FRAMELEN;
4207 if (txr->bnx_sc->bnx_asicrev == BGE_ASICREV_BCM57766)
4208 txmaxsegsz = MCLBYTES;
4210 txmaxsegsz = PAGE_SIZE;
4211 error = bus_dma_tag_create(txr->bnx_sc->bnx_cdata.bnx_parent_tag,
4212 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
4213 txmaxsz, BNX_NSEG_NEW, txmaxsegsz,
4214 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
4217 device_printf(txr->bnx_sc->bnx_dev,
4218 "could not allocate TX mbuf dma tag\n");
4222 for (i = 0; i < BGE_TX_RING_CNT; i++) {
4223 error = bus_dmamap_create(txr->bnx_tx_mtag,
4224 BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
4225 &txr->bnx_tx_dmamap[i]);
4229 for (j = 0; j < i; ++j) {
4230 bus_dmamap_destroy(txr->bnx_tx_mtag,
4231 txr->bnx_tx_dmamap[j]);
4233 bus_dma_tag_destroy(txr->bnx_tx_mtag);
4234 txr->bnx_tx_mtag = NULL;
4236 device_printf(txr->bnx_sc->bnx_dev,
4237 "could not create DMA map for TX\n");
4243 * Create DMA stuffs for TX ring.
4245 error = bnx_dma_block_alloc(txr->bnx_sc, BGE_TX_RING_SZ,
4246 &txr->bnx_tx_ring_tag, &txr->bnx_tx_ring_map,
4247 (void *)&txr->bnx_tx_ring, &txr->bnx_tx_ring_paddr);
4249 device_printf(txr->bnx_sc->bnx_dev,
4250 "could not create TX ring\n");
4254 txr->bnx_tx_wreg = BNX_TX_WREG_NSEGS;
4260 bnx_destroy_tx_ring(struct bnx_tx_ring *txr)
4262 /* Destroy TX mbuf DMA stuffs. */
4263 if (txr->bnx_tx_mtag != NULL) {
4266 for (i = 0; i < BGE_TX_RING_CNT; i++) {
4267 bus_dmamap_destroy(txr->bnx_tx_mtag,
4268 txr->bnx_tx_dmamap[i]);
4270 bus_dma_tag_destroy(txr->bnx_tx_mtag);
4273 /* Destroy TX ring */
4274 bnx_dma_block_free(txr->bnx_tx_ring_tag,
4275 txr->bnx_tx_ring_map, txr->bnx_tx_ring);