2 * Copyright (c) 2001 Wind River Systems
3 * Copyright (c) 1997, 1998, 1999, 2001
4 * Bill Paul <wpaul@windriver.com>. All rights reserved.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 * must display the following acknowledgement:
16 * This product includes software developed by Bill Paul.
17 * 4. Neither the name of the author nor the names of any co-contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31 * THE POSSIBILITY OF SUCH DAMAGE.
33 * $FreeBSD: src/sys/dev/bge/if_bge.c,v 1.3.2.39 2005/07/03 03:41:18 silby Exp $
37 #include "opt_ifpoll.h"
39 #include <sys/param.h>
41 #include <sys/endian.h>
42 #include <sys/kernel.h>
43 #include <sys/interrupt.h>
45 #include <sys/malloc.h>
46 #include <sys/queue.h>
48 #include <sys/serialize.h>
49 #include <sys/socket.h>
50 #include <sys/sockio.h>
51 #include <sys/sysctl.h>
53 #include <netinet/ip.h>
54 #include <netinet/tcp.h>
57 #include <net/ethernet.h>
59 #include <net/if_arp.h>
60 #include <net/if_dl.h>
61 #include <net/if_media.h>
62 #include <net/if_poll.h>
63 #include <net/if_types.h>
64 #include <net/ifq_var.h>
65 #include <net/vlan/if_vlan_var.h>
66 #include <net/vlan/if_vlan_ether.h>
68 #include <dev/netif/mii_layer/mii.h>
69 #include <dev/netif/mii_layer/miivar.h>
70 #include <dev/netif/mii_layer/brgphyreg.h>
72 #include <bus/pci/pcidevs.h>
73 #include <bus/pci/pcireg.h>
74 #include <bus/pci/pcivar.h>
76 #include <dev/netif/bge/if_bgereg.h>
77 #include <dev/netif/bnx/if_bnxvar.h>
79 /* "device miibus" required. See GENERIC if you get errors here. */
80 #include "miibus_if.h"
/* Hardware checksum offloads this driver advertises to the stack. */
82 #define BNX_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP)
/* Interrupt-check callout period, converted from 10ms to ticks. */
84 #define BNX_INTR_CKINTVL ((10 * hz) / 1000) /* 10ms */
/*
 * Table of supported PCI vendor/device IDs with the description string
 * printed at probe time.  Scanned linearly by bnx_probe().
 * NOTE(review): the struct member declarations and the table terminator
 * are not visible in this fragment.
 */
86 static const struct bnx_type {
91 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5717,
92 "Broadcom BCM5717 Gigabit Ethernet" },
93 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5717C,
94 "Broadcom BCM5717C Gigabit Ethernet" },
95 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5718,
96 "Broadcom BCM5718 Gigabit Ethernet" },
97 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5719,
98 "Broadcom BCM5719 Gigabit Ethernet" },
99 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5720_ALT,
100 "Broadcom BCM5720 Gigabit Ethernet" },
102 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5725,
103 "Broadcom BCM5725 Gigabit Ethernet" },
104 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5727,
105 "Broadcom BCM5727 Gigabit Ethernet" },
106 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5762,
107 "Broadcom BCM5762 Gigabit Ethernet" },
109 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57761,
110 "Broadcom BCM57761 Gigabit Ethernet" },
111 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57762,
112 "Broadcom BCM57762 Gigabit Ethernet" },
113 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57765,
114 "Broadcom BCM57765 Gigabit Ethernet" },
115 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57766,
116 "Broadcom BCM57766 Gigabit Ethernet" },
117 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57781,
118 "Broadcom BCM57781 Gigabit Ethernet" },
119 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57782,
120 "Broadcom BCM57782 Gigabit Ethernet" },
121 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57785,
122 "Broadcom BCM57785 Gigabit Ethernet" },
123 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57786,
124 "Broadcom BCM57786 Gigabit Ethernet" },
125 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57791,
126 "Broadcom BCM57791 Fast Ethernet" },
127 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57795,
128 "Broadcom BCM57795 Fast Ethernet" },
/* Chip-capability predicates over the sc->bnx_flags bit field. */
133 #define BNX_IS_JUMBO_CAPABLE(sc) ((sc)->bnx_flags & BNX_FLAG_JUMBO)
134 #define BNX_IS_5717_PLUS(sc) ((sc)->bnx_flags & BNX_FLAG_5717_PLUS)
135 #define BNX_IS_57765_PLUS(sc) ((sc)->bnx_flags & BNX_FLAG_57765_PLUS)
136 #define BNX_IS_57765_FAMILY(sc) \
137 ((sc)->bnx_flags & BNX_FLAG_57765_FAMILY)
139 typedef int (*bnx_eaddr_fcn_t)(struct bnx_softc *, uint8_t[]);
141 static int bnx_probe(device_t);
142 static int bnx_attach(device_t);
143 static int bnx_detach(device_t);
144 static void bnx_shutdown(device_t);
145 static int bnx_suspend(device_t);
146 static int bnx_resume(device_t);
147 static int bnx_miibus_readreg(device_t, int, int);
148 static int bnx_miibus_writereg(device_t, int, int, int);
149 static void bnx_miibus_statchg(device_t);
152 static void bnx_npoll(struct ifnet *, struct ifpoll_info *);
153 static void bnx_npoll_compat(struct ifnet *, void *, int);
155 static void bnx_intr_legacy(void *);
156 static void bnx_msi(void *);
157 static void bnx_msi_oneshot(void *);
158 static void bnx_intr(struct bnx_softc *);
159 static void bnx_enable_intr(struct bnx_softc *);
160 static void bnx_disable_intr(struct bnx_softc *);
161 static void bnx_txeof(struct bnx_tx_ring *, uint16_t);
162 static void bnx_rxeof(struct bnx_rx_ret_ring *, uint16_t, int);
163 static int bnx_alloc_intr(struct bnx_softc *);
164 static int bnx_setup_intr(struct bnx_softc *);
165 static void bnx_free_intr(struct bnx_softc *);
166 static void bnx_teardown_intr(struct bnx_softc *, int);
167 static void bnx_check_intr(void *);
169 static void bnx_start(struct ifnet *, struct ifaltq_subque *);
170 static int bnx_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
171 static void bnx_init(void *);
172 static void bnx_stop(struct bnx_softc *);
173 static void bnx_watchdog(struct ifnet *);
174 static int bnx_ifmedia_upd(struct ifnet *);
175 static void bnx_ifmedia_sts(struct ifnet *, struct ifmediareq *);
176 static void bnx_tick(void *);
178 static int bnx_alloc_jumbo_mem(struct bnx_softc *);
179 static void bnx_free_jumbo_mem(struct bnx_softc *);
180 static struct bnx_jslot
181 *bnx_jalloc(struct bnx_softc *);
182 static void bnx_jfree(void *);
183 static void bnx_jref(void *);
184 static int bnx_newbuf_std(struct bnx_rx_ret_ring *, int, int);
185 static int bnx_newbuf_jumbo(struct bnx_softc *, int, int);
186 static void bnx_setup_rxdesc_std(struct bnx_rx_std_ring *, int);
187 static void bnx_setup_rxdesc_jumbo(struct bnx_softc *, int);
188 static int bnx_init_rx_ring_std(struct bnx_rx_std_ring *);
189 static void bnx_free_rx_ring_std(struct bnx_rx_std_ring *);
190 static int bnx_init_rx_ring_jumbo(struct bnx_softc *);
191 static void bnx_free_rx_ring_jumbo(struct bnx_softc *);
192 static void bnx_free_tx_ring(struct bnx_tx_ring *);
193 static int bnx_init_tx_ring(struct bnx_tx_ring *);
194 static int bnx_create_tx_ring(struct bnx_tx_ring *);
195 static void bnx_destroy_tx_ring(struct bnx_tx_ring *);
196 static int bnx_create_rx_ret_ring(struct bnx_rx_ret_ring *);
197 static void bnx_destroy_rx_ret_ring(struct bnx_rx_ret_ring *);
198 static int bnx_dma_alloc(device_t);
199 static void bnx_dma_free(struct bnx_softc *);
200 static int bnx_dma_block_alloc(struct bnx_softc *, bus_size_t,
201 bus_dma_tag_t *, bus_dmamap_t *, void **, bus_addr_t *);
202 static void bnx_dma_block_free(bus_dma_tag_t, bus_dmamap_t, void *);
204 bnx_defrag_shortdma(struct mbuf *);
205 static int bnx_encap(struct bnx_tx_ring *, struct mbuf **,
207 static int bnx_setup_tso(struct bnx_tx_ring *, struct mbuf **,
208 uint16_t *, uint16_t *);
210 static void bnx_reset(struct bnx_softc *);
211 static int bnx_chipinit(struct bnx_softc *);
212 static int bnx_blockinit(struct bnx_softc *);
213 static void bnx_stop_block(struct bnx_softc *, bus_size_t, uint32_t);
214 static void bnx_enable_msi(struct bnx_softc *sc);
215 static void bnx_setmulti(struct bnx_softc *);
216 static void bnx_setpromisc(struct bnx_softc *);
217 static void bnx_stats_update_regs(struct bnx_softc *);
218 static uint32_t bnx_dma_swap_options(struct bnx_softc *);
220 static uint32_t bnx_readmem_ind(struct bnx_softc *, uint32_t);
221 static void bnx_writemem_ind(struct bnx_softc *, uint32_t, uint32_t);
223 static uint32_t bnx_readreg_ind(struct bnx_softc *, uint32_t);
225 static void bnx_writemem_direct(struct bnx_softc *, uint32_t, uint32_t);
226 static void bnx_writembx(struct bnx_softc *, int, int);
227 static int bnx_read_nvram(struct bnx_softc *, caddr_t, int, int);
228 static uint8_t bnx_eeprom_getbyte(struct bnx_softc *, uint32_t, uint8_t *);
229 static int bnx_read_eeprom(struct bnx_softc *, caddr_t, uint32_t, size_t);
231 static void bnx_tbi_link_upd(struct bnx_softc *, uint32_t);
232 static void bnx_copper_link_upd(struct bnx_softc *, uint32_t);
233 static void bnx_autopoll_link_upd(struct bnx_softc *, uint32_t);
234 static void bnx_link_poll(struct bnx_softc *);
236 static int bnx_get_eaddr_mem(struct bnx_softc *, uint8_t[]);
237 static int bnx_get_eaddr_nvram(struct bnx_softc *, uint8_t[]);
238 static int bnx_get_eaddr_eeprom(struct bnx_softc *, uint8_t[]);
239 static int bnx_get_eaddr(struct bnx_softc *, uint8_t[]);
241 static void bnx_coal_change(struct bnx_softc *);
242 static int bnx_sysctl_force_defrag(SYSCTL_HANDLER_ARGS);
243 static int bnx_sysctl_tx_wreg(SYSCTL_HANDLER_ARGS);
244 static int bnx_sysctl_rx_coal_ticks(SYSCTL_HANDLER_ARGS);
245 static int bnx_sysctl_tx_coal_ticks(SYSCTL_HANDLER_ARGS);
246 static int bnx_sysctl_rx_coal_bds(SYSCTL_HANDLER_ARGS);
247 static int bnx_sysctl_tx_coal_bds(SYSCTL_HANDLER_ARGS);
248 static int bnx_sysctl_rx_coal_bds_int(SYSCTL_HANDLER_ARGS);
249 static int bnx_sysctl_tx_coal_bds_int(SYSCTL_HANDLER_ARGS);
250 static int bnx_sysctl_coal_chg(SYSCTL_HANDLER_ARGS, uint32_t *,
/* Loader tunable hw.bnx.msi.enable: nonzero (default) allows MSI usage. */
253 static int bnx_msi_enable = 1;
254 TUNABLE_INT("hw.bnx.msi.enable", &bnx_msi_enable);
/*
 * newbus method dispatch table: device lifecycle, generic bus pass-throughs
 * and the MII accessors implemented below.
 * NOTE(review): the table terminator (DEVMETHOD_END / { 0, 0 }) is not
 * visible in this fragment.
 */
256 static device_method_t bnx_methods[] = {
257 /* Device interface */
258 DEVMETHOD(device_probe, bnx_probe),
259 DEVMETHOD(device_attach, bnx_attach),
260 DEVMETHOD(device_detach, bnx_detach),
261 DEVMETHOD(device_shutdown, bnx_shutdown),
262 DEVMETHOD(device_suspend, bnx_suspend),
263 DEVMETHOD(device_resume, bnx_resume),
266 DEVMETHOD(bus_print_child, bus_generic_print_child),
267 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
270 DEVMETHOD(miibus_readreg, bnx_miibus_readreg),
271 DEVMETHOD(miibus_writereg, bnx_miibus_writereg),
272 DEVMETHOD(miibus_statchg, bnx_miibus_statchg),
/* Driver class and module glue: attach to pci, hang miibus below us. */
277 static DEFINE_CLASS_0(bnx, bnx_driver, bnx_methods, sizeof(struct bnx_softc));
278 static devclass_t bnx_devclass;
280 DECLARE_DUMMY_MODULE(if_bnx);
281 DRIVER_MODULE(if_bnx, pci, bnx_driver, bnx_devclass, NULL, NULL);
282 DRIVER_MODULE(miibus, bnx, miibus_driver, miibus_devclass, NULL, NULL);
/*
 * Read a 32-bit word of NIC internal memory indirectly through the PCI
 * memory window: point BASEADDR at 'off', read DATA, then park the
 * window back at 0.
 */
285 bnx_readmem_ind(struct bnx_softc *sc, uint32_t off)
287 device_t dev = sc->bnx_dev;
290 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
291 val = pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4);
292 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
/*
 * Write a 32-bit word of NIC internal memory indirectly through the PCI
 * memory window; mirror of bnx_readmem_ind().
 */
297 bnx_writemem_ind(struct bnx_softc *sc, uint32_t off, uint32_t val)
299 device_t dev = sc->bnx_dev;
301 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
302 pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
303 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
/* Write a NIC location directly through the mapped register BAR. */
307 bnx_writemem_direct(struct bnx_softc *sc, uint32_t off, uint32_t val)
309 CSR_WRITE_4(sc, off, val);
/* Write a host/NIC mailbox register (producer/consumer indices etc.). */
313 bnx_writembx(struct bnx_softc *sc, int off, int val)
315 CSR_WRITE_4(sc, off, val);
319 * Read a sequence of bytes from NVRAM.
/* NOTE(review): function body is not visible in this fragment. */
322 bnx_read_nvram(struct bnx_softc *sc, caddr_t dest, int off, int cnt)
328 * Read a byte of data stored in the EEPROM at address 'addr.' The
329 * BCM570x supports both the traditional bitbang interface and an
330 * auto access interface for reading the EEPROM. We use the auto
334 bnx_eeprom_getbyte(struct bnx_softc *sc, uint32_t addr, uint8_t *dest)
340 * Enable use of auto EEPROM access so we can avoid
341 * having to use the bitbang method.
343 BNX_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);
345 /* Reset the EEPROM, load the clock period. */
346 CSR_WRITE_4(sc, BGE_EE_ADDR,
347 BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
350 /* Issue the read EEPROM command. */
351 CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);
353 /* Wait for completion */
354 for(i = 0; i < BNX_TIMEOUT * 10; i++) {
356 if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
/*
 * NOTE(review): the loop above iterates up to BNX_TIMEOUT * 10, yet the
 * exhaustion test below compares against plain BNX_TIMEOUT.  A genuine
 * timeout leaves i == BNX_TIMEOUT * 10 and would go undetected, while a
 * successful break at exactly i == BNX_TIMEOUT would be misreported as a
 * timeout.  Compare against the stock if_bge.c and confirm intent.
 */
360 if (i == BNX_TIMEOUT) {
361 if_printf(&sc->arpcom.ac_if, "eeprom read timed out\n");
/* Extract the addressed byte from the 32-bit data register. */
366 byte = CSR_READ_4(sc, BGE_EE_DATA);
368 *dest = (byte >> ((addr % 4) * 8)) & 0xFF;
374 * Read a sequence of bytes from the EEPROM.
/* Reads 'len' bytes starting at 'off' one byte at a time via
 * bnx_eeprom_getbyte(); stops on the first error. */
377 bnx_read_eeprom(struct bnx_softc *sc, caddr_t dest, uint32_t off, size_t len)
383 for (byte = 0, err = 0, i = 0; i < len; i++) {
384 err = bnx_eeprom_getbyte(sc, off + i, &byte);
/*
 * miibus read method: read PHY register 'reg' on PHY 'phy' through the
 * chip's MI communication interface.  Autopoll is temporarily disabled
 * around the access and restored afterwards.  Returns the 16-bit
 * register value, with failure paths visible below.
 */
394 bnx_miibus_readreg(device_t dev, int phy, int reg)
396 struct bnx_softc *sc = device_get_softc(dev);
400 KASSERT(phy == sc->bnx_phyno,
401 ("invalid phyno %d, should be %d", phy, sc->bnx_phyno));
403 /* Clear the autopoll bit if set, otherwise may trigger PCI errors. */
404 if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) {
405 CSR_WRITE_4(sc, BGE_MI_MODE,
406 sc->bnx_mi_mode & ~BGE_MIMODE_AUTOPOLL);
/* Kick off the read transaction. */
410 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ | BGE_MICOMM_BUSY |
411 BGE_MIPHY(phy) | BGE_MIREG(reg));
413 /* Poll for the PHY register access to complete. */
414 for (i = 0; i < BNX_TIMEOUT; i++) {
416 val = CSR_READ_4(sc, BGE_MI_COMM);
417 if ((val & BGE_MICOMM_BUSY) == 0) {
/* Re-read once more after BUSY clears to latch the final value. */
419 val = CSR_READ_4(sc, BGE_MI_COMM);
423 if (i == BNX_TIMEOUT) {
424 if_printf(&sc->arpcom.ac_if, "PHY read timed out "
425 "(phy %d, reg %d, val 0x%08x)\n", phy, reg, val);
429 /* Restore the autopoll bit if necessary. */
430 if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) {
431 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bnx_mi_mode);
/* READFAIL set means the PHY did not return valid data. */
435 if (val & BGE_MICOMM_READFAIL)
438 return (val & 0xFFFF);
/*
 * miibus write method: write 'val' to PHY register 'reg' on PHY 'phy'.
 * Same autopoll disable/restore dance as bnx_miibus_readreg().
 */
442 bnx_miibus_writereg(device_t dev, int phy, int reg, int val)
444 struct bnx_softc *sc = device_get_softc(dev);
447 KASSERT(phy == sc->bnx_phyno,
448 ("invalid phyno %d, should be %d", phy, sc->bnx_phyno));
450 /* Clear the autopoll bit if set, otherwise may trigger PCI errors. */
451 if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) {
452 CSR_WRITE_4(sc, BGE_MI_MODE,
453 sc->bnx_mi_mode & ~BGE_MIMODE_AUTOPOLL);
/* Kick off the write transaction and poll for BUSY to clear. */
457 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE | BGE_MICOMM_BUSY |
458 BGE_MIPHY(phy) | BGE_MIREG(reg) | val);
460 for (i = 0; i < BNX_TIMEOUT; i++) {
462 if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) {
464 CSR_READ_4(sc, BGE_MI_COMM); /* dummy read */
468 if (i == BNX_TIMEOUT) {
469 if_printf(&sc->arpcom.ac_if, "PHY write timed out "
470 "(phy %d, reg %d, val %d)\n", phy, reg, val);
473 /* Restore the autopoll bit if necessary. */
474 if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) {
475 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bnx_mi_mode);
/*
 * miibus status-change callback: reprogram the MAC port mode (GMII for
 * gigabit media, MII otherwise) and duplex setting to match the media
 * the PHY negotiated.
 */
483 bnx_miibus_statchg(device_t dev)
485 struct bnx_softc *sc;
486 struct mii_data *mii;
488 sc = device_get_softc(dev);
489 mii = device_get_softc(sc->bnx_miibus);
/* Only consider the media valid when the link is both active and valid. */
491 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
492 (IFM_ACTIVE | IFM_AVALID)) {
493 switch (IFM_SUBTYPE(mii->mii_media_active)) {
/* Nothing more to do until a link has been established. */
510 if (sc->bnx_link == 0)
/* Select GMII for 1000baseT/SX, plain MII for slower media. */
513 BNX_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
514 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
515 IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) {
516 BNX_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
518 BNX_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
/* Clear the half-duplex bit for FDX media, set it otherwise. */
521 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
522 BNX_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
524 BNX_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
529 * Memory management for jumbo frames.
/*
 * Allocate the jumbo RX ring plus one contiguous DMA block that is carved
 * into BNX_JSLOTS fixed jumbo buffers, each tracked by a bnx_jslot on the
 * free list.  Returns 0 on success; error paths free partial allocations
 * (cleanup code not fully visible in this fragment).
 */
532 bnx_alloc_jumbo_mem(struct bnx_softc *sc)
534 struct ifnet *ifp = &sc->arpcom.ac_if;
535 struct bnx_jslot *entry;
541 * Create tag for jumbo mbufs.
542 * This is really a bit of a kludge. We allocate a special
543 * jumbo buffer pool which (thanks to the way our DMA
544 * memory allocation works) will consist of contiguous
545 * pages. This means that even though a jumbo buffer might
546 * be larger than a page size, we don't really need to
547 * map it into more than one DMA segment. However, the
548 * default mbuf tag will result in multi-segment mappings,
549 * so we have to create a special jumbo mbuf tag that
550 * lets us get away with mapping the jumbo buffers as
551 * a single segment. I think eventually the driver should
552 * be changed so that it uses ordinary mbufs and cluster
553 * buffers, i.e. jumbo frames can span multiple DMA
554 * descriptors. But that's a project for another day.
558 * Create DMA stuffs for jumbo RX ring.
560 error = bnx_dma_block_alloc(sc, BGE_JUMBO_RX_RING_SZ,
561 &sc->bnx_cdata.bnx_rx_jumbo_ring_tag,
562 &sc->bnx_cdata.bnx_rx_jumbo_ring_map,
563 (void *)&sc->bnx_ldata.bnx_rx_jumbo_ring,
564 &sc->bnx_ldata.bnx_rx_jumbo_ring_paddr);
566 if_printf(ifp, "could not create jumbo RX ring\n");
571 * Create DMA stuffs for jumbo buffer block.
573 error = bnx_dma_block_alloc(sc, BNX_JMEM,
574 &sc->bnx_cdata.bnx_jumbo_tag,
575 &sc->bnx_cdata.bnx_jumbo_map,
576 (void **)&sc->bnx_ldata.bnx_jumbo_buf,
579 if_printf(ifp, "could not create jumbo buffer\n");
583 SLIST_INIT(&sc->bnx_jfree_listhead);
586 * Now divide it up into 9K pieces and save the addresses
587 * in an array. Note that we play an evil trick here by using
588 * the first few bytes in the buffer to hold the the address
589 * of the softc structure for this interface. This is because
590 * bnx_jfree() needs it, but it is called by the mbuf management
591 * code which will not pass it to us explicitly.
593 for (i = 0, ptr = sc->bnx_ldata.bnx_jumbo_buf; i < BNX_JSLOTS; i++) {
594 entry = &sc->bnx_cdata.bnx_jslots[i];
596 entry->bnx_buf = ptr;
597 entry->bnx_paddr = paddr;
598 entry->bnx_inuse = 0;
600 SLIST_INSERT_HEAD(&sc->bnx_jfree_listhead, entry, jslot_link);
/* Release the jumbo RX ring and the jumbo buffer pool DMA allocations. */
609 bnx_free_jumbo_mem(struct bnx_softc *sc)
611 /* Destroy jumbo RX ring. */
612 bnx_dma_block_free(sc->bnx_cdata.bnx_rx_jumbo_ring_tag,
613 sc->bnx_cdata.bnx_rx_jumbo_ring_map,
614 sc->bnx_ldata.bnx_rx_jumbo_ring);
616 /* Destroy jumbo buffer block. */
617 bnx_dma_block_free(sc->bnx_cdata.bnx_jumbo_tag,
618 sc->bnx_cdata.bnx_jumbo_map,
619 sc->bnx_ldata.bnx_jumbo_buf);
623 * Allocate a jumbo buffer.
/*
 * Pop a slot from the jumbo free list under the jslot serializer and mark
 * it in use; logs and fails when the pool is exhausted.
 */
625 static struct bnx_jslot *
626 bnx_jalloc(struct bnx_softc *sc)
628 struct bnx_jslot *entry;
630 lwkt_serialize_enter(&sc->bnx_jslot_serializer);
631 entry = SLIST_FIRST(&sc->bnx_jfree_listhead);
633 SLIST_REMOVE_HEAD(&sc->bnx_jfree_listhead, jslot_link);
634 entry->bnx_inuse = 1;
636 if_printf(&sc->arpcom.ac_if, "no free jumbo buffers\n");
638 lwkt_serialize_exit(&sc->bnx_jslot_serializer);
643 * Adjust usage count on a jumbo buffer.
/*
 * mbuf ext_ref callback: bump the slot's reference count atomically after
 * sanity-checking that the slot really belongs to this driver and is live.
 */
648 struct bnx_jslot *entry = (struct bnx_jslot *)arg;
649 struct bnx_softc *sc = entry->bnx_sc;
652 panic("bnx_jref: can't find softc pointer!");
654 if (&sc->bnx_cdata.bnx_jslots[entry->bnx_slot] != entry) {
655 panic("bnx_jref: asked to reference buffer "
656 "that we don't manage!");
657 } else if (entry->bnx_inuse == 0) {
658 panic("bnx_jref: buffer already free!");
660 atomic_add_int(&entry->bnx_inuse, 1);
665 * Release a jumbo buffer.
/*
 * mbuf ext_free callback: drop a reference and, when it reaches zero,
 * return the slot to the free list under the jslot serializer.
 */
670 struct bnx_jslot *entry = (struct bnx_jslot *)arg;
671 struct bnx_softc *sc = entry->bnx_sc;
674 panic("bnx_jfree: can't find softc pointer!");
676 if (&sc->bnx_cdata.bnx_jslots[entry->bnx_slot] != entry) {
677 panic("bnx_jfree: asked to free buffer that we don't manage!");
678 } else if (entry->bnx_inuse == 0) {
679 panic("bnx_jfree: buffer already free!");
682 * Possible MP race to 0, use the serializer. The atomic insn
683 * is still needed for races against bnx_jref().
685 lwkt_serialize_enter(&sc->bnx_jslot_serializer);
686 atomic_subtract_int(&entry->bnx_inuse, 1);
687 if (entry->bnx_inuse == 0) {
688 SLIST_INSERT_HEAD(&sc->bnx_jfree_listhead,
691 lwkt_serialize_exit(&sc->bnx_jslot_serializer);
697 * Intialize a standard receive ring descriptor.
/*
 * Allocate a new mbuf cluster for standard RX slot 'i', DMA-map it via the
 * return ring's spare map, then swap the spare and the slot's map so the
 * old mbuf stays mapped until unloaded.  'init' selects blocking vs
 * non-blocking mbuf allocation.
 */
700 bnx_newbuf_std(struct bnx_rx_ret_ring *ret, int i, int init)
702 struct mbuf *m_new = NULL;
703 bus_dma_segment_t seg;
706 struct bnx_rx_buf *rb;
708 m_new = m_getcl(init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
711 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
/* Align the IP header; costs 2 bytes of the cluster. */
712 m_adj(m_new, ETHER_ALIGN);
714 error = bus_dmamap_load_mbuf_segment(ret->bnx_rx_mtag,
715 ret->bnx_rx_tmpmap, m_new, &seg, 1, &nsegs, BUS_DMA_NOWAIT);
721 rb = &ret->bnx_std->bnx_rx_std_buf[i];
/* Sync and unload the previous mbuf's mapping before replacing it. */
724 bus_dmamap_sync(ret->bnx_rx_mtag, rb->bnx_rx_dmamap,
725 BUS_DMASYNC_POSTREAD);
726 bus_dmamap_unload(ret->bnx_rx_mtag, rb->bnx_rx_dmamap);
/* Swap the temporary map with the slot's map. */
729 map = ret->bnx_rx_tmpmap;
730 ret->bnx_rx_tmpmap = rb->bnx_rx_dmamap;
731 rb->bnx_rx_dmamap = map;
733 rb->bnx_rx_mbuf = m_new;
734 rb->bnx_rx_paddr = seg.ds_addr;
736 bnx_setup_rxdesc_std(ret->bnx_std, i);
/*
 * Fill in the standard RX ring descriptor at index 'i' from the saved
 * buffer state (DMA address and mbuf length).
 */
741 bnx_setup_rxdesc_std(struct bnx_rx_std_ring *std, int i)
743 const struct bnx_rx_buf *rb;
746 rb = &std->bnx_rx_std_buf[i];
747 r = &std->bnx_rx_std_ring[i];
749 r->bge_addr.bge_addr_lo = BGE_ADDR_LO(rb->bnx_rx_paddr);
750 r->bge_addr.bge_addr_hi = BGE_ADDR_HI(rb->bnx_rx_paddr);
751 r->bge_len = rb->bnx_rx_mbuf->m_len;
753 r->bge_flags = BGE_RXBDFLAG_END;
757 * Initialize a jumbo receive ring descriptor. This allocates
758 * a jumbo buffer from the pool managed internally by the driver.
/*
 * Attach a driver-managed jumbo buffer to a fresh mbuf header as external
 * storage (ref/free via bnx_jref/bnx_jfree), record it for jumbo RX slot
 * 'i' and program the descriptor.
 */
761 bnx_newbuf_jumbo(struct bnx_softc *sc, int i, int init)
763 struct mbuf *m_new = NULL;
764 struct bnx_jslot *buf;
767 /* Allocate the mbuf. */
768 MGETHDR(m_new, init ? MB_WAIT : MB_DONTWAIT, MT_DATA);
772 /* Allocate the jumbo buffer */
773 buf = bnx_jalloc(sc);
779 /* Attach the buffer to the mbuf. */
780 m_new->m_ext.ext_arg = buf;
781 m_new->m_ext.ext_buf = buf->bnx_buf;
782 m_new->m_ext.ext_free = bnx_jfree;
783 m_new->m_ext.ext_ref = bnx_jref;
784 m_new->m_ext.ext_size = BNX_JUMBO_FRAMELEN;
786 m_new->m_flags |= M_EXT;
788 m_new->m_data = m_new->m_ext.ext_buf;
789 m_new->m_len = m_new->m_pkthdr.len = m_new->m_ext.ext_size;
/* Keep the recorded DMA address in step with the ETHER_ALIGN adjust. */
791 paddr = buf->bnx_paddr;
792 m_adj(m_new, ETHER_ALIGN);
793 paddr += ETHER_ALIGN;
795 /* Save necessary information */
796 sc->bnx_cdata.bnx_rx_jumbo_chain[i].bnx_rx_mbuf = m_new;
797 sc->bnx_cdata.bnx_rx_jumbo_chain[i].bnx_rx_paddr = paddr;
799 /* Set up the descriptor. */
800 bnx_setup_rxdesc_jumbo(sc, i);
/*
 * Fill in the jumbo RX ring descriptor at index 'i' from the saved chain
 * entry; flags mark it as a jumbo-ring buffer.
 */
805 bnx_setup_rxdesc_jumbo(struct bnx_softc *sc, int i)
808 struct bnx_rx_buf *rc;
810 r = &sc->bnx_ldata.bnx_rx_jumbo_ring[i];
811 rc = &sc->bnx_cdata.bnx_rx_jumbo_chain[i];
813 r->bge_addr.bge_addr_lo = BGE_ADDR_LO(rc->bnx_rx_paddr);
814 r->bge_addr.bge_addr_hi = BGE_ADDR_HI(rc->bnx_rx_paddr);
815 r->bge_len = rc->bnx_rx_mbuf->m_len;
817 r->bge_flags = BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING;
/*
 * Populate every standard RX slot with a fresh mbuf (blocking allocation)
 * and publish the initial producer index to the chip mailbox.
 */
821 bnx_init_rx_ring_std(struct bnx_rx_std_ring *std)
825 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
826 /* Use the first RX return ring's tmp RX mbuf DMA map */
827 error = bnx_newbuf_std(&std->bnx_sc->bnx_rx_ret_ring[0], i, 1);
832 std->bnx_rx_std = BGE_STD_RX_RING_CNT - 1;
833 bnx_writembx(std->bnx_sc, BGE_MBX_RX_STD_PROD_LO, std->bnx_rx_std);
/*
 * Unload and free every mbuf still held by the standard RX ring and zero
 * its descriptors.
 */
839 bnx_free_rx_ring_std(struct bnx_rx_std_ring *std)
843 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
844 struct bnx_rx_buf *rb = &std->bnx_rx_std_buf[i];
846 if (rb->bnx_rx_mbuf != NULL) {
847 bus_dmamap_unload(std->bnx_rx_mtag, rb->bnx_rx_dmamap);
848 m_freem(rb->bnx_rx_mbuf);
849 rb->bnx_rx_mbuf = NULL;
851 bzero(&std->bnx_rx_std_ring[i], sizeof(struct bge_rx_bd));
/*
 * Populate the jumbo RX ring, program the jumbo RCB's maxlen/flags and
 * publish the initial jumbo producer index.
 */
856 bnx_init_rx_ring_jumbo(struct bnx_softc *sc)
861 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
862 error = bnx_newbuf_jumbo(sc, i, 1);
867 sc->bnx_jumbo = BGE_JUMBO_RX_RING_CNT - 1;
869 rcb = &sc->bnx_ldata.bnx_info.bnx_jumbo_rx_rcb;
870 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 0);
871 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
873 bnx_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bnx_jumbo);
/*
 * Free every mbuf still held by the jumbo RX ring (the jumbo buffers
 * themselves return to the pool via the ext_free callback) and zero the
 * descriptors.
 */
879 bnx_free_rx_ring_jumbo(struct bnx_softc *sc)
883 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
884 struct bnx_rx_buf *rc = &sc->bnx_cdata.bnx_rx_jumbo_chain[i];
886 if (rc->bnx_rx_mbuf != NULL) {
887 m_freem(rc->bnx_rx_mbuf);
888 rc->bnx_rx_mbuf = NULL;
890 bzero(&sc->bnx_ldata.bnx_rx_jumbo_ring[i],
891 sizeof(struct bge_rx_bd));
/*
 * Unload and free every in-flight TX mbuf, zero the descriptors and mark
 * the saved consumer index as unset.
 */
896 bnx_free_tx_ring(struct bnx_tx_ring *txr)
900 for (i = 0; i < BGE_TX_RING_CNT; i++) {
901 struct bnx_tx_buf *buf = &txr->bnx_tx_buf[i];
903 if (buf->bnx_tx_mbuf != NULL) {
904 bus_dmamap_unload(txr->bnx_tx_mtag,
906 m_freem(buf->bnx_tx_mbuf);
907 buf->bnx_tx_mbuf = NULL;
909 bzero(&txr->bnx_tx_ring[i], sizeof(struct bge_tx_bd));
911 txr->bnx_tx_saved_considx = BNX_TXCONS_UNSET;
/*
 * Reset the TX ring's producer/consumer indices and publish the producer
 * index through the ring's mailbox.
 */
915 bnx_init_tx_ring(struct bnx_tx_ring *txr)
918 txr->bnx_tx_saved_considx = 0;
919 txr->bnx_tx_prodidx = 0;
921 /* Initialize transmit producer index for host-memory send ring. */
922 bnx_writembx(txr->bnx_sc, txr->bnx_tx_mbx, txr->bnx_tx_prodidx);
/*
 * Program the 128-bit multicast hash filter (four BGE_MAR registers):
 * all-ones for ALLMULTI/PROMISC, otherwise a hash bit per subscribed
 * link-level multicast address.
 */
928 bnx_setmulti(struct bnx_softc *sc)
931 struct ifmultiaddr *ifma;
932 uint32_t hashes[4] = { 0, 0, 0, 0 };
935 ifp = &sc->arpcom.ac_if;
937 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
938 for (i = 0; i < 4; i++)
939 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
943 /* First, zot all the existing filters. */
944 for (i = 0; i < 4; i++)
945 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);
947 /* Now program new ones. */
948 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
949 if (ifma->ifma_addr->sa_family != AF_LINK)
/* 7-bit hash of the MAC address selects one of 128 filter bits. */
952 LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
953 ETHER_ADDR_LEN) & 0x7f;
954 hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
957 for (i = 0; i < 4; i++)
958 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
962 * Do endian, PCI and DMA initialization. Also check the on-board ROM
/*
 * One-time chip bring-up: byte ordering, statistics/status block clears,
 * 57765-family errata workarounds, PCI DMA read/write control, general
 * mode register, and PCI command register fixups.
 */
966 bnx_chipinit(struct bnx_softc *sc)
968 uint32_t dma_rw_ctl, mode_ctl;
971 /* Set endian type before we access any non-PCI registers. */
972 pci_write_config(sc->bnx_dev, BGE_PCI_MISC_CTL,
973 BGE_INIT | BGE_PCIMISCCTL_TAGGED_STATUS, 4);
975 /* Clear the MAC control register */
976 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
979 * Clear the MAC statistics block in the NIC's
982 for (i = BGE_STATS_BLOCK;
983 i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t))
984 BNX_MEMWIN_WRITE(sc, i, 0);
986 for (i = BGE_STATUS_BLOCK;
987 i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t))
988 BNX_MEMWIN_WRITE(sc, i, 0);
990 if (BNX_IS_57765_FAMILY(sc)) {
/* BCM57765 A0 erratum: disable the PCIe L2 clock request. */
993 if (sc->bnx_chipid == BGE_CHIPID_BCM57765_A0) {
994 mode_ctl = CSR_READ_4(sc, BGE_MODE_CTL);
995 val = mode_ctl & ~BGE_MODECTL_PCIE_PORTS;
997 /* Access the lower 1K of PL PCI-E block registers. */
998 CSR_WRITE_4(sc, BGE_MODE_CTL,
999 val | BGE_MODECTL_PCIE_PL_SEL);
1001 val = CSR_READ_4(sc, BGE_PCIE_PL_LO_PHYCTL5);
1002 val |= BGE_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ;
1003 CSR_WRITE_4(sc, BGE_PCIE_PL_LO_PHYCTL5, val);
1005 CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl);
1007 if (sc->bnx_chiprev != BGE_CHIPREV_57765_AX) {
1008 /* Fix transmit hangs */
1009 val = CSR_READ_4(sc, BGE_CPMU_PADRNG_CTL);
1010 val |= BGE_CPMU_PADRNG_CTL_RDIV2;
1011 CSR_WRITE_4(sc, BGE_CPMU_PADRNG_CTL, val);
1013 mode_ctl = CSR_READ_4(sc, BGE_MODE_CTL);
1014 val = mode_ctl & ~BGE_MODECTL_PCIE_PORTS;
1016 /* Access the lower 1K of DL PCI-E block registers. */
1017 CSR_WRITE_4(sc, BGE_MODE_CTL,
1018 val | BGE_MODECTL_PCIE_DL_SEL);
1020 val = CSR_READ_4(sc, BGE_PCIE_DL_LO_FTSMAX);
1021 val &= ~BGE_PCIE_DL_LO_FTSMAX_MASK;
1022 val |= BGE_PCIE_DL_LO_FTSMAX_VAL;
1023 CSR_WRITE_4(sc, BGE_PCIE_DL_LO_FTSMAX, val);
1025 CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl);
/* Slow the 10Mb MAC clock to 6.25MHz. */
1028 val = CSR_READ_4(sc, BGE_CPMU_LSPD_10MB_CLK);
1029 val &= ~BGE_CPMU_LSPD_10MB_MACCLK_MASK;
1030 val |= BGE_CPMU_LSPD_10MB_MACCLK_6_25;
1031 CSR_WRITE_4(sc, BGE_CPMU_LSPD_10MB_CLK, val);
1035 * Set up the PCI DMA control register.
1037 dma_rw_ctl = pci_read_config(sc->bnx_dev, BGE_PCI_DMA_RW_CTL, 4);
1039 * Disable 32bytes cache alignment for DMA write to host memory
1042 * 64bytes cache alignment for DMA write to host memory is still
1045 dma_rw_ctl |= BGE_PCIDMARWCTL_DIS_CACHE_ALIGNMENT;
1046 if (sc->bnx_chipid == BGE_CHIPID_BCM57765_A0)
1047 dma_rw_ctl &= ~BGE_PCIDMARWCTL_CRDRDR_RDMA_MRRS_MSK;
1049 * Enable HW workaround for controllers that misinterpret
1050 * a status tag update and leave interrupts permanently
1053 if (sc->bnx_asicrev != BGE_ASICREV_BCM5717 &&
1054 sc->bnx_asicrev != BGE_ASICREV_BCM5762 &&
1055 !BNX_IS_57765_FAMILY(sc))
1056 dma_rw_ctl |= BGE_PCIDMARWCTL_TAGGED_STATUS_WA;
1058 if_printf(&sc->arpcom.ac_if, "DMA read/write %#x\n",
1061 pci_write_config(sc->bnx_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);
1064 * Set up general mode register.
1066 mode_ctl = bnx_dma_swap_options(sc) | BGE_MODECTL_MAC_ATTN_INTR |
1067 BGE_MODECTL_HOST_SEND_BDS | BGE_MODECTL_TX_NO_PHDR_CSUM;
1068 CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl);
1071 * Disable memory write invalidate. Apparently it is not supported
1072 * properly by these devices. Also ensure that INTx isn't disabled,
1073 * as these chips need it even when using MSI.
1075 PCI_CLRBIT(sc->bnx_dev, BGE_PCI_CMD,
1076 (PCIM_CMD_MWRICEN | PCIM_CMD_INTxDIS), 4);
1078 /* Set the timer prescaler (always 66Mhz) */
1079 CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/);
1085 bnx_blockinit(struct bnx_softc *sc)
1087 struct bnx_tx_ring *txr = &sc->bnx_tx_ring[0];
1088 struct bnx_rx_ret_ring *ret = &sc->bnx_rx_ret_ring[0];
1089 struct bge_rcb *rcb;
1096 * Initialize the memory window pointer register so that
1097 * we can access the first 32K of internal NIC RAM. This will
1098 * allow us to set up the TX send ring RCBs and the RX return
1099 * ring RCBs, plus other things which live in NIC memory.
1101 CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);
1103 /* Configure mbuf pool watermarks */
1104 if (BNX_IS_57765_PLUS(sc)) {
1105 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1106 if (sc->arpcom.ac_if.if_mtu > ETHERMTU) {
1107 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x7e);
1108 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xea);
1110 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x2a);
1111 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xa0);
1114 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1115 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
1116 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1119 /* Configure DMA resource watermarks */
1120 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
1121 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);
1123 /* Enable buffer manager */
1124 val = BGE_BMANMODE_ENABLE | BGE_BMANMODE_LOMBUF_ATTN;
1126 * Change the arbitration algorithm of TXMBUF read request to
1127 * round-robin instead of priority based for BCM5719. When
1128 * TXFIFO is almost empty, RDMA will hold its request until
1129 * TXFIFO is not almost empty.
1131 if (sc->bnx_asicrev == BGE_ASICREV_BCM5719)
1132 val |= BGE_BMANMODE_NO_TX_UNDERRUN;
1133 if (sc->bnx_asicrev == BGE_ASICREV_BCM5717 ||
1134 sc->bnx_chipid == BGE_CHIPID_BCM5719_A0 ||
1135 sc->bnx_chipid == BGE_CHIPID_BCM5720_A0)
1136 val |= BGE_BMANMODE_LOMBUF_ATTN;
1137 CSR_WRITE_4(sc, BGE_BMAN_MODE, val);
1139 /* Poll for buffer manager start indication */
1140 for (i = 0; i < BNX_TIMEOUT; i++) {
1141 if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
1146 if (i == BNX_TIMEOUT) {
1147 if_printf(&sc->arpcom.ac_if,
1148 "buffer manager failed to start\n");
1152 /* Enable flow-through queues */
1153 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
1154 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
1156 /* Wait until queue initialization is complete */
1157 for (i = 0; i < BNX_TIMEOUT; i++) {
1158 if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
1163 if (i == BNX_TIMEOUT) {
1164 if_printf(&sc->arpcom.ac_if,
1165 "flow-through queue init failed\n");
1170 * Summary of rings supported by the controller:
1172 * Standard Receive Producer Ring
1173 * - This ring is used to feed receive buffers for "standard"
1174 * sized frames (typically 1536 bytes) to the controller.
1176 * Jumbo Receive Producer Ring
1177 * - This ring is used to feed receive buffers for jumbo sized
1178 * frames (i.e. anything bigger than the "standard" frames)
1179 * to the controller.
1181 * Mini Receive Producer Ring
1182 * - This ring is used to feed receive buffers for "mini"
1183 * sized frames to the controller.
1184 * - This feature required external memory for the controller
1185 * but was never used in a production system. Should always
1188 * Receive Return Ring
1189 * - After the controller has placed an incoming frame into a
1190 * receive buffer that buffer is moved into a receive return
1191 * ring. The driver is then responsible to passing the
1192 * buffer up to the stack. Many versions of the controller
1193 * support multiple RR rings.
1196 * - This ring is used for outgoing frames. Many versions of
1197 * the controller support multiple send rings.
1200 /* Initialize the standard receive producer ring control block. */
1201 rcb = &sc->bnx_ldata.bnx_info.bnx_std_rx_rcb;
1202 rcb->bge_hostaddr.bge_addr_lo =
1203 BGE_ADDR_LO(sc->bnx_rx_std_ring.bnx_rx_std_ring_paddr);
1204 rcb->bge_hostaddr.bge_addr_hi =
1205 BGE_ADDR_HI(sc->bnx_rx_std_ring.bnx_rx_std_ring_paddr);
1206 if (BNX_IS_57765_PLUS(sc)) {
1208 * Bits 31-16: Programmable ring size (2048, 1024, 512, .., 32)
1209 * Bits 15-2 : Maximum RX frame size
1210 * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled
1213 rcb->bge_maxlen_flags =
1214 BGE_RCB_MAXLEN_FLAGS(512, BNX_MAX_FRAMELEN << 2);
1217 * Bits 31-16: Programmable ring size (512, 256, 128, 64, 32)
1218 * Bits 15-2 : Reserved (should be 0)
1219 * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled
1222 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
1224 if (BNX_IS_5717_PLUS(sc))
1225 rcb->bge_nicaddr = BGE_STD_RX_RINGS_5717;
1227 rcb->bge_nicaddr = BGE_STD_RX_RINGS;
1228 /* Write the standard receive producer ring control block. */
1229 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
1230 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
1231 CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1232 CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
1233 /* Reset the standard receive producer ring producer index. */
1234 bnx_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0);
1237 * Initialize the jumbo RX producer ring control
1238 * block. We set the 'ring disabled' bit in the
1239 * flags field until we're actually ready to start
1240 * using this ring (i.e. once we set the MTU
1241 * high enough to require it).
1243 if (BNX_IS_JUMBO_CAPABLE(sc)) {
1244 rcb = &sc->bnx_ldata.bnx_info.bnx_jumbo_rx_rcb;
1245 /* Get the jumbo receive producer ring RCB parameters. */
1246 rcb->bge_hostaddr.bge_addr_lo =
1247 BGE_ADDR_LO(sc->bnx_ldata.bnx_rx_jumbo_ring_paddr);
1248 rcb->bge_hostaddr.bge_addr_hi =
1249 BGE_ADDR_HI(sc->bnx_ldata.bnx_rx_jumbo_ring_paddr);
1250 rcb->bge_maxlen_flags =
1251 BGE_RCB_MAXLEN_FLAGS(BNX_MAX_FRAMELEN,
1252 BGE_RCB_FLAG_RING_DISABLED);
1253 if (BNX_IS_5717_PLUS(sc))
1254 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS_5717;
1256 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
1257 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
1258 rcb->bge_hostaddr.bge_addr_hi);
1259 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
1260 rcb->bge_hostaddr.bge_addr_lo);
1261 /* Program the jumbo receive producer ring RCB parameters. */
1262 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
1263 rcb->bge_maxlen_flags);
1264 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
1265 /* Reset the jumbo receive producer ring producer index. */
1266 bnx_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
1270 * The BD ring replenish thresholds control how often the
1271 * hardware fetches new BD's from the producer rings in host
1272 * memory. Setting the value too low on a busy system can
1273 * starve the hardware and reduce the throughput.
1275 * Set the BD ring replenish thresholds. The recommended
1276 * values are 1/8th the number of descriptors allocated to
1280 CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, val);
1281 if (BNX_IS_JUMBO_CAPABLE(sc)) {
1282 CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH,
1283 BGE_JUMBO_RX_RING_CNT/8);
1285 if (BNX_IS_57765_PLUS(sc)) {
1286 CSR_WRITE_4(sc, BGE_STD_REPLENISH_LWM, 32);
1287 CSR_WRITE_4(sc, BGE_JMB_REPLENISH_LWM, 16);
1291 * Disable all send rings by setting the 'ring disabled' bit
1292 * in the flags field of all the TX send ring control blocks,
1293 * located in NIC memory.
1295 if (BNX_IS_5717_PLUS(sc))
1297 else if (BNX_IS_57765_FAMILY(sc) ||
1298 sc->bnx_asicrev == BGE_ASICREV_BCM5762)
1302 vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1303 for (i = 0; i < limit; i++) {
1304 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1305 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
1306 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1307 vrcb += sizeof(struct bge_rcb);
1310 /* Configure send ring RCB 0 (we use only the first ring) */
1311 vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1312 BGE_HOSTADDR(taddr, txr->bnx_tx_ring_paddr);
1313 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1314 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1315 if (BNX_IS_5717_PLUS(sc)) {
1316 RCB_WRITE_4(sc, vrcb, bge_nicaddr, BGE_SEND_RING_5717);
1318 RCB_WRITE_4(sc, vrcb, bge_nicaddr,
1319 BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
1321 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1322 BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
1325 * Disable all receive return rings by setting the
1326 * 'ring disabled' bit in the flags field of all the receive
1327 * return ring control blocks, located in NIC memory.
1329 if (BNX_IS_5717_PLUS(sc)) {
1330 /* Should be 17, use 16 until we get an SRAM map. */
1332 } else if (BNX_IS_57765_FAMILY(sc) ||
1333 sc->bnx_asicrev == BGE_ASICREV_BCM5762) {
1338 /* Disable all receive return rings. */
1339 vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1340 for (i = 0; i < limit; i++) {
1341 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0);
1342 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0);
1343 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1344 BGE_RCB_FLAG_RING_DISABLED);
1345 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1346 bnx_writembx(sc, BGE_MBX_RX_CONS0_LO +
1347 (i * (sizeof(uint64_t))), 0);
1348 vrcb += sizeof(struct bge_rcb);
1352 * Set up receive return ring 0. Note that the NIC address
1353 * for RX return rings is 0x0. The return rings live entirely
1354 * within the host, so the nicaddr field in the RCB isn't used.
1356 vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1357 BGE_HOSTADDR(taddr, ret->bnx_rx_ret_ring_paddr);
1358 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1359 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1360 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1361 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1362 BGE_RCB_MAXLEN_FLAGS(BNX_RETURN_RING_CNT, 0));
1364 /* Set random backoff seed for TX */
1365 CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
1366 sc->arpcom.ac_enaddr[0] + sc->arpcom.ac_enaddr[1] +
1367 sc->arpcom.ac_enaddr[2] + sc->arpcom.ac_enaddr[3] +
1368 sc->arpcom.ac_enaddr[4] + sc->arpcom.ac_enaddr[5] +
1369 BGE_TX_BACKOFF_SEED_MASK);
1371 /* Set inter-packet gap */
1373 if (sc->bnx_asicrev == BGE_ASICREV_BCM5720 ||
1374 sc->bnx_asicrev == BGE_ASICREV_BCM5762) {
1375 val |= CSR_READ_4(sc, BGE_TX_LENGTHS) &
1376 (BGE_TXLEN_JMB_FRM_LEN_MSK | BGE_TXLEN_CNT_DN_VAL_MSK);
1378 CSR_WRITE_4(sc, BGE_TX_LENGTHS, val);
1381 * Specify which ring to use for packets that don't match
1384 CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
1387 * Configure number of RX lists. One interrupt distribution
1388 * list, sixteen active lists, one bad frames class.
1390 CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
1392 /* Initialize RX list placement stats mask. */
1393 CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
1394 CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
1396 /* Disable host coalescing until we get it set up */
1397 CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
1399 /* Poll to make sure it's shut down. */
1400 for (i = 0; i < BNX_TIMEOUT; i++) {
1401 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
1406 if (i == BNX_TIMEOUT) {
1407 if_printf(&sc->arpcom.ac_if,
1408 "host coalescing engine failed to idle\n");
1412 /* Set up host coalescing defaults */
1413 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bnx_rx_coal_ticks);
1414 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bnx_tx_coal_ticks);
1415 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bnx_rx_coal_bds);
1416 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bnx_tx_coal_bds);
1417 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, sc->bnx_rx_coal_bds_int);
1418 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, sc->bnx_tx_coal_bds_int);
1420 /* Set up address of status block */
1421 bzero(sc->bnx_ldata.bnx_status_block, BGE_STATUS_BLK_SZ);
1422 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI,
1423 BGE_ADDR_HI(sc->bnx_ldata.bnx_status_block_paddr));
1424 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
1425 BGE_ADDR_LO(sc->bnx_ldata.bnx_status_block_paddr));
1427 /* Set up status block partial update size. */
1428 val = BGE_STATBLKSZ_32BYTE;
1431 * Does not seem to have visible effect in both
1432 * bulk data (1472B UDP datagram) and tiny data
1433 * (18B UDP datagram) TX tests.
1435 val |= BGE_HCCMODE_CLRTICK_TX;
1437 /* Turn on host coalescing state machine */
1438 CSR_WRITE_4(sc, BGE_HCC_MODE, val | BGE_HCCMODE_ENABLE);
1440 /* Turn on RX BD completion state machine and enable attentions */
1441 CSR_WRITE_4(sc, BGE_RBDC_MODE,
1442 BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);
1444 /* Turn on RX list placement state machine */
1445 CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
1447 val = BGE_MACMODE_TXDMA_ENB | BGE_MACMODE_RXDMA_ENB |
1448 BGE_MACMODE_RX_STATS_CLEAR | BGE_MACMODE_TX_STATS_CLEAR |
1449 BGE_MACMODE_RX_STATS_ENB | BGE_MACMODE_TX_STATS_ENB |
1450 BGE_MACMODE_FRMHDR_DMA_ENB;
1452 if (sc->bnx_flags & BNX_FLAG_TBI)
1453 val |= BGE_PORTMODE_TBI;
1454 else if (sc->bnx_flags & BNX_FLAG_MII_SERDES)
1455 val |= BGE_PORTMODE_GMII;
1457 val |= BGE_PORTMODE_MII;
1459 /* Turn on DMA, clear stats */
1460 CSR_WRITE_4(sc, BGE_MAC_MODE, val);
1462 /* Set misc. local control, enable interrupts on attentions */
1463 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);
1466 /* Assert GPIO pins for PHY reset */
1467 BNX_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
1468 BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
1469 BNX_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
1470 BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
1473 /* Turn on write DMA state machine */
1474 val = BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS;
1475 /* Enable host coalescing bug fix. */
1476 val |= BGE_WDMAMODE_STATUS_TAG_FIX;
1477 if (sc->bnx_asicrev == BGE_ASICREV_BCM5785) {
1478 /* Request larger DMA burst size to get better performance. */
1479 val |= BGE_WDMAMODE_BURST_ALL_DATA;
1481 CSR_WRITE_4(sc, BGE_WDMA_MODE, val);
1484 if (BNX_IS_57765_PLUS(sc)) {
1485 uint32_t dmactl, dmactl_reg;
1487 if (sc->bnx_asicrev == BGE_ASICREV_BCM5762)
1488 dmactl_reg = BGE_RDMA_RSRVCTRL2;
1490 dmactl_reg = BGE_RDMA_RSRVCTRL;
1492 dmactl = CSR_READ_4(sc, dmactl_reg);
1494 * Adjust tx margin to prevent TX data corruption and
1495 * fix internal FIFO overflow.
1497 if (sc->bnx_asicrev == BGE_ASICREV_BCM5719 ||
1498 sc->bnx_asicrev == BGE_ASICREV_BCM5720 ||
1499 sc->bnx_asicrev == BGE_ASICREV_BCM5762) {
1500 dmactl &= ~(BGE_RDMA_RSRVCTRL_FIFO_LWM_MASK |
1501 BGE_RDMA_RSRVCTRL_FIFO_HWM_MASK |
1502 BGE_RDMA_RSRVCTRL_TXMRGN_MASK);
1503 dmactl |= BGE_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
1504 BGE_RDMA_RSRVCTRL_FIFO_HWM_1_5K |
1505 BGE_RDMA_RSRVCTRL_TXMRGN_320B;
1508 * Enable fix for read DMA FIFO overruns.
1509 * The fix is to limit the number of RX BDs
1510 * the hardware would fetch at a time.
1512 CSR_WRITE_4(sc, dmactl_reg,
1513 dmactl | BGE_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
1516 if (sc->bnx_asicrev == BGE_ASICREV_BCM5719) {
1517 CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL,
1518 CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) |
1519 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_4K |
1520 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K);
1521 } else if (sc->bnx_asicrev == BGE_ASICREV_BCM5720 ||
1522 sc->bnx_asicrev == BGE_ASICREV_BCM5762) {
1525 if (sc->bnx_asicrev == BGE_ASICREV_BCM5762)
1526 ctrl_reg = BGE_RDMA_LSO_CRPTEN_CTRL2;
1528 ctrl_reg = BGE_RDMA_LSO_CRPTEN_CTRL;
1531 * Allow 4KB burst length reads for non-LSO frames.
1532 * Enable 512B burst length reads for buffer descriptors.
1534 CSR_WRITE_4(sc, ctrl_reg,
1535 CSR_READ_4(sc, ctrl_reg) |
1536 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_512 |
1537 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K);
1540 /* Turn on read DMA state machine */
1541 val = BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS;
1542 if (sc->bnx_asicrev == BGE_ASICREV_BCM5717)
1543 val |= BGE_RDMAMODE_MULT_DMA_RD_DIS;
1544 if (sc->bnx_asicrev == BGE_ASICREV_BCM5784 ||
1545 sc->bnx_asicrev == BGE_ASICREV_BCM5785 ||
1546 sc->bnx_asicrev == BGE_ASICREV_BCM57780) {
1547 val |= BGE_RDMAMODE_BD_SBD_CRPT_ATTN |
1548 BGE_RDMAMODE_MBUF_RBD_CRPT_ATTN |
1549 BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN;
1551 if (sc->bnx_asicrev == BGE_ASICREV_BCM5720 ||
1552 sc->bnx_asicrev == BGE_ASICREV_BCM5762) {
1553 val |= CSR_READ_4(sc, BGE_RDMA_MODE) &
1554 BGE_RDMAMODE_H2BNC_VLAN_DET;
1556 * Allow multiple outstanding read requests from
1557 * non-LSO read DMA engine.
1559 val &= ~BGE_RDMAMODE_MULT_DMA_RD_DIS;
1561 if (sc->bnx_asicrev == BGE_ASICREV_BCM57766)
1562 val |= BGE_RDMAMODE_JMB_2K_MMRR;
1563 if (sc->bnx_flags & BNX_FLAG_TSO)
1564 val |= BGE_RDMAMODE_TSO4_ENABLE;
1565 val |= BGE_RDMAMODE_FIFO_LONG_BURST;
1566 CSR_WRITE_4(sc, BGE_RDMA_MODE, val);
1569 /* Turn on RX data completion state machine */
1570 CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
1572 /* Turn on RX BD initiator state machine */
1573 CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
1575 /* Turn on RX data and RX BD initiator state machine */
1576 CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
1578 /* Turn on send BD completion state machine */
1579 CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
1581 /* Turn on send data completion state machine */
1582 val = BGE_SDCMODE_ENABLE;
1583 if (sc->bnx_asicrev == BGE_ASICREV_BCM5761)
1584 val |= BGE_SDCMODE_CDELAY;
1585 CSR_WRITE_4(sc, BGE_SDC_MODE, val);
1587 /* Turn on send data initiator state machine */
1588 if (sc->bnx_flags & BNX_FLAG_TSO) {
1589 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE |
1590 BGE_SDIMODE_HW_LSO_PRE_DMA);
1592 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
1595 /* Turn on send BD initiator state machine */
1596 CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
1598 /* Turn on send BD selector state machine */
1599 CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
1601 CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
1602 CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
1603 BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);
1605 /* ack/clear link change events */
1606 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
1607 BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
1608 BGE_MACSTAT_LINK_CHANGED);
1609 CSR_WRITE_4(sc, BGE_MI_STS, 0);
1612 * Enable attention when the link has changed state for
1613 * devices that use auto polling.
1615 if (sc->bnx_flags & BNX_FLAG_TBI) {
1616 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
1618 if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) {
1619 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bnx_mi_mode);
1625 * Clear any pending link state attention.
1626 * Otherwise some link state change events may be lost until attention
1627 * is cleared by bnx_intr() -> bnx_softc.bnx_link_upd() sequence.
1628 * It's not necessary on newer BCM chips - perhaps enabling link
1629 * state change attentions implies clearing pending attention.
1631 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
1632 BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
1633 BGE_MACSTAT_LINK_CHANGED);
1635 /* Enable link state change attentions. */
1636 BNX_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
1642 * Probe for a Broadcom chip. Check the PCI vendor and device IDs
1643 * against our list and return its name if we find a match. Note
1644 * that since the Broadcom controller contains VPD support, we
1645 * can get the device name string from the controller itself instead
1646 * of the compiled-in string. This is a little slow, but it guarantees
1647 * we'll always announce the right product name.
1650 bnx_probe(device_t dev)
1652 const struct bnx_type *t;
1653 uint16_t product, vendor;
/* bnx(4) drives PCIe parts only; non-PCIe chips are rejected here. */
1655 if (!pci_is_pcie(dev))
1658 product = pci_get_device(dev);
1659 vendor = pci_get_vendor(dev);
/*
 * Walk the bnx_devs table; the loop ends either on a vendor/device
 * match or on the NULL-name sentinel that terminates the table.
 */
1661 for (t = bnx_devs; t->bnx_name != NULL; t++) {
1662 if (vendor == t->bnx_vid && product == t->bnx_did)
/* Sentinel reached: no supported device matched. */
1665 if (t->bnx_name == NULL)
1668 device_set_desc(dev, t->bnx_name);
/*
 * Attach: map the register BAR, identify the exact chip/ASIC revision,
 * reset and initialize the controller, read the station address, set up
 * DMA and interrupts, register the ifnet, sysctl nodes and PHY/ifmedia
 * glue.  NOTE(review): this listing is an excerpt; several lines of the
 * routine (error-unwind tail, some else-branches) are not visible here.
 */
1673 bnx_attach(device_t dev)
1676 struct bnx_softc *sc;
1678 int error = 0, rid, capmask;
1679 uint8_t ether_addr[ETHER_ADDR_LEN];
1681 uintptr_t mii_priv = 0;
1682 #ifdef BNX_TSO_DEBUG
1687 sc = device_get_softc(dev);
/* Initialize callout/serializers before any path can use them. */
1689 callout_init_mp(&sc->bnx_stat_timer);
1690 lwkt_serialize_init(&sc->bnx_jslot_serializer);
1691 lwkt_serialize_init(&sc->bnx_main_serialize);
/* Cached for the PHY-quirk checks further down. */
1693 product = pci_get_device(dev);
1695 #ifndef BURN_BRIDGES
/*
 * If the BIOS left the chip in a low-power state, bring it back to D0.
 * BAR0 and the interrupt line are saved/restored around the transition.
 */
1696 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
1699 irq = pci_read_config(dev, PCIR_INTLINE, 4);
1700 mem = pci_read_config(dev, BGE_PCI_BAR0, 4);
1702 device_printf(dev, "chip is in D%d power mode "
1703 "-- setting to D0\n", pci_get_powerstate(dev));
1705 pci_set_powerstate(dev, PCI_POWERSTATE_D0);
1707 pci_write_config(dev, PCIR_INTLINE, irq, 4);
1708 pci_write_config(dev, BGE_PCI_BAR0, mem, 4);
1710 #endif /* !BURN_BRIDGES */
1713 * Map control/status registers.
1715 pci_enable_busmaster(dev);
1718 sc->bnx_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
1721 if (sc->bnx_res == NULL) {
1722 device_printf(dev, "couldn't map memory\n");
1726 sc->bnx_btag = rman_get_bustag(sc->bnx_res);
1727 sc->bnx_bhandle = rman_get_bushandle(sc->bnx_res);
1729 /* Save various chip information */
1731 pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >>
1732 BGE_PCIMISCCTL_ASICREV_SHIFT;
1733 if (BGE_ASICREV(sc->bnx_chipid) == BGE_ASICREV_USE_PRODID_REG) {
1734 /* All chips having dedicated ASICREV register have CPMU */
1735 sc->bnx_flags |= BNX_FLAG_CPMU;
/*
 * The product ID selects which per-generation ASICREV config
 * register holds the real chip ID.
 */
1738 case PCI_PRODUCT_BROADCOM_BCM5717:
1739 case PCI_PRODUCT_BROADCOM_BCM5717C:
1740 case PCI_PRODUCT_BROADCOM_BCM5718:
1741 case PCI_PRODUCT_BROADCOM_BCM5719:
1742 case PCI_PRODUCT_BROADCOM_BCM5720_ALT:
1743 case PCI_PRODUCT_BROADCOM_BCM5725:
1744 case PCI_PRODUCT_BROADCOM_BCM5727:
1745 case PCI_PRODUCT_BROADCOM_BCM5762:
1746 sc->bnx_chipid = pci_read_config(dev,
1747 BGE_PCI_GEN2_PRODID_ASICREV, 4);
1750 case PCI_PRODUCT_BROADCOM_BCM57761:
1751 case PCI_PRODUCT_BROADCOM_BCM57762:
1752 case PCI_PRODUCT_BROADCOM_BCM57765:
1753 case PCI_PRODUCT_BROADCOM_BCM57766:
1754 case PCI_PRODUCT_BROADCOM_BCM57781:
1755 case PCI_PRODUCT_BROADCOM_BCM57782:
1756 case PCI_PRODUCT_BROADCOM_BCM57785:
1757 case PCI_PRODUCT_BROADCOM_BCM57786:
1758 case PCI_PRODUCT_BROADCOM_BCM57791:
1759 case PCI_PRODUCT_BROADCOM_BCM57795:
1760 sc->bnx_chipid = pci_read_config(dev,
1761 BGE_PCI_GEN15_PRODID_ASICREV, 4);
1765 sc->bnx_chipid = pci_read_config(dev,
1766 BGE_PCI_PRODID_ASICREV, 4);
/* BCM5717 C0 is handled as BCM5720 A0 throughout the driver. */
1770 if (sc->bnx_chipid == BGE_CHIPID_BCM5717_C0)
1771 sc->bnx_chipid = BGE_CHIPID_BCM5720_A0;
1773 sc->bnx_asicrev = BGE_ASICREV(sc->bnx_chipid);
1774 sc->bnx_chiprev = BGE_CHIPREV(sc->bnx_chipid);
/* Derive family flags from the ASIC revision. */
1776 switch (sc->bnx_asicrev) {
1777 case BGE_ASICREV_BCM5717:
1778 case BGE_ASICREV_BCM5719:
1779 case BGE_ASICREV_BCM5720:
1780 sc->bnx_flags |= BNX_FLAG_5717_PLUS | BNX_FLAG_57765_PLUS;
1783 case BGE_ASICREV_BCM5762:
1784 sc->bnx_flags |= BNX_FLAG_57765_PLUS;
1787 case BGE_ASICREV_BCM57765:
1788 case BGE_ASICREV_BCM57766:
1789 sc->bnx_flags |= BNX_FLAG_57765_FAMILY | BNX_FLAG_57765_PLUS;
/* Assume TSO capable; revoked below for the broken BCM5719 A0. */
1793 sc->bnx_flags |= BNX_FLAG_TSO;
1794 if (sc->bnx_asicrev == BGE_ASICREV_BCM5719 &&
1795 sc->bnx_chipid == BGE_CHIPID_BCM5719_A0)
1796 sc->bnx_flags &= ~BNX_FLAG_TSO;
1798 if (sc->bnx_asicrev == BGE_ASICREV_BCM5717 ||
1799 BNX_IS_57765_FAMILY(sc)) {
1801 * All BCM57785 and BCM5718 families chips have a bug that
1802 * under certain situation interrupt will not be enabled
1803 * even if status tag is written to BGE_MBX_IRQ0_LO mailbox.
1805 * While BCM5719 and BCM5720 have a hardware workaround
1806 * which could fix the above bug.
1807 * See the comment near BGE_PCIDMARWCTL_TAGGED_STATUS_WA in
1810 * For the rest of the chips in these two families, we will
1811 * have to poll the status block at high rate (10ms currently)
1812 * to check whether the interrupt is hosed or not.
1813 * See bnx_check_intr() for details.
1815 sc->bnx_flags |= BNX_FLAG_STATUSTAG_BUG;
1818 sc->bnx_pciecap = pci_get_pciecap_ptr(sc->bnx_dev);
/* BCM5719/5720 are limited to 2KB max read request size. */
1819 if (sc->bnx_asicrev == BGE_ASICREV_BCM5719 ||
1820 sc->bnx_asicrev == BGE_ASICREV_BCM5720)
1821 pcie_set_max_readrq(dev, PCIEM_DEVCTL_MAX_READRQ_2048);
1823 pcie_set_max_readrq(dev, PCIEM_DEVCTL_MAX_READRQ_4096);
1824 device_printf(dev, "CHIP ID 0x%08x; "
1825 "ASIC REV 0x%02x; CHIP REV 0x%02x\n",
1826 sc->bnx_chipid, sc->bnx_asicrev, sc->bnx_chiprev);
1829 * Set various PHY quirk flags.
1832 capmask = MII_CAPMASK_DEFAULT;
/* BCM57791/57795 are 10/100 only: mask off gigabit capability. */
1833 if (product == PCI_PRODUCT_BROADCOM_BCM57791 ||
1834 product == PCI_PRODUCT_BROADCOM_BCM57795) {
1836 capmask &= ~BMSR_EXTSTAT;
1839 mii_priv |= BRGPHY_FLAG_WIRESPEED;
1840 if (sc->bnx_chipid == BGE_CHIPID_BCM5762_A0)
1841 mii_priv |= BRGPHY_FLAG_5762_A0;
1843 /* Initialize if_name earlier, so if_printf could be used */
1844 ifp = &sc->arpcom.ac_if;
1845 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1847 /* Try to reset the chip. */
1850 if (bnx_chipinit(sc)) {
1851 device_printf(dev, "chip initialization failed\n");
1857 * Get station address
1859 error = bnx_get_eaddr(sc, ether_addr);
1861 device_printf(dev, "failed to read station address\n");
/* Single TX ring / single RX return ring configuration. */
1866 sc->bnx_tx_ringcnt = 1;
1867 sc->bnx_rx_retcnt = 1;
1869 error = bnx_dma_alloc(dev);
1874 * Allocate interrupt
1876 error = bnx_alloc_intr(sc);
1880 /* Set default tuneable values. */
1881 sc->bnx_rx_coal_ticks = BNX_RX_COAL_TICKS_DEF;
1882 sc->bnx_tx_coal_ticks = BNX_TX_COAL_TICKS_DEF;
1883 sc->bnx_rx_coal_bds = BNX_RX_COAL_BDS_DEF;
1884 sc->bnx_tx_coal_bds = BNX_TX_COAL_BDS_DEF;
1885 sc->bnx_rx_coal_bds_int = BNX_RX_COAL_BDS_INT_DEF;
1886 sc->bnx_tx_coal_bds_int = BNX_TX_COAL_BDS_INT_DEF;
1888 /* Set up ifnet structure */
1890 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1891 ifp->if_ioctl = bnx_ioctl;
1892 ifp->if_start = bnx_start;
1893 #ifdef IFPOLL_ENABLE
1894 ifp->if_npoll = bnx_npoll;
1896 ifp->if_watchdog = bnx_watchdog;
1897 ifp->if_init = bnx_init;
1898 ifp->if_mtu = ETHERMTU;
1899 ifp->if_capabilities = IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
1900 ifq_set_maxlen(&ifp->if_snd, BGE_TX_RING_CNT - 1);
1901 ifq_set_ready(&ifp->if_snd);
1903 ifp->if_capabilities |= IFCAP_HWCSUM;
1904 ifp->if_hwassist = BNX_CSUM_FEATURES;
1905 if (sc->bnx_flags & BNX_FLAG_TSO) {
1906 ifp->if_capabilities |= IFCAP_TSO;
1907 ifp->if_hwassist |= CSUM_TSO;
1909 ifp->if_capenable = ifp->if_capabilities;
1912 * Figure out what sort of media we have by checking the
1913 * hardware config word in the first 32k of NIC internal memory,
1914 * or fall back to examining the EEPROM if necessary.
1915 * Note: on some BCM5700 cards, this value appears to be unset.
1916 * If that's the case, we have to rely on identifying the NIC
1917 * by its PCI subsystem ID, as we do below for the SysKonnect
1920 if (bnx_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER) {
1921 hwcfg = bnx_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
1923 if (bnx_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
1925 device_printf(dev, "failed to read EEPROM\n");
/* EEPROM contents are big-endian; convert to host order. */
1929 hwcfg = ntohl(hwcfg);
1932 /* The SysKonnect SK-9D41 is a 1000baseSX card. */
1933 if (pci_get_subvendor(dev) == PCI_PRODUCT_SCHNEIDERKOCH_SK_9D41 ||
1934 (hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER)
1935 sc->bnx_flags |= BNX_FLAG_TBI;
/* CPMU-equipped chips use the fixed 500KHz MI clock. */
1938 if (sc->bnx_flags & BNX_FLAG_CPMU)
1939 sc->bnx_mi_mode = BGE_MIMODE_500KHZ_CONST;
1941 sc->bnx_mi_mode = BGE_MIMODE_BASE;
1943 /* Setup link status update stuffs */
1944 if (sc->bnx_flags & BNX_FLAG_TBI) {
1945 sc->bnx_link_upd = bnx_tbi_link_upd;
1946 sc->bnx_link_chg = BGE_MACSTAT_LINK_CHANGED;
1947 } else if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) {
1948 sc->bnx_link_upd = bnx_autopoll_link_upd;
1949 sc->bnx_link_chg = BGE_MACSTAT_LINK_CHANGED;
1951 sc->bnx_link_upd = bnx_copper_link_upd;
1952 sc->bnx_link_chg = BGE_MACSTAT_LINK_CHANGED;
1955 /* Set default PHY address */
1959 * PHY address mapping for various devices.
1961 * | F0 Cu | F0 Sr | F1 Cu | F1 Sr |
1962 * ---------+-------+-------+-------+-------+
1963 * BCM57XX | 1 | X | X | X |
1964 * BCM5704 | 1 | X | 1 | X |
1965 * BCM5717 | 1 | 8 | 2 | 9 |
1966 * BCM5719 | 1 | 8 | 2 | 9 |
1967 * BCM5720 | 1 | 8 | 2 | 9 |
1969 * Other addresses may respond but they are not
1970 * IEEE compliant PHYs and should be ignored.
1972 if (BNX_IS_5717_PLUS(sc)) {
1975 f = pci_get_function(dev);
1976 if (sc->bnx_chipid == BGE_CHIPID_BCM5717_A0) {
1977 if (CSR_READ_4(sc, BGE_SGDIG_STS) &
1978 BGE_SGDIGSTS_IS_SERDES)
1979 sc->bnx_phyno = f + 8;
1981 sc->bnx_phyno = f + 1;
1983 if (CSR_READ_4(sc, BGE_CPMU_PHY_STRAP) &
1984 BGE_CPMU_PHY_STRAP_IS_SERDES)
1985 sc->bnx_phyno = f + 8;
1987 sc->bnx_phyno = f + 1;
/* Fiber (TBI) links get a fixed ifmedia set; copper goes via MII. */
1991 if (sc->bnx_flags & BNX_FLAG_TBI) {
1992 ifmedia_init(&sc->bnx_ifmedia, IFM_IMASK,
1993 bnx_ifmedia_upd, bnx_ifmedia_sts);
1994 ifmedia_add(&sc->bnx_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
1995 ifmedia_add(&sc->bnx_ifmedia,
1996 IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL);
1997 ifmedia_add(&sc->bnx_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
1998 ifmedia_set(&sc->bnx_ifmedia, IFM_ETHER|IFM_AUTO);
1999 sc->bnx_ifmedia.ifm_media = sc->bnx_ifmedia.ifm_cur->ifm_media;
2001 struct mii_probe_args mii_args;
2003 mii_probe_args_init(&mii_args, bnx_ifmedia_upd, bnx_ifmedia_sts);
2004 mii_args.mii_probemask = 1 << sc->bnx_phyno;
2005 mii_args.mii_capmask = capmask;
2006 mii_args.mii_privtag = MII_PRIVTAG_BRGPHY;
2007 mii_args.mii_priv = mii_priv;
2009 error = mii_probe(dev, &sc->bnx_miibus, &mii_args);
2011 device_printf(dev, "MII without any PHY!\n");
2017 * Create sysctl nodes.
2019 sysctl_ctx_init(&sc->bnx_sysctl_ctx);
2020 sc->bnx_sysctl_tree = SYSCTL_ADD_NODE(&sc->bnx_sysctl_ctx,
2021 SYSCTL_STATIC_CHILDREN(_hw),
2023 device_get_nameunit(dev),
2025 if (sc->bnx_sysctl_tree == NULL) {
2026 device_printf(dev, "can't add sysctl node\n");
2031 SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
2032 SYSCTL_CHILDREN(sc->bnx_sysctl_tree),
2033 OID_AUTO, "rx_coal_ticks",
2034 CTLTYPE_INT | CTLFLAG_RW,
2035 sc, 0, bnx_sysctl_rx_coal_ticks, "I",
2036 "Receive coalescing ticks (usec).");
2037 SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
2038 SYSCTL_CHILDREN(sc->bnx_sysctl_tree),
2039 OID_AUTO, "tx_coal_ticks",
2040 CTLTYPE_INT | CTLFLAG_RW,
2041 sc, 0, bnx_sysctl_tx_coal_ticks, "I",
2042 "Transmit coalescing ticks (usec).");
2043 SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
2044 SYSCTL_CHILDREN(sc->bnx_sysctl_tree),
2045 OID_AUTO, "rx_coal_bds",
2046 CTLTYPE_INT | CTLFLAG_RW,
2047 sc, 0, bnx_sysctl_rx_coal_bds, "I",
2048 "Receive max coalesced BD count.");
2049 SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
2050 SYSCTL_CHILDREN(sc->bnx_sysctl_tree),
2051 OID_AUTO, "tx_coal_bds",
2052 CTLTYPE_INT | CTLFLAG_RW,
2053 sc, 0, bnx_sysctl_tx_coal_bds, "I",
2054 "Transmit max coalesced BD count.");
2056 * A common design characteristic for many Broadcom
2057 * client controllers is that they only support a
2058 * single outstanding DMA read operation on the PCIe
2059 * bus. This means that it will take twice as long to
2060 * fetch a TX frame that is split into header and
2061 * payload buffers as it does to fetch a single,
2062 * contiguous TX frame (2 reads vs. 1 read). For these
2063 * controllers, coalescing buffers to reduce the number
2064 * of memory reads is an effective way to get maximum
2065 * performance (about 940Mbps). Without collapsing TX
2066 * buffers the maximum TCP bulk transfer performance
2067 * is about 850Mbps. However forcing coalescing mbufs
2068 * consumes a lot of CPU cycles, so leave it off by
2071 SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
2072 SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
2073 "force_defrag", CTLTYPE_INT | CTLFLAG_RW,
2074 sc, 0, bnx_sysctl_force_defrag, "I",
2075 "Force defragment on TX path");
2077 SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
2078 SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
2079 "tx_wreg", CTLTYPE_INT | CTLFLAG_RW,
2080 sc, 0, bnx_sysctl_tx_wreg, "I",
2081 "# of segments before writing to hardware register");
2083 SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
2084 SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
2085 "rx_coal_bds_int", CTLTYPE_INT | CTLFLAG_RW,
2086 sc, 0, bnx_sysctl_rx_coal_bds_int, "I",
2087 "Receive max coalesced BD count during interrupt.");
2088 SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
2089 SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
2090 "tx_coal_bds_int", CTLTYPE_INT | CTLFLAG_RW,
2091 sc, 0, bnx_sysctl_tx_coal_bds_int, "I",
2092 "Transmit max coalesced BD count during interrupt.");
2094 #ifdef BNX_TSO_DEBUG
/* Per-segment-count TSO statistics, one sysctl per bucket. */
2095 for (i = 0; i < BNX_TSO_NSTATS; ++i) {
2096 ksnprintf(desc, sizeof(desc), "tso%d", i + 1);
2097 SYSCTL_ADD_ULONG(&sc->bnx_sysctl_ctx,
2098 SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
2099 desc, CTLFLAG_RW, &sc->bnx_tsosegs[i], "");
2104 * Call MI attach routine.
2106 ether_ifattach(ifp, ether_addr, &sc->bnx_main_serialize);
/* Bind the send queue to the TX ring's CPU. */
2108 ifq_set_cpuid(&ifp->if_snd, sc->bnx_tx_ring[0].bnx_tx_cpuid);
2110 #ifdef IFPOLL_ENABLE
2111 ifpoll_compat_setup(&sc->bnx_npoll,
2112 &sc->bnx_sysctl_ctx, sc->bnx_sysctl_tree,
2113 device_get_unit(dev), ifp->if_serializer);
2116 error = bnx_setup_intr(sc);
/* Interrupt hookup failed after ifattach: undo the ifattach. */
2118 ether_ifdetach(ifp);
2122 sc->bnx_stat_cpuid = sc->bnx_intr_data[0].bnx_intr_cpuid;
/*
 * Detach: stop the chip and tear down interrupts under the ifnet
 * serializer, unhook the ifnet/ifmedia/miibus, then release the bus
 * resources and sysctl state acquired in bnx_attach().
 */
2131 bnx_detach(device_t dev)
2133 struct bnx_softc *sc = device_get_softc(dev);
2135 if (device_is_attached(dev)) {
2136 struct ifnet *ifp = &sc->arpcom.ac_if;
/* Quiesce the hardware before ripping out the interrupt handlers. */
2138 lwkt_serialize_enter(ifp->if_serializer);
2141 bnx_teardown_intr(sc, sc->bnx_intr_cnt);
2142 lwkt_serialize_exit(ifp->if_serializer);
2144 ether_ifdetach(ifp);
/* ifmedia state only exists for fiber (TBI) configurations. */
2147 if (sc->bnx_flags & BNX_FLAG_TBI)
2148 ifmedia_removeall(&sc->bnx_ifmedia);
2150 device_delete_child(dev, sc->bnx_miibus);
2151 bus_generic_detach(dev);
/* Release resources in reverse order of acquisition in attach. */
2155 if (sc->bnx_res != NULL) {
2156 bus_release_resource(dev, SYS_RES_MEMORY,
2157 BGE_PCI_BAR0, sc->bnx_res);
2160 if (sc->bnx_sysctl_tree != NULL)
2161 sysctl_ctx_free(&sc->bnx_sysctl_ctx);
/*
 * Issue a global chip reset, restore the PCI configuration state the
 * reset clobbers, and wait for the on-chip bootcode firmware handshake
 * (magic-number protocol in SRAM) to complete before returning.
 * NOTE(review): this excerpt elides some original source lines; the
 * visible statements are unchanged.
 */
2169 bnx_reset(struct bnx_softc *sc)
2172 uint32_t cachesize, command, pcistate, reset;
2173 void (*write_op)(struct bnx_softc *, uint32_t, uint32_t);
2179 write_op = bnx_writemem_direct;
2181 /* Save some important PCI state. */
2182 cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
2183 command = pci_read_config(dev, BGE_PCI_CMD, 4);
2184 pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);
/* Mask interrupts and enable indirect/PCI-state access across reset. */
2186 pci_write_config(dev, BGE_PCI_MISC_CTL,
2187 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2188 BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW|
2189 BGE_PCIMISCCTL_TAGGED_STATUS, 4);
2191 /* Disable fastboot on controllers that support it. */
2193 if_printf(&sc->arpcom.ac_if, "Disabling fastboot\n");
2194 CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0x0);
2197 * Write the magic number to SRAM at offset 0xB50.
2198 * When firmware finishes its initialization it will
2199 * write ~BGE_MAGIC_NUMBER to the same location.
2201 bnx_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
2203 reset = BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1);
2205 /* XXX: Broadcom Linux driver. */
2206 /* Force PCI-E 1.0a mode */
2207 if (!BNX_IS_57765_PLUS(sc) &&
2208 CSR_READ_4(sc, BGE_PCIE_PHY_TSTCTL) ==
2209 (BGE_PCIE_PHY_TSTCTL_PSCRAM |
2210 BGE_PCIE_PHY_TSTCTL_PCIE10)) {
2211 CSR_WRITE_4(sc, BGE_PCIE_PHY_TSTCTL,
2212 BGE_PCIE_PHY_TSTCTL_PSCRAM);
2214 if (sc->bnx_chipid != BGE_CHIPID_BCM5750_A0) {
2215 /* Prevent PCIE link training during global reset */
2216 CSR_WRITE_4(sc, BGE_MISC_CFG, (1<<29));
2221 * Set GPHY Power Down Override to leave GPHY
2222 * powered up in D0 uninitialized.
2224 if ((sc->bnx_flags & BNX_FLAG_CPMU) == 0)
2225 reset |= BGE_MISCCFG_GPHY_PD_OVERRIDE;
2227 /* Issue global reset */
2228 write_op(sc, BGE_MISC_CFG, reset);
2232 /* XXX: Broadcom Linux driver. */
2233 if (sc->bnx_chipid == BGE_CHIPID_BCM5750_A0) {
2236 DELAY(500000); /* wait for link training to complete */
2237 v = pci_read_config(dev, 0xc4, 4);
2238 pci_write_config(dev, 0xc4, v | (1<<15), 4);
/* Clean up PCI Express Device Control after the reset. */
2241 devctl = pci_read_config(dev, sc->bnx_pciecap + PCIER_DEVCTRL, 2);
2243 /* Disable no snoop and disable relaxed ordering. */
2244 devctl &= ~(PCIEM_DEVCTL_RELAX_ORDER | PCIEM_DEVCTL_NOSNOOP);
2246 /* Old PCI-E chips only support 128 bytes Max PayLoad Size. */
2247 if ((sc->bnx_flags & BNX_FLAG_CPMU) == 0) {
2248 devctl &= ~PCIEM_DEVCTL_MAX_PAYLOAD_MASK;
2249 devctl |= PCIEM_DEVCTL_MAX_PAYLOAD_128;
2252 pci_write_config(dev, sc->bnx_pciecap + PCIER_DEVCTRL,
2255 /* Clear error status. */
2256 pci_write_config(dev, sc->bnx_pciecap + PCIER_DEVSTS,
2257 PCIEM_DEVSTS_CORR_ERR |
2258 PCIEM_DEVSTS_NFATAL_ERR |
2259 PCIEM_DEVSTS_FATAL_ERR |
2260 PCIEM_DEVSTS_UNSUPP_REQ, 2);
2262 /* Reset some of the PCI state that got zapped by reset */
2263 pci_write_config(dev, BGE_PCI_MISC_CTL,
2264 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2265 BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW|
2266 BGE_PCIMISCCTL_TAGGED_STATUS, 4);
2267 pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
2268 pci_write_config(dev, BGE_PCI_CMD, command, 4);
2269 write_op(sc, BGE_MISC_CFG, (65 << 1));
2271 /* Enable memory arbiter */
2272 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
2275 * Poll until we see the 1's complement of the magic number.
2276 * This indicates that the firmware initialization is complete.
2278 for (i = 0; i < BNX_FIRMWARE_TIMEOUT; i++) {
2279 val = bnx_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
2280 if (val == ~BGE_MAGIC_NUMBER)
/* Non-fatal: log and continue if firmware never answered. */
2284 if (i == BNX_FIRMWARE_TIMEOUT) {
2285 if_printf(&sc->arpcom.ac_if, "firmware handshake "
2286 "timed out, found 0x%08x\n", val);
2289 /* BCM57765 A0 needs additional time before accessing. */
2290 if (sc->bnx_chipid == BGE_CHIPID_BCM57765_A0)
2294 * XXX Wait for the value of the PCISTATE register to
2295 * return to its original pre-reset state. This is a
2296 * fairly good indicator of reset completion. If we don't
2297 * wait for the reset to fully complete, trying to read
2298 * from the device's non-PCI registers may yield garbage
2301 for (i = 0; i < BNX_TIMEOUT; i++) {
2302 if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate)
2307 /* Fix up byte swapping */
2308 CSR_WRITE_4(sc, BGE_MODE_CTL, bnx_dma_swap_options(sc));
2310 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
2313 * The 5704 in TBI mode apparently needs some special
2314 * adjustment to insure the SERDES drive level is set
2317 if (sc->bnx_asicrev == BGE_ASICREV_BCM5704 &&
2318 (sc->bnx_flags & BNX_FLAG_TBI)) {
2321 serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG);
2322 serdescfg = (serdescfg & ~0xFFF) | 0x880;
2323 CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg);
/* Restore MI mode with hardware autopolling kept off. */
2326 CSR_WRITE_4(sc, BGE_MI_MODE,
2327 sc->bnx_mi_mode & ~BGE_MIMODE_AUTOPOLL);
2330 /* XXX: Broadcom Linux driver. */
2331 if (!BNX_IS_57765_PLUS(sc)) {
2334 /* Enable Data FIFO protection. */
2335 v = CSR_READ_4(sc, BGE_PCIE_TLDLPL_PORT);
2336 CSR_WRITE_4(sc, BGE_PCIE_TLDLPL_PORT, v | (1 << 25));
/* 5720: drop the CPMU MAC clock override again after reset. */
2341 if (sc->bnx_asicrev == BGE_ASICREV_BCM5720) {
2342 BNX_CLRBIT(sc, BGE_CPMU_CLCK_ORIDE,
2343 CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
2348 * Frame reception handling. This is called if there's a frame
2349 * on the receive return list.
2351 * Note: we have to be able to handle two possibilities here:
2352 * 1) the frame is from the jumbo recieve ring
2353 * 2) the frame is from the standard receive ring
/*
 * Drain completed descriptors from the RX return ring up to hw producer
 * index 'rx_prod', replenish the std/jumbo producer rings and hand
 * finished mbufs to the stack.  The loop stops when 'count' reaches 0;
 * callers pass -1, which given the "count != 0" test appears to mean
 * "no limit" -- NOTE(review): the count decrement is elided here, confirm.
 * NOTE(review): several original lines are elided in this excerpt.
 */
2357 bnx_rxeof(struct bnx_rx_ret_ring *ret, uint16_t rx_prod, int count)
2359 struct bnx_softc *sc = ret->bnx_sc;
2360 struct bnx_rx_std_ring *std = ret->bnx_std;
2361 struct ifnet *ifp = &sc->arpcom.ac_if;
2362 int stdcnt = 0, jumbocnt = 0;
2364 while (ret->bnx_rx_saved_considx != rx_prod && count != 0) {
2365 struct bge_rx_bd *cur_rx;
2367 struct mbuf *m = NULL;
2368 uint16_t vlan_tag = 0;
2373 cur_rx = &ret->bnx_rx_ret_ring[ret->bnx_rx_saved_considx];
/* bge_idx tells us which producer-ring slot this frame came from. */
2375 rxidx = cur_rx->bge_idx;
2376 BNX_INC(ret->bnx_rx_saved_considx, BNX_RETURN_RING_CNT);
2378 if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
2380 vlan_tag = cur_rx->bge_vlan_tag;
/* Case 1: frame arrived on the jumbo receive ring. */
2383 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
2384 BNX_INC(sc->bnx_jumbo, BGE_JUMBO_RX_RING_CNT);
/* SW/HW index mismatch: count an error, recycle the buffer, drop. */
2387 if (rxidx != sc->bnx_jumbo) {
2388 IFNET_STAT_INC(ifp, ierrors, 1);
2389 if_printf(ifp, "sw jumbo index(%d) "
2390 "and hw jumbo index(%d) mismatch, drop!\n",
2391 sc->bnx_jumbo, rxidx);
2392 bnx_setup_rxdesc_jumbo(sc, rxidx);
2396 m = sc->bnx_cdata.bnx_rx_jumbo_chain[rxidx].bnx_rx_mbuf;
2397 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2398 IFNET_STAT_INC(ifp, ierrors, 1);
2399 bnx_setup_rxdesc_jumbo(sc, sc->bnx_jumbo);
/* Replace the mbuf; on allocation failure recycle the old one. */
2402 if (bnx_newbuf_jumbo(sc, sc->bnx_jumbo, 0)) {
2403 IFNET_STAT_INC(ifp, ierrors, 1);
2404 bnx_setup_rxdesc_jumbo(sc, sc->bnx_jumbo);
/* Case 2: frame arrived on the standard receive ring. */
2408 BNX_INC(std->bnx_rx_std, BGE_STD_RX_RING_CNT);
2411 if (rxidx != std->bnx_rx_std) {
2412 IFNET_STAT_INC(ifp, ierrors, 1);
2413 if_printf(ifp, "sw std index(%d) "
2414 "and hw std index(%d) mismatch, drop!\n",
2415 std->bnx_rx_std, rxidx);
2416 bnx_setup_rxdesc_std(std, rxidx);
2420 m = std->bnx_rx_std_buf[rxidx].bnx_rx_mbuf;
2421 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2422 IFNET_STAT_INC(ifp, ierrors, 1);
2423 bnx_setup_rxdesc_std(std, std->bnx_rx_std);
2426 if (bnx_newbuf_std(ret, std->bnx_rx_std, 0)) {
2427 IFNET_STAT_INC(ifp, ierrors, 1);
2428 bnx_setup_rxdesc_std(std, std->bnx_rx_std);
/* Frame accepted: finalize the mbuf header (strip the CRC length). */
2433 IFNET_STAT_INC(ifp, ipackets, 1);
2434 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
2435 m->m_pkthdr.rcvif = ifp;
/* Pass hardware checksum results to the stack (IPv4 only here). */
2437 if ((ifp->if_capenable & IFCAP_RXCSUM) &&
2438 (cur_rx->bge_flags & BGE_RXBDFLAG_IPV6) == 0) {
2439 if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
2440 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2441 if ((cur_rx->bge_error_flag &
2442 BGE_RXERRFLAG_IP_CSUM_NOK) == 0)
2443 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2445 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) {
2446 m->m_pkthdr.csum_data =
2447 cur_rx->bge_tcp_udp_csum;
2448 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
2454 * If we received a packet with a vlan tag, pass it
2455 * to vlan_input() instead of ether_input().
2458 m->m_flags |= M_VLANTAG;
2459 m->m_pkthdr.ether_vlantag = vlan_tag;
2461 ifp->if_input(ifp, m);
/* Sync mailbox registers: new consumer index and producer indices. */
2464 bnx_writembx(sc, BGE_MBX_RX_CONS0_LO, ret->bnx_rx_saved_considx);
2466 bnx_writembx(sc, BGE_MBX_RX_STD_PROD_LO, std->bnx_rx_std);
2468 bnx_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bnx_jumbo);
/*
 * Reclaim TX descriptors up to hardware consumer index 'tx_cons':
 * unload DMA maps, free transmitted mbufs, and un-throttle the send
 * queue once enough ring slots are available again.
 * NOTE(review): some original lines are elided in this excerpt.
 */
2472 bnx_txeof(struct bnx_tx_ring *txr, uint16_t tx_cons)
2474 struct ifnet *ifp = &txr->bnx_sc->arpcom.ac_if;
2477 * Go through our tx ring and free mbufs for those
2478 * frames that have been sent.
2480 while (txr->bnx_tx_saved_considx != tx_cons) {
2481 struct bnx_tx_buf *buf;
2484 idx = txr->bnx_tx_saved_considx;
2485 buf = &txr->bnx_tx_buf[idx];
/* Only the last descriptor of a packet carries the mbuf pointer. */
2486 if (buf->bnx_tx_mbuf != NULL) {
2487 IFNET_STAT_INC(ifp, opackets, 1);
2488 bus_dmamap_unload(txr->bnx_tx_mtag,
2489 buf->bnx_tx_dmamap);
2490 m_freem(buf->bnx_tx_mbuf);
2491 buf->bnx_tx_mbuf = NULL;
2494 BNX_INC(txr->bnx_tx_saved_considx, BGE_TX_RING_CNT);
/* Clear OACTIVE once there is room for a worst-case packet again. */
2497 if ((BGE_TX_RING_CNT - txr->bnx_tx_cnt) >=
2498 (BNX_NSEG_RSVD + BNX_NSEG_SPARE))
2499 ifq_clr_oactive(&ifp->if_snd);
2501 if (txr->bnx_tx_cnt == 0)
/* Kick the transmit path if packets are still queued. */
2504 if (!ifq_is_empty(&ifp->if_snd))
2508 #ifdef IFPOLL_ENABLE
/*
 * ifpoll registration hook: when 'info' is non-NULL (registration,
 * per the visible branch that fills it in) install the polling handler
 * and disable the hardware interrupt; otherwise (deregistration)
 * re-enable the interrupt and restore the TX queue's cpuid.
 * NOTE(review): some original lines are elided in this excerpt.
 */
2511 bnx_npoll(struct ifnet *ifp, struct ifpoll_info *info)
2513 struct bnx_softc *sc = ifp->if_softc;
2515 ASSERT_SERIALIZED(ifp->if_serializer);
2518 int cpuid = sc->bnx_npoll.ifpc_cpuid;
/* Route RX polling to the configured polling CPU. */
2520 info->ifpi_rx[cpuid].poll_func = bnx_npoll_compat;
2521 info->ifpi_rx[cpuid].arg = NULL;
2522 info->ifpi_rx[cpuid].serializer = ifp->if_serializer;
2524 if (ifp->if_flags & IFF_RUNNING)
2525 bnx_disable_intr(sc);
2526 ifq_set_cpuid(&ifp->if_snd, cpuid);
2528 if (ifp->if_flags & IFF_RUNNING)
2529 bnx_enable_intr(sc);
/* Back to interrupt mode: TX queue follows the interrupt CPU again. */
2530 ifq_set_cpuid(&ifp->if_snd, sc->bnx_tx_ring[0].bnx_tx_cpuid);
/*
 * Polling-mode work loop (runs instead of the interrupt handler while
 * IFF_NPOLLING is active): periodically samples link state at the
 * configured status fraction, then services RX and TX rings.
 * NOTE(review): some original lines are elided in this excerpt.
 */
2535 bnx_npoll_compat(struct ifnet *ifp, void *arg __unused, int cycle)
2537 struct bnx_softc *sc = ifp->if_softc;
2538 struct bnx_tx_ring *txr = &sc->bnx_tx_ring[0]; /* XXX */
2539 struct bnx_rx_ret_ring *ret = &sc->bnx_rx_ret_ring[0]; /* XXX */
2540 struct bge_status_block *sblk = sc->bnx_ldata.bnx_status_block;
2541 uint16_t rx_prod, tx_cons;
2543 ASSERT_SERIALIZED(ifp->if_serializer);
/* Only check link state every ifpc_stfrac polls. */
2545 if (sc->bnx_npoll.ifpc_stcount-- == 0) {
2546 sc->bnx_npoll.ifpc_stcount = sc->bnx_npoll.ifpc_stfrac;
2548 * Process link state changes.
2553 sc->bnx_status_tag = sblk->bge_status_tag;
2556 * Use a load fence to ensure that status_tag is saved
2557 * before rx_prod and tx_cons.
2561 rx_prod = *ret->bnx_rx_considx;
2562 tx_cons = *txr->bnx_tx_considx;
/* Service rings only when the hardware indices actually moved. */
2564 if (ret->bnx_rx_saved_considx != rx_prod)
2565 bnx_rxeof(ret, rx_prod, cycle);
2567 if (txr->bnx_tx_saved_considx != tx_cons)
2568 bnx_txeof(txr, tx_cons);
2571 #endif	/* IFPOLL_ENABLE */
/*
 * Legacy (INTx) interrupt handler.  With tagged status, first check
 * whether this device actually asserted the interrupt (the line may be
 * shared); if not, bail out without touching the chip.
 * NOTE(review): some original lines are elided in this excerpt.
 */
2574 bnx_intr_legacy(void *xsc)
2576 struct bnx_softc *sc = xsc;
2577 struct bge_status_block *sblk = sc->bnx_ldata.bnx_status_block;
/* Status tag unchanged: interrupt may belong to another device. */
2579 if (sc->bnx_status_tag == sblk->bge_status_tag) {
2582 val = pci_read_config(sc->bnx_dev, BGE_PCI_PCISTATE, 4);
2583 if (val & BGE_PCISTAT_INTR_NOTACT)
2589 * Interrupt will have to be disabled if tagged status
2590 * is used, else interrupt will always be asserted on
2591 * certain chips (at least on BCM5750 AX/BX).
/* Writing 1 to the IRQ mailbox masks further interrupts. */
2593 bnx_writembx(sc, BGE_MBX_IRQ0_LO, 1);
/*
 * MSI interrupt handler body (function header elided in this excerpt):
 * mask the interrupt via the IRQ mailbox before servicing the chip.
 */
2601 struct bnx_softc *sc = xsc;
2603 /* Disable interrupt first */
2604 bnx_writembx(sc, BGE_MBX_IRQ0_LO, 1);
/* One-shot MSI variant: hardware auto-masks, no mailbox write needed
 * up front -- NOTE(review): body elided in this excerpt. */
2609 bnx_msi_oneshot(void *xsc)
/*
 * Common interrupt service: snapshot the status block, handle link
 * events, service RX/TX rings when running, then re-enable interrupts
 * by acking the saved status tag through the IRQ mailbox.
 * NOTE(review): some original lines are elided in this excerpt.
 */
2615 bnx_intr(struct bnx_softc *sc)
2617 struct ifnet *ifp = &sc->arpcom.ac_if;
2618 struct bge_status_block *sblk = sc->bnx_ldata.bnx_status_block;
2621 sc->bnx_status_tag = sblk->bge_status_tag;
2623 * Use a load fence to ensure that status_tag is saved
2624 * before rx_prod, tx_cons and status.
2628 status = sblk->bge_status;
/* Link change bit or a pending software-requested link event. */
2630 if ((status & BGE_STATFLAG_LINKSTATE_CHANGED) || sc->bnx_link_evt)
2633 if (ifp->if_flags & IFF_RUNNING) {
2634 struct bnx_tx_ring *txr = &sc->bnx_tx_ring[0]; /* XXX */
2635 struct bnx_rx_ret_ring *ret = &sc->bnx_rx_ret_ring[0]; /* XXX */
2636 uint16_t rx_prod, tx_cons;
2638 rx_prod = *ret->bnx_rx_considx;
2639 tx_cons = *txr->bnx_tx_considx;
2641 if (ret->bnx_rx_saved_considx != rx_prod)
2642 bnx_rxeof(ret, rx_prod, -1);
2644 if (txr->bnx_tx_saved_considx != tx_cons)
2645 bnx_txeof(txr, tx_cons);
/* Ack with the tag in bits 31:24 -- unmasks the interrupt. */
2648 bnx_writembx(sc, BGE_MBX_IRQ0_LO, sc->bnx_status_tag << 24);
/*
 * Periodic (1 Hz) timer callback -- bnx_tick (function header elided in
 * this excerpt): update statistics and poll link state, then re-arm.
 * Runs on the statistics CPU under the ifnet serializer.
 */
2654 struct bnx_softc *sc = xsc;
2655 struct ifnet *ifp = &sc->arpcom.ac_if;
2657 lwkt_serialize_enter(ifp->if_serializer);
2659 KKASSERT(mycpuid == sc->bnx_stat_cpuid);
2661 bnx_stats_update_regs(sc);
2663 if (sc->bnx_flags & BNX_FLAG_TBI) {
2665 * Since in TBI mode auto-polling can't be used we should poll
2666 * link status manually. Here we register pending link event
2667 * and trigger interrupt.
2670 BNX_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
/* Copper with no link yet: drive the MII state machine manually. */
2671 } else if (!sc->bnx_link) {
2672 mii_tick(device_get_softc(sc->bnx_miibus));
/* Re-arm for the next second. */
2675 callout_reset(&sc->bnx_stat_timer, hz, bnx_tick, sc);
2677 lwkt_serialize_exit(ifp->if_serializer);
/*
 * Read the MAC statistics registers into a local struct, 32 bits at a
 * time, and fold the collision counters into the ifnet statistics.
 * NOTE(review): some original lines are elided in this excerpt.
 */
2681 bnx_stats_update_regs(struct bnx_softc *sc)
2683 struct ifnet *ifp = &sc->arpcom.ac_if;
2684 struct bge_mac_stats_regs stats;
/* Walk the register block word-by-word into the overlaid struct. */
2688 s = (uint32_t *)&stats;
2689 for (i = 0; i < sizeof(struct bge_mac_stats_regs); i += 4) {
2690 *s = CSR_READ_4(sc, BGE_RX_STATS + i);
2694 IFNET_STAT_SET(ifp, collisions,
2695 (stats.dot3StatsSingleCollisionFrames +
2696 stats.dot3StatsMultipleCollisionFrames +
2697 stats.dot3StatsExcessiveCollisions +
2698 stats.dot3StatsLateCollisions));
2702 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
2703 * pointers to descriptors.
/*
 * Build TX descriptors for *m_head0 starting at ring index *txidx.
 * Handles TSO setup, checksum-offload flags, VLAN tags, runt padding,
 * and the chip's short-DMA / single-DMA-read quirks.  On success the
 * packet's segments are charged to the ring and *txidx is advanced past
 * the chain.  May replace *m_head0 (defrag); on the visible error paths
 * the caller's mbuf handling applies.
 * NOTE(review): some original lines are elided in this excerpt.
 */
2706 bnx_encap(struct bnx_tx_ring *txr, struct mbuf **m_head0, uint32_t *txidx,
2709 struct bge_tx_bd *d = NULL;
2710 uint16_t csum_flags = 0, vlan_tag = 0, mss = 0;
2711 bus_dma_segment_t segs[BNX_NSEG_NEW];
2713 int error, maxsegs, nsegs, idx, i;
2714 struct mbuf *m_head = *m_head0, *m_new;
/* TSO request: compute mss/csum_flags and massage the headers. */
2716 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
2717 #ifdef BNX_TSO_DEBUG
2721 error = bnx_setup_tso(txr, m_head0, &mss, &csum_flags);
2726 #ifdef BNX_TSO_DEBUG
/* Histogram of TSO segment counts, clamped to the stats array. */
2727 tso_nsegs = (m_head->m_pkthdr.len /
2728 m_head->m_pkthdr.tso_segsz) - 1;
2729 if (tso_nsegs > (BNX_TSO_NSTATS - 1))
2730 tso_nsegs = BNX_TSO_NSTATS - 1;
2731 else if (tso_nsegs < 0)
2733 txr->bnx_sc->bnx_tsosegs[tso_nsegs]++;
/* Plain checksum offload: translate stack flags to BD flags. */
2735 } else if (m_head->m_pkthdr.csum_flags & BNX_CSUM_FEATURES) {
2736 if (m_head->m_pkthdr.csum_flags & CSUM_IP)
2737 csum_flags |= BGE_TXBDFLAG_IP_CSUM;
2738 if (m_head->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
2739 csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
2740 if (m_head->m_flags & M_LASTFRAG)
2741 csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
2742 else if (m_head->m_flags & M_FRAG)
2743 csum_flags |= BGE_TXBDFLAG_IP_FRAG;
2745 if (m_head->m_flags & M_VLANTAG) {
2746 csum_flags |= BGE_TXBDFLAG_VLAN_TAG;
2747 vlan_tag = m_head->m_pkthdr.ether_vlantag;
2751 map = txr->bnx_tx_buf[idx].bnx_tx_dmamap;
/* Cap the segment count by free ring slots minus the reserve. */
2753 maxsegs = (BGE_TX_RING_CNT - txr->bnx_tx_cnt) - BNX_NSEG_RSVD;
2754 KASSERT(maxsegs >= BNX_NSEG_SPARE,
2755 ("not enough segments %d", maxsegs));
2757 if (maxsegs > BNX_NSEG_NEW)
2758 maxsegs = BNX_NSEG_NEW;
2761 * Pad outbound frame to BGE_MIN_FRAMELEN for an unusual reason.
2762 * The bge hardware will pad out Tx runts to BGE_MIN_FRAMELEN,
2763 * but when such padded frames employ the bge IP/TCP checksum
2764 * offload, the hardware checksum assist gives incorrect results
2765 * (possibly from incorporating its own padding into the UDP/TCP
2766 * checksum; who knows). If we pad such runts with zeros, the
2767 * onboard checksum comes out correct.
2769 if ((csum_flags & BGE_TXBDFLAG_TCP_UDP_CSUM) &&
2770 m_head->m_pkthdr.len < BNX_MIN_FRAMELEN) {
2771 error = m_devpad(m_head, BNX_MIN_FRAMELEN);
/* Chip quirk: coalesce chains whose leading fragments are too short. */
2776 if ((txr->bnx_tx_flags & BNX_TX_FLAG_SHORTDMA) &&
2777 m_head->m_next != NULL) {
2778 m_new = bnx_defrag_shortdma(m_head);
2779 if (m_new == NULL) {
2783 *m_head0 = m_head = m_new;
2785 if ((m_head->m_pkthdr.csum_flags & CSUM_TSO) == 0 &&
2786 (txr->bnx_tx_flags & BNX_TX_FLAG_FORCE_DEFRAG) &&
2787 m_head->m_next != NULL) {
2789 * Forcefully defragment mbuf chain to overcome hardware
2790 * limitation which only support a single outstanding
2791 * DMA read operation. If it fails, keep moving on using
2792 * the original mbuf chain.
2794 m_new = m_defrag(m_head, MB_DONTWAIT);
2796 *m_head0 = m_head = m_new;
2799 error = bus_dmamap_load_mbuf_defrag(txr->bnx_tx_mtag, map,
2800 m_head0, segs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
2803 *segs_used += nsegs;
2806 bus_dmamap_sync(txr->bnx_tx_mtag, map, BUS_DMASYNC_PREWRITE);
/* Fill one descriptor per DMA segment. */
2808 for (i = 0; ; i++) {
2809 d = &txr->bnx_tx_ring[idx];
2811 d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr);
2812 d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr);
2813 d->bge_len = segs[i].ds_len;
2814 d->bge_flags = csum_flags;
2815 d->bge_vlan_tag = vlan_tag;
2820 BNX_INC(idx, BGE_TX_RING_CNT);
2822 /* Mark the last segment as end of packet... */
2823 d->bge_flags |= BGE_TXBDFLAG_END;
2826 * Insure that the map for this transmission is placed at
2827 * the array index of the last descriptor in this chain.
2829 txr->bnx_tx_buf[*txidx].bnx_tx_dmamap = txr->bnx_tx_buf[idx].bnx_tx_dmamap;
2830 txr->bnx_tx_buf[idx].bnx_tx_dmamap = map;
2831 txr->bnx_tx_buf[idx].bnx_tx_mbuf = m_head;
2832 txr->bnx_tx_cnt += nsegs;
2834 BNX_INC(idx, BGE_TX_RING_CNT);
2845 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
2846 * to the mbuf data regions directly in the transmit descriptors.
/*
 * ifnet if_start method: dequeue packets, encapsulate them onto TX
 * ring 0, and notify the chip via the TX mailbox.  Sets OACTIVE and
 * stops when the ring gets too full for a worst-case packet.
 * NOTE(review): some original lines are elided in this excerpt.
 */
2849 bnx_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
2851 struct bnx_softc *sc = ifp->if_softc;
2852 struct bnx_tx_ring *txr = &sc->bnx_tx_ring[0]; /* XXX */
2853 struct mbuf *m_head = NULL;
2857 ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq);
/* Nothing to do unless the interface is up and not throttled. */
2859 if ((ifp->if_flags & IFF_RUNNING) == 0 || ifq_is_oactive(&ifp->if_snd))
2862 prodidx = txr->bnx_tx_prodidx;
2864 while (txr->bnx_tx_buf[prodidx].bnx_tx_mbuf == NULL) {
2866 * Sanity check: avoid coming within BGE_NSEG_RSVD
2867 * descriptors of the end of the ring. Also make
2868 * sure there are BGE_NSEG_SPARE descriptors for
2869 * jumbo buffers' or TSO segments' defragmentation.
2871 if ((BGE_TX_RING_CNT - txr->bnx_tx_cnt) <
2872 (BNX_NSEG_RSVD + BNX_NSEG_SPARE)) {
2873 ifq_set_oactive(&ifp->if_snd);
2877 m_head = ifq_dequeue(&ifp->if_snd, NULL);
2882 * Pack the data into the transmit ring. If we
2883 * don't have room, set the OACTIVE flag and wait
2884 * for the NIC to drain the ring.
2886 if (bnx_encap(txr, &m_head, &prodidx, &nsegs)) {
2887 ifq_set_oactive(&ifp->if_snd);
2888 IFNET_STAT_INC(ifp, oerrors, 1);
/* Batch mailbox writes: only kick the chip every bnx_tx_wreg segs. */
2892 if (nsegs >= txr->bnx_tx_wreg) {
2894 bnx_writembx(txr->bnx_sc, txr->bnx_tx_mbx, prodidx);
/* Let BPF listeners see the outgoing frame. */
2898 ETHER_BPF_MTAP(ifp, m_head);
2901 * Set a timeout in case the chip goes out to lunch.
/* Final kick for any remaining batched descriptors. */
2908 bnx_writembx(txr->bnx_sc, txr->bnx_tx_mbx, prodidx);
2910 txr->bnx_tx_prodidx = prodidx;
/*
 * Interface init routine -- bnx_init (function header elided in this
 * excerpt): stop any current activity, program the chip (block init,
 * MTU, MAC address, filters, rings, TX/RX engines), then mark the
 * interface running and arm the stats timer.  Called with 'xsc'
 * pointing at the softc; runs under the ifnet serializer.
 */
2916 struct bnx_softc *sc = xsc;
2917 struct ifnet *ifp = &sc->arpcom.ac_if;
2922 ASSERT_SERIALIZED(ifp->if_serializer);
2924 /* Cancel pending I/O and flush buffers. */
2930 * Init the various state machines, ring
2931 * control blocks and firmware.
2933 if (bnx_blockinit(sc)) {
2934 if_printf(ifp, "initialization failure\n");
/* Program the largest acceptable frame: MTU + headers + CRC + VLAN. */
2940 CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
2941 ETHER_HDR_LEN + ETHER_CRC_LEN + EVL_ENCAPLEN);
2943 /* Load our MAC address. */
2944 m = (uint16_t *)&sc->arpcom.ac_enaddr[0];
2945 CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
2946 CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
2948 /* Enable or disable promiscuous mode as needed. */
2951 /* Program multicast filter. */
2955 if (bnx_init_rx_ring_std(&sc->bnx_rx_std_ring)) {
2956 if_printf(ifp, "RX ring initialization failed\n");
2961 /* Init jumbo RX ring. */
2962 if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN)) {
2963 if (bnx_init_rx_ring_jumbo(sc)) {
2964 if_printf(ifp, "Jumbo RX ring initialization failed\n");
2970 /* Init our RX return ring index */
2971 for (i = 0; i < sc->bnx_rx_retcnt; ++i)
2972 sc->bnx_rx_ret_ring[i].bnx_rx_saved_considx = 0;
2975 for (i = 0; i < sc->bnx_tx_ringcnt; ++i)
2976 bnx_init_tx_ring(&sc->bnx_tx_ring[i]);
2978 /* Enable TX MAC state machine lockup fix. */
2979 mode = CSR_READ_4(sc, BGE_TX_MODE);
2980 mode |= BGE_TXMODE_MBUF_LOCKUP_FIX;
/* 5720/5762: preserve the chip-set jumbo/count-down mode bits. */
2981 if (sc->bnx_asicrev == BGE_ASICREV_BCM5720 ||
2982 sc->bnx_asicrev == BGE_ASICREV_BCM5762) {
2983 mode &= ~(BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE);
2984 mode |= CSR_READ_4(sc, BGE_TX_MODE) &
2985 (BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE);
2987 /* Turn on transmitter */
2988 CSR_WRITE_4(sc, BGE_TX_MODE, mode | BGE_TXMODE_ENABLE);
2990 /* Turn on receiver */
2991 BNX_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
2994 * Set the number of good frames to receive after RX MBUF
2995 * Low Watermark has been reached. After the RX MAC receives
2996 * this number of frames, it will drop subsequent incoming
2997 * frames until the MBUF High Watermark is reached.
2999 if (BNX_IS_57765_FAMILY(sc))
3000 CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 1);
3002 CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 2);
3004 if (sc->bnx_intr_type == PCI_INTR_TYPE_MSI) {
3006 if_printf(ifp, "MSI_MODE: %#x\n",
3007 CSR_READ_4(sc, BGE_MSI_MODE));
3011 /* Tell firmware we're alive. */
3012 BNX_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3014 /* Enable host interrupts if polling(4) is not enabled. */
3015 PCI_SETBIT(sc->bnx_dev, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA, 4);
3016 #ifdef IFPOLL_ENABLE
3017 if (ifp->if_flags & IFF_NPOLLING)
3018 bnx_disable_intr(sc);
3021 bnx_enable_intr(sc);
/* Kick off media selection / link negotiation. */
3023 bnx_ifmedia_upd(ifp);
3025 ifp->if_flags |= IFF_RUNNING;
3026 ifq_clr_oactive(&ifp->if_snd);
/* Start the 1 Hz statistics/link timer on its dedicated CPU. */
3028 callout_reset_bycpu(&sc->bnx_stat_timer, hz, bnx_tick, sc,
3029 sc->bnx_stat_cpuid);
3033 * Set media options.
/*
 * ifmedia "set" handler: on TBI (fiber) boards program the MAC duplex
 * directly; on copper boards delegate to the MII layer (resetting each
 * PHY first), then force a coalesce-now interrupt so a link update is
 * processed.  NOTE(review): some original lines are elided here.
 */
3036 bnx_ifmedia_upd(struct ifnet *ifp)
3038 struct bnx_softc *sc = ifp->if_softc;
3040 /* If this is a 1000baseX NIC, enable the TBI port. */
3041 if (sc->bnx_flags & BNX_FLAG_TBI) {
3042 struct ifmedia *ifm = &sc->bnx_ifmedia;
3044 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
3047 switch(IFM_SUBTYPE(ifm->ifm_media)) {
/* Full duplex clears the half-duplex MAC bit; otherwise set it. */
3052 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
3053 BNX_CLRBIT(sc, BGE_MAC_MODE,
3054 BGE_MACMODE_HALF_DUPLEX);
3056 BNX_SETBIT(sc, BGE_MAC_MODE,
3057 BGE_MACMODE_HALF_DUPLEX);
3064 struct mii_data *mii = device_get_softc(sc->bnx_miibus);
/* Reset every attached PHY instance before renegotiating. */
3068 if (mii->mii_instance) {
3069 struct mii_softc *miisc;
3071 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
3072 mii_phy_reset(miisc);
3077 * Force an interrupt so that we will call bnx_link_upd
3078 * if needed and clear any pending link state attention.
3079 * Without this we are not getting any further interrupts
3080 * for link state changes and thus will not UP the link and
3081 * not be able to send in bnx_start. The only way to get
3082 * things working was to receive a packet and get an RX
3085 * bnx_tick should help for fiber cards and we might not
3086 * need to do this here if BNX_FLAG_TBI is set but as
3087 * we poll for fiber anyway it should not harm.
3089 BNX_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
3095 * Report current media status.
/*
 * ifmedia "status" handler: for TBI boards derive link/duplex from MAC
 * status registers; for copper boards poll the MII layer and copy its
 * reported media state.  NOTE(review): some original lines are elided.
 */
3098 bnx_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
3100 struct bnx_softc *sc = ifp->if_softc;
3102 if (sc->bnx_flags & BNX_FLAG_TBI) {
3103 ifmr->ifm_status = IFM_AVALID;
3104 ifmr->ifm_active = IFM_ETHER;
/* PCS sync bit indicates an active fiber link. */
3105 if (CSR_READ_4(sc, BGE_MAC_STS) &
3106 BGE_MACSTAT_TBI_PCS_SYNCHED) {
3107 ifmr->ifm_status |= IFM_ACTIVE;
3109 ifmr->ifm_active |= IFM_NONE;
3113 ifmr->ifm_active |= IFM_1000_SX;
3114 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
3115 ifmr->ifm_active |= IFM_HDX;
3117 ifmr->ifm_active |= IFM_FDX;
3119 struct mii_data *mii = device_get_softc(sc->bnx_miibus);
3122 ifmr->ifm_active = mii->mii_media_active;
3123 ifmr->ifm_status = mii->mii_media_status;
/*
 * ifnet ioctl handler: MTU changes, IFF flag changes (with the cheap
 * promisc/allmulti fast path), media ioctls, and capability toggles
 * (HWCSUM/TSO).  Unhandled commands fall through to ether_ioctl().
 * Runs under the ifnet serializer.
 * NOTE(review): some original lines (case labels, braces) are elided.
 */
3128 bnx_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
3130 struct bnx_softc *sc = ifp->if_softc;
3131 struct ifreq *ifr = (struct ifreq *)data;
3132 int mask, error = 0;
3134 ASSERT_SERIALIZED(ifp->if_serializer);
/* SIOCSIFMTU: reject MTUs beyond the chip/config limits. */
3138 if ((!BNX_IS_JUMBO_CAPABLE(sc) && ifr->ifr_mtu > ETHERMTU) ||
3139 (BNX_IS_JUMBO_CAPABLE(sc) &&
3140 ifr->ifr_mtu > BNX_JUMBO_MTU)) {
3142 } else if (ifp->if_mtu != ifr->ifr_mtu) {
3143 ifp->if_mtu = ifr->ifr_mtu;
3144 if (ifp->if_flags & IFF_RUNNING)
/* SIOCSIFFLAGS: bring the interface up/down or adjust filters. */
3149 if (ifp->if_flags & IFF_UP) {
3150 if (ifp->if_flags & IFF_RUNNING) {
3151 mask = ifp->if_flags ^ sc->bnx_if_flags;
3154 * If only the state of the PROMISC flag
3155 * changed, then just use the 'set promisc
3156 * mode' command instead of reinitializing
3157 * the entire NIC. Doing a full re-init
3158 * means reloading the firmware and waiting
3159 * for it to start up, which may take a
3160 * second or two. Similarly for ALLMULTI.
3162 if (mask & IFF_PROMISC)
3164 if (mask & IFF_ALLMULTI)
3169 } else if (ifp->if_flags & IFF_RUNNING) {
3172 sc->bnx_if_flags = ifp->if_flags;
/* SIOCADDMULTI/SIOCDELMULTI: reload the multicast filter if up. */
3176 if (ifp->if_flags & IFF_RUNNING)
/* SIOCSIFMEDIA/SIOCGIFMEDIA: route to TBI ifmedia or the MII layer. */
3181 if (sc->bnx_flags & BNX_FLAG_TBI) {
3182 error = ifmedia_ioctl(ifp, ifr,
3183 &sc->bnx_ifmedia, command);
3185 struct mii_data *mii;
3187 mii = device_get_softc(sc->bnx_miibus);
3188 error = ifmedia_ioctl(ifp, ifr,
3189 &mii->mii_media, command);
/* SIOCSIFCAP: toggle checksum-offload and TSO capabilities. */
3193 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
3194 if (mask & IFCAP_HWCSUM) {
3195 ifp->if_capenable ^= (mask & IFCAP_HWCSUM);
3196 if (ifp->if_capenable & IFCAP_TXCSUM)
3197 ifp->if_hwassist |= BNX_CSUM_FEATURES;
3199 ifp->if_hwassist &= ~BNX_CSUM_FEATURES;
3201 if (mask & IFCAP_TSO) {
3202 ifp->if_capenable ^= (mask & IFCAP_TSO);
3203 if (ifp->if_capenable & IFCAP_TSO)
3204 ifp->if_hwassist |= CSUM_TSO;
3206 ifp->if_hwassist &= ~CSUM_TSO;
3210 error = ether_ioctl(ifp, command, data);
/*
 * Transmit watchdog: the chip stopped making TX progress -- log it,
 * count an output error, and (per the visible tail) restart TX if
 * packets remain queued.  NOTE(review): the reset call between the
 * printf and the stat bump is elided in this excerpt.
 */
3217 bnx_watchdog(struct ifnet *ifp)
3219 struct bnx_softc *sc = ifp->if_softc;
3221 if_printf(ifp, "watchdog timeout -- resetting\n");
3225 IFNET_STAT_INC(ifp, oerrors, 1);
3227 if (!ifq_is_empty(&ifp->if_snd))
3232 * Stop the adapter and free any mbufs allocated to the
/*
 * Quiesce the chip: stop the timer, disable every RX/TX/DMA functional
 * block, mask interrupts, tell the firmware we are down, and free all
 * RX/TX buffers.  Leaves the interface marked !IFF_RUNNING.
 * Must be called with the ifnet serializer held.
 * NOTE(review): some original lines are elided in this excerpt.
 */
3236 bnx_stop(struct bnx_softc *sc)
3238 struct ifnet *ifp = &sc->arpcom.ac_if;
3241 ASSERT_SERIALIZED(ifp->if_serializer);
3243 callout_stop(&sc->bnx_stat_timer);
3246 * Disable all of the receiver blocks
3248 bnx_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
3249 bnx_stop_block(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
3250 bnx_stop_block(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
3251 bnx_stop_block(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
3252 bnx_stop_block(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
3253 bnx_stop_block(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
3256 * Disable all of the transmit blocks
3258 bnx_stop_block(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
3259 bnx_stop_block(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
3260 bnx_stop_block(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
3261 bnx_stop_block(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
3262 bnx_stop_block(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
3263 bnx_stop_block(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
3266 * Shut down all of the memory managers and related
3269 bnx_stop_block(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
3270 bnx_stop_block(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
/* Pulse the FTQ reset register to flush the flow-through queues. */
3271 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
3272 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
3274 /* Disable host interrupts. */
3275 bnx_disable_intr(sc);
3278 * Tell firmware we're shutting down.
3280 BNX_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3282 /* Free the RX lists. */
3283 bnx_free_rx_ring_std(&sc->bnx_rx_std_ring);
3285 /* Free jumbo RX list. */
3286 if (BNX_IS_JUMBO_CAPABLE(sc))
3287 bnx_free_rx_ring_jumbo(sc);
3289 /* Free TX buffers. */
3290 for (i = 0; i < sc->bnx_tx_ringcnt; ++i)
3291 bnx_free_tx_ring(&sc->bnx_tx_ring[i]);
/* Reset the tagged-status and coalesce bookkeeping for next init. */
3293 sc->bnx_status_tag = 0;
3295 sc->bnx_coal_chg = 0;
3297 ifp->if_flags &= ~IFF_RUNNING;
3298 ifq_clr_oactive(&ifp->if_snd);
3303 * Stop all chip I/O so that the kernel's probe routines don't
3304 * get confused by errant DMAs when rebooting.
/*
 * Device shutdown method: quiesce the chip under the serializer.
 * NOTE(review): the stop/reset calls between enter and exit are
 * elided in this excerpt.
 */
3307 bnx_shutdown(device_t dev)
3309 struct bnx_softc *sc = device_get_softc(dev);
3310 struct ifnet *ifp = &sc->arpcom.ac_if;
3312 lwkt_serialize_enter(ifp->if_serializer);
3315 lwkt_serialize_exit(ifp->if_serializer);
/*
 * Device suspend method: stop the interface under the serializer so
 * no DMA is in flight while the system sleeps.
 * NOTE(review): the body between enter and exit is elided here.
 */
3319 bnx_suspend(device_t dev)
3321 struct bnx_softc *sc = device_get_softc(dev);
3322 struct ifnet *ifp = &sc->arpcom.ac_if;
3324 lwkt_serialize_enter(ifp->if_serializer);
3326 lwkt_serialize_exit(ifp->if_serializer);
/*
 * Device resume method: if the interface was administratively up,
 * re-initialize it and restart transmission of any queued packets.
 * NOTE(review): some original lines are elided in this excerpt.
 */
3332 bnx_resume(device_t dev)
3334 struct bnx_softc *sc = device_get_softc(dev);
3335 struct ifnet *ifp = &sc->arpcom.ac_if;
3337 lwkt_serialize_enter(ifp->if_serializer);
3339 if (ifp->if_flags & IFF_UP) {
/* Restart TX for packets that queued up while suspended. */
3342 if (!ifq_is_empty(&ifp->if_snd))
3346 lwkt_serialize_exit(ifp->if_serializer);
/*
 * Mirror the interface's IFF_PROMISC flag into the chip's RX-mode
 * promiscuous bit.
 */
3352 bnx_setpromisc(struct bnx_softc *sc)
3354 struct ifnet *ifp = &sc->arpcom.ac_if;
3356 if (ifp->if_flags & IFF_PROMISC)
3357 BNX_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
3359 BNX_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
/*
 * Release every DMA resource allocated by bnx_dma_alloc(): RX return
 * rings, RX mbuf tag/maps, the std RX ring, TX rings, the jumbo pool,
 * the status block, and finally the parent DMA tag.  Safe on partial
 * allocation (each section checks for NULL first).
 * NOTE(review): some original lines are elided in this excerpt.
 */
3363 bnx_dma_free(struct bnx_softc *sc)
3365 struct bnx_rx_std_ring *std = &sc->bnx_rx_std_ring;
3368 /* Destroy RX return rings */
3369 if (sc->bnx_rx_ret_ring != NULL) {
3370 for (i = 0; i < sc->bnx_rx_retcnt; ++i)
3371 bnx_destroy_rx_ret_ring(&sc->bnx_rx_ret_ring[i]);
3372 kfree(sc->bnx_rx_ret_ring, M_DEVBUF);
3375 /* Destroy RX mbuf DMA stuffs. */
3376 if (std->bnx_rx_mtag != NULL) {
3377 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
/* All RX mbufs must already have been freed by bnx_stop(). */
3378 KKASSERT(std->bnx_rx_std_buf[i].bnx_rx_mbuf == NULL);
3379 bus_dmamap_destroy(std->bnx_rx_mtag,
3380 std->bnx_rx_std_buf[i].bnx_rx_dmamap);
3382 bus_dma_tag_destroy(std->bnx_rx_mtag);
3385 /* Destroy standard RX ring */
3386 bnx_dma_block_free(std->bnx_rx_std_ring_tag,
3387 std->bnx_rx_std_ring_map, std->bnx_rx_std_ring);
3389 /* Destroy TX rings */
3390 if (sc->bnx_tx_ring != NULL) {
3391 for (i = 0; i < sc->bnx_tx_ringcnt; ++i)
3392 bnx_destroy_tx_ring(&sc->bnx_tx_ring[i]);
3393 kfree(sc->bnx_tx_ring, M_DEVBUF);
3396 if (BNX_IS_JUMBO_CAPABLE(sc))
3397 bnx_free_jumbo_mem(sc);
3399 /* Destroy status block */
3400 bnx_dma_block_free(sc->bnx_cdata.bnx_status_tag,
3401 sc->bnx_cdata.bnx_status_map,
3402 sc->bnx_ldata.bnx_status_block);
3404 /* Destroy the parent tag */
3405 if (sc->bnx_cdata.bnx_parent_tag != NULL)
3406 bus_dma_tag_destroy(sc->bnx_cdata.bnx_parent_tag);
/*
 * Allocate all DMA resources used by the driver: the parent DMA tag
 * (bounded to avoid the chip's 4GB-boundary DMA hang), the status
 * block, RX mbuf tag+maps, the std RX ring, the RX return rings, the
 * TX rings, and (on capable chips) the jumbo buffer pool.  Error paths
 * unwind the partially-created resources.
 * NOTE(review): some original lines are elided in this excerpt.
 */
3410 bnx_dma_alloc(device_t dev)
3412 struct bnx_softc *sc = device_get_softc(dev);
3413 struct bnx_rx_std_ring *std = &sc->bnx_rx_std_ring;
3417 * Allocate the parent bus DMA tag appropriate for PCI.
3419 * All of the NetExtreme/NetLink controllers have 4GB boundary
3421 * Whenever an address crosses a multiple of the 4GB boundary
3422 * (including 4GB, 8Gb, 12Gb, etc.) and makes the transition
3423 * from 0xX_FFFF_FFFF to 0x(X+1)_0000_0000 an internal DMA
3424 * state machine will lockup and cause the device to hang.
3426 error = bus_dma_tag_create(NULL, 1, BGE_DMA_BOUNDARY_4G,
3427 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
3428 BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT,
3429 0, &sc->bnx_cdata.bnx_parent_tag);
3431 device_printf(dev, "could not create parent DMA tag\n");
3436 * Create DMA stuffs for status block.
3438 error = bnx_dma_block_alloc(sc, BGE_STATUS_BLK_SZ,
3439 &sc->bnx_cdata.bnx_status_tag,
3440 &sc->bnx_cdata.bnx_status_map,
3441 (void *)&sc->bnx_ldata.bnx_status_block,
3442 &sc->bnx_ldata.bnx_status_block_paddr);
3444 device_printf(dev, "could not create status block\n");
3449 * Create DMA tag and maps for RX mbufs.
/* One cluster-sized, single-segment tag shared by all std RX slots. */
3452 error = bus_dma_tag_create(sc->bnx_cdata.bnx_parent_tag, 1, 0,
3453 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3454 NULL, NULL, MCLBYTES, 1, MCLBYTES,
3455 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK, &std->bnx_rx_mtag);
3457 device_printf(dev, "could not create RX mbuf DMA tag\n");
3461 for (i = 0; i < BGE_STD_RX_RING_CNT; ++i) {
3462 error = bus_dmamap_create(std->bnx_rx_mtag, BUS_DMA_WAITOK,
3463 &std->bnx_rx_std_buf[i].bnx_rx_dmamap);
/* Unwind the maps created so far on failure. */
3467 for (j = 0; j < i; ++j) {
3468 bus_dmamap_destroy(std->bnx_rx_mtag,
3469 std->bnx_rx_std_buf[j].bnx_rx_dmamap);
3471 bus_dma_tag_destroy(std->bnx_rx_mtag);
3472 std->bnx_rx_mtag = NULL;
3475 "could not create %dth RX mbuf DMA map\n", i);
3481 * Create DMA stuffs for standard RX ring.
3483 error = bnx_dma_block_alloc(sc, BGE_STD_RX_RING_SZ,
3484 &std->bnx_rx_std_ring_tag,
3485 &std->bnx_rx_std_ring_map,
3486 (void *)&std->bnx_rx_std_ring,
3487 &std->bnx_rx_std_ring_paddr);
3489 device_printf(dev, "could not create std RX ring\n");
3494 * Create RX return rings
3496 sc->bnx_rx_ret_ring = kmalloc_cachealign(
3497 sizeof(struct bnx_rx_ret_ring) * sc->bnx_rx_retcnt, M_DEVBUF,
3499 for (i = 0; i < sc->bnx_rx_retcnt; ++i) {
3500 struct bnx_rx_ret_ring *ret = &sc->bnx_rx_ret_ring[i];
/* Consumer index lives in the shared status block. */
3506 ret->bnx_rx_considx =
3507 &sc->bnx_ldata.bnx_status_block->bge_idx[0].bge_rx_prod_idx;
3509 error = bnx_create_rx_ret_ring(ret);
3512 "could not create %dth RX ret ring\n", i);
3520 mbx = BGE_MBX_TX_HOST_PROD0_LO;
3521 sc->bnx_tx_ring = kmalloc_cachealign(
3522 sizeof(struct bnx_tx_ring) * sc->bnx_tx_ringcnt, M_DEVBUF,
3524 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) {
3525 struct bnx_tx_ring *txr = &sc->bnx_tx_ring[i];
/* Each TX ring gets its own host producer mailbox. */
3528 txr->bnx_tx_mbx = mbx;
3536 txr->bnx_tx_considx =
3537 &sc->bnx_ldata.bnx_status_block->bge_idx[0].bge_tx_cons_idx;
3539 error = bnx_create_tx_ring(txr);
3542 "could not create %dth TX ring\n", i);
3548 * Create jumbo buffer pool.
3550 if (BNX_IS_JUMBO_CAPABLE(sc)) {
3551 error = bnx_alloc_jumbo_mem(sc);
3554 "could not create jumbo buffer pool\n");
/*
 * bnx_dma_block_alloc: allocate one coherent, zero-filled DMA memory block
 * of `size` bytes (PAGE_SIZE aligned) from the softc's parent DMA tag and
 * return the resulting tag, map, kernel virtual address and bus address
 * through the out parameters.  Ownership of the block passes to the caller,
 * who releases it with bnx_dma_block_free().
 * NOTE(review): elided listing — the error-check/return lines between
 * original lines 3571 and 3575 are not visible here.
 */
3563 bnx_dma_block_alloc(struct bnx_softc *sc, bus_size_t size, bus_dma_tag_t *tag,
3564     bus_dmamap_t *map, void **addr, bus_addr_t *paddr)
3569 	error = bus_dmamem_coherent(sc->bnx_cdata.bnx_parent_tag, PAGE_SIZE, 0,
3570 	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3571 	    size, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
/* Success: copy the bus_dmamem results out to the caller. */
3575 	*tag = dmem.dmem_tag;
3576 	*map = dmem.dmem_map;
3577 	*addr = dmem.dmem_addr;
3578 	*paddr = dmem.dmem_busaddr;
/*
 * bnx_dma_block_free: release a DMA block created by bnx_dma_block_alloc()
 * — unload the map, free the memory, then destroy the tag.
 */
3584 bnx_dma_block_free(bus_dma_tag_t tag, bus_dmamap_t map, void *addr)
3587 	bus_dmamap_unload(tag, map);
3588 	bus_dmamem_free(tag, addr, map);
3589 	bus_dma_tag_destroy(tag);
/*
 * bnx_tbi_link_upd: link-change handler for TBI-mode (fiber) NICs.
 * Decides link up/down from the MAC status word, reports the change to
 * the network stack, and acknowledges the attention bits.
 * NOTE(review): elided listing — the bootverbose guards and the
 * sc->bnx_link assignments between the numbered lines are not visible.
 */
3594 bnx_tbi_link_upd(struct bnx_softc *sc, uint32_t status)
3596 	struct ifnet *ifp = &sc->arpcom.ac_if;
3598 #define PCS_ENCODE_ERR	(BGE_MACSTAT_PORT_DECODE_ERROR|BGE_MACSTAT_MI_COMPLETE)
3601 	 * Sometimes PCS encoding errors are detected in
3602 	 * TBI mode (on fiber NICs), and for some reason
3603 	 * the chip will signal them as link changes.
3604 	 * If we get a link change event, but the 'PCS
3605 	 * encoding error' bit in the MAC status register
3606 	 * is set, don't bother doing a link check.
3607 	 * This avoids spurious "gigabit link up" messages
3608 	 * that sometimes appear on fiber NICs during
3609 	 * periods of heavy traffic.
3611 	if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
3612 		if (!sc->bnx_link) {
/* 5704 fiber: stop sending configs once the link is synched. */
3614 			if (sc->bnx_asicrev == BGE_ASICREV_BCM5704) {
3615 				BNX_CLRBIT(sc, BGE_MAC_MODE,
3616 				    BGE_MACMODE_TBI_SEND_CFGS);
/* Clear any latched MAC status before reporting link up. */
3618 			CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
3621 				if_printf(ifp, "link UP\n");
3623 			ifp->if_link_state = LINK_STATE_UP;
3624 			if_link_state_change(ifp);
/* Sync lost: only treat as link-down when it is not a PCS encoding error. */
3626 	} else if ((status & PCS_ENCODE_ERR) != PCS_ENCODE_ERR) {
3631 				if_printf(ifp, "link DOWN\n");
3633 			ifp->if_link_state = LINK_STATE_DOWN;
3634 			if_link_state_change(ifp);
3638 #undef PCS_ENCODE_ERR
3640 	/* Clear the attention. */
3641 	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
3642 	    BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
3643 	    BGE_MACSTAT_LINK_CHANGED);
/*
 * bnx_copper_link_upd: link-change handler for copper PHYs driven through
 * the MII bus.  Refreshes PHY state via bnx_miibus_statchg(), logs the
 * transition, and acknowledges the MAC attention bits.
 * NOTE(review): elided listing — the mii_pollstat()/link-test lines between
 * the numbered lines are not visible.
 */
3647 bnx_copper_link_upd(struct bnx_softc *sc, uint32_t status __unused)
3649 	struct ifnet *ifp = &sc->arpcom.ac_if;
3650 	struct mii_data *mii = device_get_softc(sc->bnx_miibus);
3653 	bnx_miibus_statchg(sc->bnx_dev);
3657 			if_printf(ifp, "link UP\n");
3659 			if_printf(ifp, "link DOWN\n");
3662 	/* Clear the attention. */
3663 	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
3664 	    BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
3665 	    BGE_MACSTAT_LINK_CHANGED);
/*
 * bnx_autopoll_link_upd: link-change handler used when the MAC auto-polls
 * the PHY.  Compares the cached sc->bnx_link flag against the current MII
 * media status to detect up/down transitions, then acknowledges the MAC
 * attention bits.
 * NOTE(review): elided listing — the mii_pollstat() call and the
 * sc->bnx_link assignments between the numbered lines are not visible.
 */
3669 bnx_autopoll_link_upd(struct bnx_softc *sc, uint32_t status __unused)
3671 	struct ifnet *ifp = &sc->arpcom.ac_if;
3672 	struct mii_data *mii = device_get_softc(sc->bnx_miibus);
/* Transition: was down, media now active with a real subtype -> link up. */
3676 	if (!sc->bnx_link &&
3677 	    (mii->mii_media_status & IFM_ACTIVE) &&
3678 	    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
3681 			if_printf(ifp, "link UP\n");
/* Transition: was up, media inactive or no subtype -> link down. */
3682 	} else if (sc->bnx_link &&
3683 	    (!(mii->mii_media_status & IFM_ACTIVE) ||
3684 	    IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
3687 			if_printf(ifp, "link DOWN\n");
3690 	/* Clear the attention. */
3691 	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
3692 	    BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
3693 	    BGE_MACSTAT_LINK_CHANGED);
/*
 * sysctl handler: RX coalescing ticks.  Thin wrapper that delegates to
 * bnx_sysctl_coal_chg() with the RX-tick bounds and change-flag.
 */
3697 bnx_sysctl_rx_coal_ticks(SYSCTL_HANDLER_ARGS)
3699 	struct bnx_softc *sc = arg1;
3701 	return bnx_sysctl_coal_chg(oidp, arg1, arg2, req,
3702 	    &sc->bnx_rx_coal_ticks,
3703 	    BNX_RX_COAL_TICKS_MIN, BNX_RX_COAL_TICKS_MAX,
3704 	    BNX_RX_COAL_TICKS_CHG);
/*
 * sysctl handler: TX coalescing ticks.  Delegates to bnx_sysctl_coal_chg()
 * with the TX-tick bounds and change-flag.
 */
3708 bnx_sysctl_tx_coal_ticks(SYSCTL_HANDLER_ARGS)
3710 	struct bnx_softc *sc = arg1;
3712 	return bnx_sysctl_coal_chg(oidp, arg1, arg2, req,
3713 	    &sc->bnx_tx_coal_ticks,
3714 	    BNX_TX_COAL_TICKS_MIN, BNX_TX_COAL_TICKS_MAX,
3715 	    BNX_TX_COAL_TICKS_CHG);
/*
 * sysctl handler: max RX buffer descriptors to coalesce.  Delegates to
 * bnx_sysctl_coal_chg() with the RX-BD bounds and change-flag.
 */
3719 bnx_sysctl_rx_coal_bds(SYSCTL_HANDLER_ARGS)
3721 	struct bnx_softc *sc = arg1;
3723 	return bnx_sysctl_coal_chg(oidp, arg1, arg2, req,
3724 	    &sc->bnx_rx_coal_bds,
3725 	    BNX_RX_COAL_BDS_MIN, BNX_RX_COAL_BDS_MAX,
3726 	    BNX_RX_COAL_BDS_CHG);
/*
 * sysctl handler: max TX buffer descriptors to coalesce.  Delegates to
 * bnx_sysctl_coal_chg() with the TX-BD bounds and change-flag.
 */
3730 bnx_sysctl_tx_coal_bds(SYSCTL_HANDLER_ARGS)
3732 	struct bnx_softc *sc = arg1;
3734 	return bnx_sysctl_coal_chg(oidp, arg1, arg2, req,
3735 	    &sc->bnx_tx_coal_bds,
3736 	    BNX_TX_COAL_BDS_MIN, BNX_TX_COAL_BDS_MAX,
3737 	    BNX_TX_COAL_BDS_CHG);
/*
 * sysctl handler: max RX BDs coalesced while an interrupt is pending.
 * Delegates to bnx_sysctl_coal_chg() with the RX-BD bounds and the
 * "during interrupt" change-flag.
 */
3741 bnx_sysctl_rx_coal_bds_int(SYSCTL_HANDLER_ARGS)
3743 	struct bnx_softc *sc = arg1;
3745 	return bnx_sysctl_coal_chg(oidp, arg1, arg2, req,
3746 	    &sc->bnx_rx_coal_bds_int,
3747 	    BNX_RX_COAL_BDS_MIN, BNX_RX_COAL_BDS_MAX,
3748 	    BNX_RX_COAL_BDS_INT_CHG);
/*
 * sysctl handler: max TX BDs coalesced while an interrupt is pending.
 * Delegates to bnx_sysctl_coal_chg() with the TX-BD bounds and the
 * "during interrupt" change-flag.
 */
3752 bnx_sysctl_tx_coal_bds_int(SYSCTL_HANDLER_ARGS)
3754 	struct bnx_softc *sc = arg1;
3756 	return bnx_sysctl_coal_chg(oidp, arg1, arg2, req,
3757 	    &sc->bnx_tx_coal_bds_int,
3758 	    BNX_TX_COAL_BDS_MIN, BNX_TX_COAL_BDS_MAX,
3759 	    BNX_TX_COAL_BDS_INT_CHG);
/*
 * bnx_sysctl_coal_chg: common worker for all coalescing sysctls.  Reads the
 * new value from userland, rejects it if outside [coal_min, coal_max],
 * otherwise stores it in *coal, marks coal_chg_mask in sc->bnx_coal_chg and
 * commits the change to hardware via bnx_coal_change().  The ifnet
 * serializer is held across the update.
 * NOTE(review): elided listing — the initial read of *coal into `v`, the
 * EINVAL return, and the *coal store between the numbered lines are not
 * visible here.
 */
3763 bnx_sysctl_coal_chg(SYSCTL_HANDLER_ARGS, uint32_t *coal,
3764     int coal_min, int coal_max, uint32_t coal_chg_mask)
3766 	struct bnx_softc *sc = arg1;
3767 	struct ifnet *ifp = &sc->arpcom.ac_if;
3770 	lwkt_serialize_enter(ifp->if_serializer);
3773 	error = sysctl_handle_int(oidp, &v, 0, req);
3774 	if (!error && req->newptr != NULL) {
3775 		if (v < coal_min || v > coal_max) {
3779 			sc->bnx_coal_chg |= coal_chg_mask;
3781 			/* Commit changes */
3782 			bnx_coal_change(sc);
3786 	lwkt_serialize_exit(ifp->if_serializer);
/*
 * bnx_coal_change: push every pending coalescing parameter (flagged in
 * sc->bnx_coal_chg) to the Host Coalescing Control registers, then clear
 * the pending mask.  Each write is followed by a register read, which
 * flushes the posted PCI write.  Must be called with the ifnet serializer
 * held.
 * NOTE(review): elided listing — the guards around the if_printf() calls
 * (presumably bootverbose checks — confirm against full source) are not
 * visible between the numbered lines.
 */
3791 bnx_coal_change(struct bnx_softc *sc)
3793 	struct ifnet *ifp = &sc->arpcom.ac_if;
3795 	ASSERT_SERIALIZED(ifp->if_serializer);
3797 	if (sc->bnx_coal_chg & BNX_RX_COAL_TICKS_CHG) {
3798 		CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS,
3799 		    sc->bnx_rx_coal_ticks);
/* Read-back flushes the posted write. */
3801 		CSR_READ_4(sc, BGE_HCC_RX_COAL_TICKS);
3804 			if_printf(ifp, "rx_coal_ticks -> %u\n",
3805 			    sc->bnx_rx_coal_ticks);
3809 	if (sc->bnx_coal_chg & BNX_TX_COAL_TICKS_CHG) {
3810 		CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS,
3811 		    sc->bnx_tx_coal_ticks);
3813 		CSR_READ_4(sc, BGE_HCC_TX_COAL_TICKS);
3816 			if_printf(ifp, "tx_coal_ticks -> %u\n",
3817 			    sc->bnx_tx_coal_ticks);
3821 	if (sc->bnx_coal_chg & BNX_RX_COAL_BDS_CHG) {
3822 		CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS,
3823 		    sc->bnx_rx_coal_bds);
3825 		CSR_READ_4(sc, BGE_HCC_RX_MAX_COAL_BDS);
3828 			if_printf(ifp, "rx_coal_bds -> %u\n",
3829 			    sc->bnx_rx_coal_bds);
3833 	if (sc->bnx_coal_chg & BNX_TX_COAL_BDS_CHG) {
3834 		CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS,
3835 		    sc->bnx_tx_coal_bds);
3837 		CSR_READ_4(sc, BGE_HCC_TX_MAX_COAL_BDS);
3840 			if_printf(ifp, "tx_coal_bds -> %u\n",
3841 			    sc->bnx_tx_coal_bds);
3845 	if (sc->bnx_coal_chg & BNX_RX_COAL_BDS_INT_CHG) {
3846 		CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT,
3847 		    sc->bnx_rx_coal_bds_int);
3849 		CSR_READ_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT);
3852 			if_printf(ifp, "rx_coal_bds_int -> %u\n",
3853 			    sc->bnx_rx_coal_bds_int);
3857 	if (sc->bnx_coal_chg & BNX_TX_COAL_BDS_INT_CHG) {
3858 		CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT,
3859 		    sc->bnx_tx_coal_bds_int);
3861 		CSR_READ_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT);
3864 			if_printf(ifp, "tx_coal_bds_int -> %u\n",
3865 			    sc->bnx_tx_coal_bds_int);
/* All pending changes committed. */
3869 	sc->bnx_coal_chg = 0;
/*
 * bnx_check_intr: lost-interrupt watchdog, run as a periodic callout on the
 * interrupt's CPU.  If the hardware consumer indices have advanced but the
 * driver's saved indices have not moved since the last check, the interrupt
 * is considered possibly lost and the interrupt handler is invoked manually.
 * Re-arms itself every BNX_INTR_CKINTVL ticks while the interface is
 * RUNNING and not in polling mode.
 * NOTE(review): elided listing — the maylose re-check branch between
 * original lines 3898 and 3902 is not fully visible.
 */
3873 bnx_check_intr(void *xintr)
3875 	struct bnx_intr_data *intr = xintr;
3876 	struct bnx_rx_ret_ring *ret;
3877 	struct bnx_tx_ring *txr;
3880 	lwkt_serialize_enter(intr->bnx_intr_serialize);
3882 	KKASSERT(mycpuid == intr->bnx_intr_cpuid);
3884 	ifp = &intr->bnx_sc->arpcom.ac_if;
/* Bail out (without re-arming) unless running and not polling. */
3885 	if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) != IFF_RUNNING) {
3886 		lwkt_serialize_exit(intr->bnx_intr_serialize);
3890 	txr = intr->bnx_txr;
3891 	ret = intr->bnx_ret;
/* Hardware produced work the driver has not consumed... */
3893 	if (*ret->bnx_rx_considx != ret->bnx_rx_saved_considx ||
3894 	    *txr->bnx_tx_considx != txr->bnx_tx_saved_considx) {
/* ...and nothing moved since the previous check: interrupt may be lost. */
3895 		if (intr->bnx_rx_check_considx == ret->bnx_rx_saved_considx &&
3896 		    intr->bnx_tx_check_considx == txr->bnx_tx_saved_considx) {
3897 			if (!intr->bnx_intr_maylose) {
3898 				intr->bnx_intr_maylose = TRUE;
3902 				if_printf(ifp, "lost interrupt\n");
3903 			intr->bnx_intr_func(intr->bnx_intr_arg);
3906 	intr->bnx_intr_maylose = FALSE;
3907 	intr->bnx_rx_check_considx = ret->bnx_rx_saved_considx;
3908 	intr->bnx_tx_check_considx = txr->bnx_tx_saved_considx;
/* Re-arm the watchdog. */
3911 	callout_reset(&intr->bnx_intr_timer, BNX_INTR_CKINTVL,
3912 	    intr->bnx_intr_check, intr);
3913 	lwkt_serialize_exit(intr->bnx_intr_serialize);
/*
 * bnx_enable_intr: (re)enable interrupt delivery.  Enables the serializer
 * handlers, writes the status tag to interrupt mailbox 0 to ack/unmask,
 * clears the PCI interrupt-mask bit, forces one interrupt so nothing is
 * missed, and — on chips with the status-tag bug — starts the
 * lost-interrupt watchdog callout on each interrupt's CPU.
 * NOTE(review): elided listing — the bootverbose guard before the
 * "status tag bug workaround" message is not visible.
 */
3917 bnx_enable_intr(struct bnx_softc *sc)
3919 	struct ifnet *ifp = &sc->arpcom.ac_if;
3922 	for (i = 0; i < sc->bnx_intr_cnt; ++i) {
3923 		lwkt_serialize_handler_enable(
3924 		    sc->bnx_intr_data[i].bnx_intr_serialize);
3930 	bnx_writembx(sc, BGE_MBX_IRQ0_LO, sc->bnx_status_tag << 24);
3931 	if (sc->bnx_flags & BNX_FLAG_ONESHOT_MSI) {
3932 		/* XXX Linux driver */
3933 		bnx_writembx(sc, BGE_MBX_IRQ0_LO, sc->bnx_status_tag << 24);
3937 	 * Unmask the interrupt when we stop polling.
3939 	PCI_CLRBIT(sc->bnx_dev, BGE_PCI_MISC_CTL,
3940 	    BGE_PCIMISCCTL_MASK_PCI_INTR, 4);
3943 	 * Trigger another interrupt, since above writing
3944 	 * to interrupt mailbox0 may acknowledge pending
3947 	BNX_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
3949 	if (sc->bnx_flags & BNX_FLAG_STATUSTAG_BUG) {
3951 			if_printf(ifp, "status tag bug workaround\n");
3953 		for (i = 0; i < sc->bnx_intr_cnt; ++i) {
3954 			struct bnx_intr_data *intr = &sc->bnx_intr_data[i];
/* Reset watchdog state and arm it on the interrupt's own CPU. */
3956 			intr->bnx_intr_maylose = FALSE;
3957 			intr->bnx_rx_check_considx = 0;
3958 			intr->bnx_tx_check_considx = 0;
3959 			callout_reset_bycpu(&intr->bnx_intr_timer,
3960 			    BNX_INTR_CKINTVL, intr->bnx_intr_check, intr,
3961 			    intr->bnx_intr_cpuid);
/*
 * bnx_disable_intr: quiesce interrupt delivery — stop the lost-interrupt
 * watchdog callouts, mask the PCI interrupt, ack any asserted interrupt via
 * mailbox 0, reset the polling status counter, and disable the serializer
 * handlers so queued handler calls are dropped.
 */
3967 bnx_disable_intr(struct bnx_softc *sc)
3971 	for (i = 0; i < sc->bnx_intr_cnt; ++i) {
3972 		struct bnx_intr_data *intr = &sc->bnx_intr_data[i];
3974 		callout_stop(&intr->bnx_intr_timer);
3975 		intr->bnx_intr_maylose = FALSE;
3976 		intr->bnx_rx_check_considx = 0;
3977 		intr->bnx_tx_check_considx = 0;
3981 	 * Mask the interrupt when we start polling.
3983 	PCI_SETBIT(sc->bnx_dev, BGE_PCI_MISC_CTL,
3984 	    BGE_PCIMISCCTL_MASK_PCI_INTR, 4);
3987 	 * Acknowledge possible asserted interrupt.
3989 	bnx_writembx(sc, BGE_MBX_IRQ0_LO, 1);
3991 	sc->bnx_npoll.ifpc_stcount = 0;
3992 	for (i = 0; i < sc->bnx_intr_cnt; ++i) {
3993 		lwkt_serialize_handler_disable(
3994 		    sc->bnx_intr_data[i].bnx_intr_serialize);
/*
 * bnx_get_eaddr_mem: try to recover the station address from NIC-local
 * memory at offset 0x0c14/0x0c18, where firmware may have stashed it.
 * The upper 16 bits of the first word must read 0x484b ("HK" signature)
 * for the data to be considered valid.
 * NOTE(review): elided listing — the failure-return path after line 4012
 * is not visible here.
 */
3999 bnx_get_eaddr_mem(struct bnx_softc *sc, uint8_t ether_addr[])
4004 	mac_addr = bnx_readmem_ind(sc, 0x0c14);
4005 	if ((mac_addr >> 16) == 0x484b) {
/* First word carries bytes 0-1, second word bytes 2-5 (big-endian order). */
4006 		ether_addr[0] = (uint8_t)(mac_addr >> 8);
4007 		ether_addr[1] = (uint8_t)mac_addr;
4008 		mac_addr = bnx_readmem_ind(sc, 0x0c18);
4009 		ether_addr[2] = (uint8_t)(mac_addr >> 24);
4010 		ether_addr[3] = (uint8_t)(mac_addr >> 16);
4011 		ether_addr[4] = (uint8_t)(mac_addr >> 8);
4012 		ether_addr[5] = (uint8_t)mac_addr;
/*
 * bnx_get_eaddr_nvram: read the station address from NVRAM.  5717-family
 * chips use a different base offset, adjusted further by PCI function
 * number so each port gets its own address.
 * NOTE(review): elided listing — the function-number test between lines
 * 4026 and 4028/4030 is not visible.
 */
4019 bnx_get_eaddr_nvram(struct bnx_softc *sc, uint8_t ether_addr[])
4021 	int mac_offset = BGE_EE_MAC_OFFSET;
4023 	if (BNX_IS_5717_PLUS(sc)) {
4026 		f = pci_get_function(sc->bnx_dev);
4028 			mac_offset = BGE_EE_MAC_OFFSET_5717;
4030 			mac_offset += BGE_EE_MAC_OFFSET_5717_OFF;
/* +2 skips the 2-byte length/pad preceding the 6 address bytes. */
4033 	return bnx_read_nvram(sc, ether_addr, mac_offset + 2, ETHER_ADDR_LEN);
/*
 * bnx_get_eaddr_eeprom: read the station address from EEPROM, unless this
 * board has no EEPROM at all (BNX_FLAG_NO_EEPROM).
 */
4037 bnx_get_eaddr_eeprom(struct bnx_softc *sc, uint8_t ether_addr[])
4039 	if (sc->bnx_flags & BNX_FLAG_NO_EEPROM)
4042 	return bnx_read_eeprom(sc, ether_addr, BGE_EE_MAC_OFFSET + 2,
/*
 * bnx_get_eaddr: obtain the station address by trying each source in a
 * fixed priority order (NULL-terminated table); the first method that
 * succeeds wins.  Returns ENXIO when every method fails.
 */
4047 bnx_get_eaddr(struct bnx_softc *sc, uint8_t eaddr[])
4049 	static const bnx_eaddr_fcn_t bnx_eaddr_funcs[] = {
4050 		/* NOTE: Order is critical */
4052 		bnx_get_eaddr_nvram,
4053 		bnx_get_eaddr_eeprom,
4056 	const bnx_eaddr_fcn_t *func;
4058 	for (func = bnx_eaddr_funcs; *func != NULL; ++func) {
4059 		if ((*func)(sc, eaddr) == 0)
4062 	return (*func == NULL ? ENXIO : 0);
4066  * NOTE: 'm' is not freed upon failure
/*
 * bnx_defrag_shortdma: silicon-bug workaround.  Scans the mbuf chain for
 * two consecutive short buffers and, if found, defragments the whole frame
 * into a fresh mbuf via m_defrag().  Returns the (possibly new) mbuf chain;
 * on failure the caller keeps ownership of the original 'm'.
 * NOTE(review): elided listing — the short-buffer detection inside the
 * loop (lines 4083-4092) is not visible here.
 */
4069 bnx_defrag_shortdma(struct mbuf *m)
4075 	 * If device receive two back-to-back send BDs with less than
4076 	 * or equal to 8 total bytes then the device may hang.  The two
4077 	 * back-to-back send BDs must in the same frame for this failure
4078 	 * to occur.  Scan mbuf chains and see whether two back-to-back
4079 	 * send BDs are there.  If this is the case, allocate new mbuf
4080 	 * and copy the frame to workaround the silicon bug.
4082 	for (n = m, found = 0; n != NULL; n = n->m_next) {
4093 		n = m_defrag(m, MB_DONTWAIT);
/*
 * bnx_stop_block: clear `bit` in state-machine register `reg`, then poll
 * (up to BNX_TIMEOUT iterations) until the hardware confirms the block
 * has stopped by reading the bit back as zero.
 */
4100 bnx_stop_block(struct bnx_softc *sc, bus_size_t reg, uint32_t bit)
4104 	BNX_CLRBIT(sc, reg, bit);
4105 	for (i = 0; i < BNX_TIMEOUT; i++) {
4106 		if ((CSR_READ_4(sc, reg) & bit) == 0)
/*
 * bnx_link_poll: read the MAC status register and, if a link-change
 * condition or a pending soft link event (bnx_link_evt) is present,
 * dispatch to the media-specific handler installed in sc->bnx_link_upd.
 */
4113 bnx_link_poll(struct bnx_softc *sc)
4117 	status = CSR_READ_4(sc, BGE_MAC_STS);
4118 	if ((status & sc->bnx_link_chg) || sc->bnx_link_evt) {
4119 		sc->bnx_link_evt = 0;
4120 		sc->bnx_link_upd(sc, status);
/*
 * bnx_enable_msi: turn on MSI mode in the chip, additionally enabling
 * one-shot MSI (by clearing its disable bit) when the softc flags say the
 * chip supports it.
 */
4125 bnx_enable_msi(struct bnx_softc *sc)
4129 	msi_mode = CSR_READ_4(sc, BGE_MSI_MODE);
4130 	msi_mode |= BGE_MSIMODE_ENABLE;
4131 	if (sc->bnx_flags & BNX_FLAG_ONESHOT_MSI) {
4134 		 * 5718-PG105-R says that "one shot" mode
4135 		 * does not work if MSI is used, however,
4136 		 * it obviously works.
4138 		msi_mode &= ~BGE_MSIMODE_ONESHOT_DISABLE;
4140 	CSR_WRITE_4(sc, BGE_MSI_MODE, msi_mode);
/*
 * bnx_dma_swap_options: compute the BGE_MODE_CTL byte/word-swap bits for
 * this host's endianness and ASIC revision.  5720/5762 additionally need
 * the buffer-to-host RX data swap and the B2HRX/HTX2B engines enabled.
 */
4144 bnx_dma_swap_options(struct bnx_softc *sc)
4146 	uint32_t dma_options;
4148 	dma_options = BGE_MODECTL_WORDSWAP_NONFRAME |
4149 	    BGE_MODECTL_BYTESWAP_DATA | BGE_MODECTL_WORDSWAP_DATA;
4150 #if BYTE_ORDER == BIG_ENDIAN
4151 	dma_options |= BGE_MODECTL_BYTESWAP_NONFRAME;
4153 	if (sc->bnx_asicrev == BGE_ASICREV_BCM5720 ||
4154 	    sc->bnx_asicrev == BGE_ASICREV_BCM5762) {
4155 		dma_options |= BGE_MODECTL_BYTESWAP_B2HRX_DATA |
4156 		    BGE_MODECTL_WORDSWAP_B2HRX_DATA | BGE_MODECTL_B2HRX_ENABLE |
4157 		    BGE_MODECTL_HTX2B_ENABLE;
/*
 * bnx_setup_tso: prepare an mbuf for hardware TSO.  Pulls up the
 * link/IP/TCP headers if they are not contiguous, rewrites ip_len to
 * (mss + header length) as the chip expects, and packs the combined
 * IP+TCP header length (in 32-bit words) into the mss and flags fields
 * returned to the caller through *mss0 and *flags0.
 * NOTE(review): elided listing — the m_pullup() failure handling and the
 * checksum-field clears between the numbered lines are not visible.
 */
4163 bnx_setup_tso(struct bnx_tx_ring *txr, struct mbuf **mp,
4164     uint16_t *mss0, uint16_t *flags0)
4169 	int thoff, iphlen, hoff, hlen;
4170 	uint16_t flags, mss;
4173 	KASSERT(M_WRITABLE(m), ("TSO mbuf not writable"));
4175 	hoff = m->m_pkthdr.csum_lhlen;
4176 	iphlen = m->m_pkthdr.csum_iphlen;
4177 	thoff = m->m_pkthdr.csum_thlen;
4179 	KASSERT(hoff > 0, ("invalid ether header len"));
4180 	KASSERT(iphlen > 0, ("invalid ip header len"));
4181 	KASSERT(thoff > 0, ("invalid tcp header len"));
/* Headers must live in the first mbuf for the header rewrites below. */
4183 	if (__predict_false(m->m_len < hoff + iphlen + thoff)) {
4184 		m = m_pullup(m, hoff + iphlen + thoff);
4191 	ip = mtodoff(m, struct ip *, hoff);
4192 	th = mtodoff(m, struct tcphdr *, hoff + iphlen);
4194 	mss = m->m_pkthdr.tso_segsz;
4195 	flags = BGE_TXBDFLAG_CPU_PRE_DMA | BGE_TXBDFLAG_CPU_POST_DMA;
/* Chip wants ip_len = segment size + total header length. */
4197 	ip->ip_len = htons(mss + iphlen + thoff);
/* Encode header length (32-bit words) across mss bits 14-15 and flags. */
4200 	hlen = (iphlen + thoff) >> 2;
4201 	mss |= ((hlen & 0x3) << 14);
4202 	flags |= ((hlen & 0xf8) << 7) | ((hlen & 0x4) << 2);
/*
 * bnx_create_tx_ring: allocate all DMA resources for one TX ring — the TX
 * mbuf DMA tag (sized for TSO frames when the chip supports it), one map
 * per TX descriptor (with rollback of already-created maps on failure),
 * and the TX ring memory block itself.  Also applies per-chip TX quirks
 * (short-DMA workaround flag, write-combining threshold).
 * NOTE(review): elided listing — error-check branches between the numbered
 * lines are not visible.
 */
4211 bnx_create_tx_ring(struct bnx_tx_ring *txr)
4213 	bus_size_t txmaxsz, txmaxsegsz;
4217 	 * Create DMA tag and maps for TX mbufs.
4219 	if (txr->bnx_sc->bnx_flags & BNX_FLAG_TSO)
4220 		txmaxsz = IP_MAXPACKET + sizeof(struct ether_vlan_header);
4222 		txmaxsz = BNX_JUMBO_FRAMELEN;
/* 57766 can only take MCLBYTES per segment; others handle a full page. */
4223 	if (txr->bnx_sc->bnx_asicrev == BGE_ASICREV_BCM57766)
4224 		txmaxsegsz = MCLBYTES;
4226 		txmaxsegsz = PAGE_SIZE;
4227 	error = bus_dma_tag_create(txr->bnx_sc->bnx_cdata.bnx_parent_tag,
4228 	    1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
4229 	    txmaxsz, BNX_NSEG_NEW, txmaxsegsz,
4230 	    BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
4233 		device_printf(txr->bnx_sc->bnx_dev,
4234 		    "could not create TX mbuf DMA tag\n");
4238 	for (i = 0; i < BGE_TX_RING_CNT; i++) {
4239 		error = bus_dmamap_create(txr->bnx_tx_mtag,
4240 		    BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
4241 		    &txr->bnx_tx_buf[i].bnx_tx_dmamap);
/* Rollback: destroy the maps created so far, then the tag. */
4245 			for (j = 0; j < i; ++j) {
4246 				bus_dmamap_destroy(txr->bnx_tx_mtag,
4247 				    txr->bnx_tx_buf[j].bnx_tx_dmamap);
4249 			bus_dma_tag_destroy(txr->bnx_tx_mtag);
4250 			txr->bnx_tx_mtag = NULL;
4252 			device_printf(txr->bnx_sc->bnx_dev,
4253 			    "could not create TX mbuf DMA map\n");
4259 	 * Create DMA stuffs for TX ring.
4261 	error = bnx_dma_block_alloc(txr->bnx_sc, BGE_TX_RING_SZ,
4262 	    &txr->bnx_tx_ring_tag,
4263 	    &txr->bnx_tx_ring_map,
4264 	    (void *)&txr->bnx_tx_ring,
4265 	    &txr->bnx_tx_ring_paddr);
4267 		device_printf(txr->bnx_sc->bnx_dev,
4268 		    "could not create TX ring\n");
4272 		txr->bnx_tx_flags |= BNX_TX_FLAG_SHORTDMA;
4273 		txr->bnx_tx_wreg = BNX_TX_WREG_NSEGS;
4279 bnx_destroy_tx_ring(struct bnx_tx_ring *txr)
4281 /* Destroy TX mbuf DMA stuffs. */
4282 if (txr->bnx_tx_mtag != NULL) {
4285 for (i = 0; i < BGE_TX_RING_CNT; i++) {
4286 KKASSERT(txr->bnx_tx_buf[i].bnx_tx_mbuf == NULL);
4287 bus_dmamap_destroy(txr->bnx_tx_mtag,
4288 txr->bnx_tx_buf[i].bnx_tx_dmamap);
4290 bus_dma_tag_destroy(txr->bnx_tx_mtag);
4293 /* Destroy TX ring */
4294 bnx_dma_block_free(txr->bnx_tx_ring_tag,
4295 txr->bnx_tx_ring_map, txr->bnx_tx_ring);
/*
 * sysctl handler: force_defrag knob.  Reports whether TX ring 0 currently
 * has BNX_TX_FLAG_FORCE_DEFRAG set, and on write sets or clears that flag
 * on every TX ring under the ifnet serializer.
 * NOTE(review): elided listing — the defrag-initialization and final
 * return lines between the numbered lines are not visible.
 */
4299 bnx_sysctl_force_defrag(SYSCTL_HANDLER_ARGS)
4301 	struct bnx_softc *sc = (void *)arg1;
4302 	struct ifnet *ifp = &sc->arpcom.ac_if;
4303 	struct bnx_tx_ring *txr = &sc->bnx_tx_ring[0];
4304 	int error, defrag, i;
4306 	if (txr->bnx_tx_flags & BNX_TX_FLAG_FORCE_DEFRAG)
4311 	error = sysctl_handle_int(oidp, &defrag, 0, req);
4312 	if (error || req->newptr == NULL)
4315 	lwkt_serialize_enter(ifp->if_serializer);
4316 	for (i = 0; i < sc->bnx_tx_ringcnt; ++i) {
4317 		txr = &sc->bnx_tx_ring[i];
4319 			txr->bnx_tx_flags |= BNX_TX_FLAG_FORCE_DEFRAG;
4321 			txr->bnx_tx_flags &= ~BNX_TX_FLAG_FORCE_DEFRAG;
4323 	lwkt_serialize_exit(ifp->if_serializer);
4329 bnx_sysctl_tx_wreg(SYSCTL_HANDLER_ARGS)
4331 struct bnx_softc *sc = (void *)arg1;
4332 struct ifnet *ifp = &sc->arpcom.ac_if;
4333 struct bnx_tx_ring *txr = &sc->bnx_tx_ring[0];
4334 int error, tx_wreg, i;
4336 tx_wreg = txr->bnx_tx_wreg;
4337 error = sysctl_handle_int(oidp, &tx_wreg, 0, req);
4338 if (error || req->newptr == NULL)
4341 lwkt_serialize_enter(ifp->if_serializer);
4342 for (i = 0; i < sc->bnx_tx_ringcnt; ++i)
4343 sc->bnx_tx_ring[i].bnx_tx_wreg = tx_wreg;
4344 lwkt_serialize_exit(ifp->if_serializer);
/*
 * bnx_create_rx_ret_ring: allocate DMA resources for one RX return ring —
 * the ring memory block plus a temporary RX mbuf map used for loading new
 * buffers.  The RX mbuf DMA tag is shared with (shadowed from) the
 * standard RX ring rather than created anew.
 * NOTE(review): elided listing — error-check branches between the numbered
 * lines are not visible.
 */
4350 bnx_create_rx_ret_ring(struct bnx_rx_ret_ring *ret)
4355 	 * Create DMA stuffs for RX return ring.
4357 	error = bnx_dma_block_alloc(ret->bnx_sc,
4358 	    BGE_RX_RTN_RING_SZ(BNX_RETURN_RING_CNT),
4359 	    &ret->bnx_rx_ret_ring_tag,
4360 	    &ret->bnx_rx_ret_ring_map,
4361 	    (void *)&ret->bnx_rx_ret_ring,
4362 	    &ret->bnx_rx_ret_ring_paddr);
4364 		device_printf(ret->bnx_sc->bnx_dev,
4365 		    "could not create RX ret ring\n");
4369 	/* Shadow standard ring's RX mbuf DMA tag */
4370 	ret->bnx_rx_mtag = ret->bnx_std->bnx_rx_mtag;
4373 	 * Create tmp DMA map for RX mbufs.
4375 	error = bus_dmamap_create(ret->bnx_rx_mtag, BUS_DMA_WAITOK,
4376 	    &ret->bnx_rx_tmpmap);
4378 		device_printf(ret->bnx_sc->bnx_dev,
4379 		    "could not create tmp RX mbuf DMA map\n");
/* Clear the shadowed tag so teardown won't touch the shared tag's map. */
4380 		ret->bnx_rx_mtag = NULL;
4387 bnx_destroy_rx_ret_ring(struct bnx_rx_ret_ring *ret)
4389 /* Destroy tmp RX mbuf DMA map */
4390 if (ret->bnx_rx_mtag != NULL)
4391 bus_dmamap_destroy(ret->bnx_rx_mtag, ret->bnx_rx_tmpmap);
4393 /* Destroy RX return ring */
4394 bnx_dma_block_free(ret->bnx_rx_ret_ring_tag,
4395 ret->bnx_rx_ret_ring_map, ret->bnx_rx_ret_ring);
/*
 * bnx_alloc_intr: set up the single-vector interrupt path — wire intr_data[0]
 * to RX return ring 0 / TX ring 0 and the main serializer, allocate the MSI
 * or legacy IRQ resource, pick the matching handler (oneshot-MSI, plain MSI,
 * or legacy INTx), and record the interrupt's CPU so TX completion runs on
 * the same CPU.
 * NOTE(review): elided listing — the failure return and the oneshot-MSI
 * capability test between the numbered lines are not visible.
 */
4399 bnx_alloc_intr(struct bnx_softc *sc)
4401 	struct bnx_intr_data *intr;
4404 	sc->bnx_intr_cnt = 1;
4406 	intr = &sc->bnx_intr_data[0];
4408 	intr->bnx_ret = &sc->bnx_rx_ret_ring[0];
4409 	intr->bnx_txr = &sc->bnx_tx_ring[0];
4410 	intr->bnx_intr_serialize = &sc->bnx_main_serialize;
4411 	callout_init_mp(&intr->bnx_intr_timer);
4412 	intr->bnx_intr_check = bnx_check_intr;
4414 	sc->bnx_intr_type = pci_alloc_1intr(sc->bnx_dev, bnx_msi_enable,
4415 	    &intr->bnx_intr_rid, &intr_flags);
4417 	intr->bnx_intr_res = bus_alloc_resource_any(sc->bnx_dev, SYS_RES_IRQ,
4418 	    &intr->bnx_intr_rid, intr_flags);
4419 	if (intr->bnx_intr_res == NULL) {
4420 		device_printf(sc->bnx_dev, "could not alloc interrupt\n");
4424 	if (sc->bnx_intr_type == PCI_INTR_TYPE_MSI) {
4425 			sc->bnx_flags |= BNX_FLAG_ONESHOT_MSI;
4428 		if (sc->bnx_flags & BNX_FLAG_ONESHOT_MSI) {
4429 			intr->bnx_intr_func = bnx_msi_oneshot;
4431 				device_printf(sc->bnx_dev, "oneshot MSI\n");
4433 			intr->bnx_intr_func = bnx_msi;
4436 		intr->bnx_intr_func = bnx_intr_legacy;
4438 	intr->bnx_intr_arg = sc;
4439 	intr->bnx_intr_cpuid = rman_get_cpuid(intr->bnx_intr_res);
/* Keep TX completion on the interrupt's CPU. */
4441 	intr->bnx_txr->bnx_tx_cpuid = intr->bnx_intr_cpuid;
/*
 * bnx_setup_intr: install every interrupt handler allocated by
 * bnx_alloc_intr(), each protected by its own serializer.  On failure the
 * handlers already installed are torn back down via bnx_teardown_intr().
 * NOTE(review): elided listing — the error test and return lines between
 * the numbered lines are not visible.
 */
4447 bnx_setup_intr(struct bnx_softc *sc)
4451 	for (i = 0; i < sc->bnx_intr_cnt; ++i) {
4452 		struct bnx_intr_data *intr = &sc->bnx_intr_data[i];
4454 		error = bus_setup_intr_descr(sc->bnx_dev, intr->bnx_intr_res,
4455 		    INTR_MPSAFE, intr->bnx_intr_func, intr->bnx_intr_arg,
4456 		    &intr->bnx_intr_hand, intr->bnx_intr_serialize,
4457 		    intr->bnx_intr_desc);
4459 			device_printf(sc->bnx_dev,
4460 			    "could not set up %dth intr\n", i);
4461 			bnx_teardown_intr(sc, i);
/*
 * bnx_teardown_intr: remove the first `cnt` installed interrupt handlers
 * (used both for full teardown and for partial rollback from
 * bnx_setup_intr()).
 */
4469 bnx_teardown_intr(struct bnx_softc *sc, int cnt)
4473 	for (i = 0; i < cnt; ++i) {
4474 		struct bnx_intr_data *intr = &sc->bnx_intr_data[i];
4476 		bus_teardown_intr(sc->bnx_dev, intr->bnx_intr_res,
4477 		    intr->bnx_intr_hand);
4482 bnx_free_intr(struct bnx_softc *sc)
4484 struct bnx_intr_data *intr;
4486 KKASSERT(sc->bnx_intr_cnt <= 1);
4487 intr = &sc->bnx_intr_data[0];
4489 if (intr->bnx_intr_res != NULL) {
4490 bus_release_resource(sc->bnx_dev, SYS_RES_IRQ,
4491 intr->bnx_intr_rid, intr->bnx_intr_res);
4493 if (sc->bnx_intr_type == PCI_INTR_TYPE_MSI)
4494 pci_release_msi(sc->bnx_dev);