/*
 * Copyright (c) 2001 Wind River Systems
 * Copyright (c) 1997, 1998, 1999, 2001
 *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/bge/if_bge.c,v 1.3.2.39 2005/07/03 03:41:18 silby Exp $
 */
#include "opt_ifpoll.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/interrupt.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/rman.h>
#include <sys/serialize.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <netinet/ip.h>
#include <netinet/tcp.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_poll.h>
#include <net/if_types.h>
#include <net/ifq_var.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>

#include <dev/netif/mii_layer/mii.h>
#include <dev/netif/mii_layer/miivar.h>
#include <dev/netif/mii_layer/brgphyreg.h>

#include <bus/pci/pcidevs.h>
#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>

#include <dev/netif/bge/if_bgereg.h>
#include <dev/netif/bnx/if_bnxvar.h>

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"
#define BNX_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)

#define BNX_INTR_CKINTVL	((10 * hz) / 1000)	/* 10ms */
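
/*
 * Note: hz is the kernel tick rate, so (10 * hz) / 1000 is just 10ms
 * expressed in ticks; e.g. with hz=100 the interrupt check interval
 * works out to a single tick, with hz=1000 to ten ticks.
 */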
static const struct bnx_type {
	uint16_t	bnx_vid;
	uint16_t	bnx_did;
	char		*bnx_name;
} bnx_devs[] = {
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5717,
		"Broadcom BCM5717 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5717C,
		"Broadcom BCM5717C Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5718,
		"Broadcom BCM5718 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5719,
		"Broadcom BCM5719 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5720_ALT,
		"Broadcom BCM5720 Gigabit Ethernet" },

	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5725,
		"Broadcom BCM5725 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5727,
		"Broadcom BCM5727 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5762,
		"Broadcom BCM5762 Gigabit Ethernet" },

	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57761,
		"Broadcom BCM57761 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57762,
		"Broadcom BCM57762 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57765,
		"Broadcom BCM57765 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57766,
		"Broadcom BCM57766 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57781,
		"Broadcom BCM57781 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57782,
		"Broadcom BCM57782 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57785,
		"Broadcom BCM57785 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57786,
		"Broadcom BCM57786 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57791,
		"Broadcom BCM57791 Fast Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57795,
		"Broadcom BCM57795 Fast Ethernet" },

	{ 0, 0, NULL }
};
static const int bnx_tx_mailbox[BNX_TX_RING_MAX] = {
	BGE_MBX_TX_HOST_PROD0_LO,
	BGE_MBX_TX_HOST_PROD0_HI,
	BGE_MBX_TX_HOST_PROD1_LO,
	BGE_MBX_TX_HOST_PROD1_HI
};
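
/*
 * TX ring i kicks the hardware by writing its producer index to
 * bnx_tx_mailbox[i] via bnx_writembx(); see bnx_init_tx_ring().
 */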
#define BNX_IS_JUMBO_CAPABLE(sc)	((sc)->bnx_flags & BNX_FLAG_JUMBO)
#define BNX_IS_5717_PLUS(sc)		((sc)->bnx_flags & BNX_FLAG_5717_PLUS)
#define BNX_IS_57765_PLUS(sc)		((sc)->bnx_flags & BNX_FLAG_57765_PLUS)
#define BNX_IS_57765_FAMILY(sc)		\
	((sc)->bnx_flags & BNX_FLAG_57765_FAMILY)

typedef int	(*bnx_eaddr_fcn_t)(struct bnx_softc *, uint8_t[]);
static int	bnx_probe(device_t);
static int	bnx_attach(device_t);
static int	bnx_detach(device_t);
static void	bnx_shutdown(device_t);
static int	bnx_suspend(device_t);
static int	bnx_resume(device_t);
static int	bnx_miibus_readreg(device_t, int, int);
static int	bnx_miibus_writereg(device_t, int, int, int);
static void	bnx_miibus_statchg(device_t);

#ifdef IFPOLL_ENABLE
static void	bnx_npoll(struct ifnet *, struct ifpoll_info *);
static void	bnx_npoll_rx(struct ifnet *, void *, int);
static void	bnx_npoll_tx(struct ifnet *, void *, int);
static void	bnx_npoll_status(struct ifnet *);
#endif
static void	bnx_intr_legacy(void *);
static void	bnx_msi(void *);
static void	bnx_msi_oneshot(void *);
static void	bnx_intr(struct bnx_softc *);
static void	bnx_enable_intr(struct bnx_softc *);
static void	bnx_disable_intr(struct bnx_softc *);
static void	bnx_txeof(struct bnx_tx_ring *, uint16_t);
static void	bnx_rxeof(struct bnx_rx_ret_ring *, uint16_t, int);
static int	bnx_alloc_intr(struct bnx_softc *);
static int	bnx_setup_intr(struct bnx_softc *);
static void	bnx_free_intr(struct bnx_softc *);
static void	bnx_teardown_intr(struct bnx_softc *, int);
static void	bnx_check_intr(void *);
static void	bnx_rx_std_refill_ithread(void *);
static void	bnx_rx_std_refill(void *, void *);

static void	bnx_start(struct ifnet *, struct ifaltq_subque *);
static int	bnx_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	bnx_init(void *);
static void	bnx_stop(struct bnx_softc *);
static void	bnx_watchdog(struct ifaltq_subque *);
static int	bnx_ifmedia_upd(struct ifnet *);
static void	bnx_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static void	bnx_tick(void *);
static void	bnx_serialize(struct ifnet *, enum ifnet_serialize);
static void	bnx_deserialize(struct ifnet *, enum ifnet_serialize);
static int	bnx_tryserialize(struct ifnet *, enum ifnet_serialize);
#ifdef INVARIANTS
static void	bnx_serialize_assert(struct ifnet *, enum ifnet_serialize,
		    boolean_t);
#endif

static int	bnx_alloc_jumbo_mem(struct bnx_softc *);
static void	bnx_free_jumbo_mem(struct bnx_softc *);
static struct bnx_jslot
		*bnx_jalloc(struct bnx_softc *);
static void	bnx_jfree(void *);
static void	bnx_jref(void *);
static int	bnx_newbuf_std(struct bnx_rx_ret_ring *, int, int);
static int	bnx_newbuf_jumbo(struct bnx_softc *, int, int);
static void	bnx_setup_rxdesc_std(struct bnx_rx_std_ring *, int);
static void	bnx_setup_rxdesc_jumbo(struct bnx_softc *, int);
static int	bnx_init_rx_ring_std(struct bnx_rx_std_ring *);
static void	bnx_free_rx_ring_std(struct bnx_rx_std_ring *);
static int	bnx_init_rx_ring_jumbo(struct bnx_softc *);
static void	bnx_free_rx_ring_jumbo(struct bnx_softc *);
static void	bnx_free_tx_ring(struct bnx_tx_ring *);
static int	bnx_init_tx_ring(struct bnx_tx_ring *);
static int	bnx_create_tx_ring(struct bnx_tx_ring *);
static void	bnx_destroy_tx_ring(struct bnx_tx_ring *);
static int	bnx_create_rx_ret_ring(struct bnx_rx_ret_ring *);
static void	bnx_destroy_rx_ret_ring(struct bnx_rx_ret_ring *);
static int	bnx_dma_alloc(device_t);
static void	bnx_dma_free(struct bnx_softc *);
static int	bnx_dma_block_alloc(struct bnx_softc *, bus_size_t,
		    bus_dma_tag_t *, bus_dmamap_t *, void **, bus_addr_t *);
static void	bnx_dma_block_free(bus_dma_tag_t, bus_dmamap_t, void *);
static struct mbuf *
		bnx_defrag_shortdma(struct mbuf *);
static int	bnx_encap(struct bnx_tx_ring *, struct mbuf **,
		    uint32_t *, int *);
static int	bnx_setup_tso(struct bnx_tx_ring *, struct mbuf **,
		    uint16_t *, uint16_t *);
static void	bnx_setup_serialize(struct bnx_softc *);
static void	bnx_set_tick_cpuid(struct bnx_softc *, boolean_t);

static void	bnx_reset(struct bnx_softc *);
static int	bnx_chipinit(struct bnx_softc *);
static int	bnx_blockinit(struct bnx_softc *);
static void	bnx_stop_block(struct bnx_softc *, bus_size_t, uint32_t);
static void	bnx_enable_msi(struct bnx_softc *sc);
static void	bnx_setmulti(struct bnx_softc *);
static void	bnx_setpromisc(struct bnx_softc *);
static void	bnx_stats_update_regs(struct bnx_softc *);
static uint32_t	bnx_dma_swap_options(struct bnx_softc *);

static uint32_t	bnx_readmem_ind(struct bnx_softc *, uint32_t);
static void	bnx_writemem_ind(struct bnx_softc *, uint32_t, uint32_t);
#ifdef notdef
static uint32_t	bnx_readreg_ind(struct bnx_softc *, uint32_t);
#endif
static void	bnx_writemem_direct(struct bnx_softc *, uint32_t, uint32_t);
static void	bnx_writembx(struct bnx_softc *, int, int);
static int	bnx_read_nvram(struct bnx_softc *, caddr_t, int, int);
static uint8_t	bnx_eeprom_getbyte(struct bnx_softc *, uint32_t, uint8_t *);
static int	bnx_read_eeprom(struct bnx_softc *, caddr_t, uint32_t, size_t);

static void	bnx_tbi_link_upd(struct bnx_softc *, uint32_t);
static void	bnx_copper_link_upd(struct bnx_softc *, uint32_t);
static void	bnx_autopoll_link_upd(struct bnx_softc *, uint32_t);
static void	bnx_link_poll(struct bnx_softc *);

static int	bnx_get_eaddr_mem(struct bnx_softc *, uint8_t[]);
static int	bnx_get_eaddr_nvram(struct bnx_softc *, uint8_t[]);
static int	bnx_get_eaddr_eeprom(struct bnx_softc *, uint8_t[]);
static int	bnx_get_eaddr(struct bnx_softc *, uint8_t[]);

static void	bnx_coal_change(struct bnx_softc *);
static int	bnx_sysctl_force_defrag(SYSCTL_HANDLER_ARGS);
static int	bnx_sysctl_tx_wreg(SYSCTL_HANDLER_ARGS);
static int	bnx_sysctl_rx_coal_ticks(SYSCTL_HANDLER_ARGS);
static int	bnx_sysctl_tx_coal_ticks(SYSCTL_HANDLER_ARGS);
static int	bnx_sysctl_rx_coal_bds(SYSCTL_HANDLER_ARGS);
static int	bnx_sysctl_tx_coal_bds(SYSCTL_HANDLER_ARGS);
static int	bnx_sysctl_rx_coal_bds_int(SYSCTL_HANDLER_ARGS);
static int	bnx_sysctl_tx_coal_bds_int(SYSCTL_HANDLER_ARGS);
static int	bnx_sysctl_coal_chg(SYSCTL_HANDLER_ARGS, uint32_t *,
		    int, int, uint32_t);
#ifdef IFPOLL_ENABLE
static int	bnx_sysctl_npoll_offset(SYSCTL_HANDLER_ARGS);
static int	bnx_sysctl_npoll_rxoff(SYSCTL_HANDLER_ARGS);
static int	bnx_sysctl_npoll_txoff(SYSCTL_HANDLER_ARGS);
#endif
static int	bnx_sysctl_std_refill(SYSCTL_HANDLER_ARGS);

static int	bnx_msi_enable = 1;
TUNABLE_INT("hw.bnx.msi.enable", &bnx_msi_enable);
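
/*
 * This tunable can be set from the loader, e.g. putting
 * hw.bnx.msi.enable="0" into /boot/loader.conf makes the driver fall
 * back to a legacy INTx interrupt.
 */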

static device_method_t bnx_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		bnx_probe),
	DEVMETHOD(device_attach,	bnx_attach),
	DEVMETHOD(device_detach,	bnx_detach),
	DEVMETHOD(device_shutdown,	bnx_shutdown),
	DEVMETHOD(device_suspend,	bnx_suspend),
	DEVMETHOD(device_resume,	bnx_resume),

	/* Bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	bnx_miibus_readreg),
	DEVMETHOD(miibus_writereg,	bnx_miibus_writereg),
	DEVMETHOD(miibus_statchg,	bnx_miibus_statchg),

	DEVMETHOD_END
};

static DEFINE_CLASS_0(bnx, bnx_driver, bnx_methods, sizeof(struct bnx_softc));
static devclass_t bnx_devclass;

DECLARE_DUMMY_MODULE(if_bnx);
DRIVER_MODULE(if_bnx, pci, bnx_driver, bnx_devclass, NULL, NULL);
DRIVER_MODULE(miibus, bnx, miibus_driver, miibus_devclass, NULL, NULL);

static uint32_t
bnx_readmem_ind(struct bnx_softc *sc, uint32_t off)
{
	device_t dev = sc->bnx_dev;
	uint32_t val;

	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
	val = pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4);
	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
	return (val);
}

static void
bnx_writemem_ind(struct bnx_softc *sc, uint32_t off, uint32_t val)
{
	device_t dev = sc->bnx_dev;

	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
	pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
}

static void
bnx_writemem_direct(struct bnx_softc *sc, uint32_t off, uint32_t val)
{
	CSR_WRITE_4(sc, off, val);
}

static void
bnx_writembx(struct bnx_softc *sc, int off, int val)
{
	CSR_WRITE_4(sc, off, val);
}

/*
 * Read a sequence of bytes from NVRAM.
 */
static int
bnx_read_nvram(struct bnx_softc *sc, caddr_t dest, int off, int cnt)
{
	return (1);
}

/*
 * Read a byte of data stored in the EEPROM at address 'addr.' The
 * BCM570x supports both the traditional bitbang interface and an
 * auto access interface for reading the EEPROM. We use the auto
 * access method.
 */
static uint8_t
bnx_eeprom_getbyte(struct bnx_softc *sc, uint32_t addr, uint8_t *dest)
{
	int i;
	uint32_t byte = 0;

	/*
	 * Enable use of auto EEPROM access so we can avoid
	 * having to use the bitbang method.
	 */
	BNX_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);

	/* Reset the EEPROM, load the clock period. */
	CSR_WRITE_4(sc, BGE_EE_ADDR,
	    BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
	DELAY(20);

	/* Issue the read EEPROM command. */
	CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);

	/* Wait for completion */
	for (i = 0; i < BNX_TIMEOUT * 10; i++) {
		DELAY(10);
		if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
			break;
	}

	if (i == BNX_TIMEOUT * 10) {
		if_printf(&sc->arpcom.ac_if, "eeprom read timed out\n");
		return (1);
	}

	/*
	 * EEPROM reads are word-wide; shift the 32-bit result down to
	 * the byte lane selected by the low two address bits.
	 */
	byte = CSR_READ_4(sc, BGE_EE_DATA);
	*dest = (byte >> ((addr % 4) * 8)) & 0xFF;

	return (0);
}

/*
 * Read a sequence of bytes from the EEPROM.
 */
static int
bnx_read_eeprom(struct bnx_softc *sc, caddr_t dest, uint32_t off, size_t len)
{
	size_t i;
	int err;
	uint8_t byte;

	for (byte = 0, err = 0, i = 0; i < len; i++) {
		err = bnx_eeprom_getbyte(sc, off + i, &byte);
		if (err)
			break;
		*(dest + i) = byte;
	}

	return (err ? 1 : 0);
}

static int
bnx_miibus_readreg(device_t dev, int phy, int reg)
{
	struct bnx_softc *sc = device_get_softc(dev);
	uint32_t val;
	int i;

	KASSERT(phy == sc->bnx_phyno,
	    ("invalid phyno %d, should be %d", phy, sc->bnx_phyno));

	/* Clear the autopoll bit if set, otherwise may trigger PCI errors. */
	if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) {
		CSR_WRITE_4(sc, BGE_MI_MODE,
		    sc->bnx_mi_mode & ~BGE_MIMODE_AUTOPOLL);
		DELAY(80);
	}

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ | BGE_MICOMM_BUSY |
	    BGE_MIPHY(phy) | BGE_MIREG(reg));

	/* Poll for the PHY register access to complete. */
	for (i = 0; i < BNX_TIMEOUT; i++) {
		DELAY(10);
		val = CSR_READ_4(sc, BGE_MI_COMM);
		if ((val & BGE_MICOMM_BUSY) == 0) {
			DELAY(5);
			val = CSR_READ_4(sc, BGE_MI_COMM);
			break;
		}
	}
	if (i == BNX_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if, "PHY read timed out "
		    "(phy %d, reg %d, val 0x%08x)\n", phy, reg, val);
		val = 0;
	}

	/* Restore the autopoll bit if necessary. */
	if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) {
		CSR_WRITE_4(sc, BGE_MI_MODE, sc->bnx_mi_mode);
		DELAY(80);
	}

	if (val & BGE_MICOMM_READFAIL)
		return 0;
	return (val & 0xFFFF);
}

static int
bnx_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct bnx_softc *sc = device_get_softc(dev);
	int i;

	KASSERT(phy == sc->bnx_phyno,
	    ("invalid phyno %d, should be %d", phy, sc->bnx_phyno));

	/* Clear the autopoll bit if set, otherwise may trigger PCI errors. */
	if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) {
		CSR_WRITE_4(sc, BGE_MI_MODE,
		    sc->bnx_mi_mode & ~BGE_MIMODE_AUTOPOLL);
		DELAY(80);
	}

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE | BGE_MICOMM_BUSY |
	    BGE_MIPHY(phy) | BGE_MIREG(reg) | val);

	for (i = 0; i < BNX_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) {
			DELAY(5);
			CSR_READ_4(sc, BGE_MI_COMM); /* dummy read */
			break;
		}
	}
	if (i == BNX_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if, "PHY write timed out "
		    "(phy %d, reg %d, val %d)\n", phy, reg, val);
	}

	/* Restore the autopoll bit if necessary. */
	if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) {
		CSR_WRITE_4(sc, BGE_MI_MODE, sc->bnx_mi_mode);
		DELAY(80);
	}

	return 0;
}

static void
bnx_miibus_statchg(device_t dev)
{
	struct bnx_softc *sc;
	struct mii_data *mii;

	sc = device_get_softc(dev);
	mii = device_get_softc(sc->bnx_miibus);

	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
		case IFM_1000_T:
		case IFM_1000_SX:
			sc->bnx_link = 1;
			break;
		default:
			sc->bnx_link = 0;
			break;
		}
	} else {
		sc->bnx_link = 0;
	}
	if (sc->bnx_link == 0)
		return;

	BNX_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
	    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) {
		BNX_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
	} else {
		BNX_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
	}

	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
		BNX_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
	} else {
		BNX_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
	}
}

/*
 * Memory management for jumbo frames.
 */
static int
bnx_alloc_jumbo_mem(struct bnx_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct bnx_jslot *entry;
	uint8_t *ptr;
	bus_addr_t paddr;
	int i, error;

	/*
	 * Create tag for jumbo mbufs.
	 * This is really a bit of a kludge. We allocate a special
	 * jumbo buffer pool which (thanks to the way our DMA
	 * memory allocation works) will consist of contiguous
	 * pages. This means that even though a jumbo buffer might
	 * be larger than a page size, we don't really need to
	 * map it into more than one DMA segment. However, the
	 * default mbuf tag will result in multi-segment mappings,
	 * so we have to create a special jumbo mbuf tag that
	 * lets us get away with mapping the jumbo buffers as
	 * a single segment. I think eventually the driver should
	 * be changed so that it uses ordinary mbufs and cluster
	 * buffers, i.e. jumbo frames can span multiple DMA
	 * descriptors. But that's a project for another day.
	 */

	/*
	 * Create DMA stuffs for jumbo RX ring.
	 */
	error = bnx_dma_block_alloc(sc, BGE_JUMBO_RX_RING_SZ,
	    &sc->bnx_cdata.bnx_rx_jumbo_ring_tag,
	    &sc->bnx_cdata.bnx_rx_jumbo_ring_map,
	    (void *)&sc->bnx_ldata.bnx_rx_jumbo_ring,
	    &sc->bnx_ldata.bnx_rx_jumbo_ring_paddr);
	if (error) {
		if_printf(ifp, "could not create jumbo RX ring\n");
		return error;
	}

	/*
	 * Create DMA stuffs for jumbo buffer block.
	 */
	error = bnx_dma_block_alloc(sc, BNX_JMEM,
	    &sc->bnx_cdata.bnx_jumbo_tag,
	    &sc->bnx_cdata.bnx_jumbo_map,
	    (void **)&sc->bnx_ldata.bnx_jumbo_buf,
	    &paddr);
	if (error) {
		if_printf(ifp, "could not create jumbo buffer\n");
		return error;
	}

	SLIST_INIT(&sc->bnx_jfree_listhead);

	/*
	 * Now divide it up into 9K pieces and save the addresses
	 * in an array. Note that we play an evil trick here by using
	 * the first few bytes in the buffer to hold the address
	 * of the softc structure for this interface. This is because
	 * bnx_jfree() needs it, but it is called by the mbuf management
	 * code which will not pass it to us explicitly.
	 */
	for (i = 0, ptr = sc->bnx_ldata.bnx_jumbo_buf; i < BNX_JSLOTS; i++) {
		entry = &sc->bnx_cdata.bnx_jslots[i];
		entry->bnx_sc = sc;
		entry->bnx_buf = ptr;
		entry->bnx_paddr = paddr;
		entry->bnx_inuse = 0;
		entry->bnx_slot = i;
		SLIST_INSERT_HEAD(&sc->bnx_jfree_listhead, entry, jslot_link);
		ptr += BNX_JLEN;
		paddr += BNX_JLEN;
	}
	return 0;
}

static void
bnx_free_jumbo_mem(struct bnx_softc *sc)
{
	/* Destroy jumbo RX ring. */
	bnx_dma_block_free(sc->bnx_cdata.bnx_rx_jumbo_ring_tag,
	    sc->bnx_cdata.bnx_rx_jumbo_ring_map,
	    sc->bnx_ldata.bnx_rx_jumbo_ring);

	/* Destroy jumbo buffer block. */
	bnx_dma_block_free(sc->bnx_cdata.bnx_jumbo_tag,
	    sc->bnx_cdata.bnx_jumbo_map,
	    sc->bnx_ldata.bnx_jumbo_buf);
}

/*
 * Allocate a jumbo buffer.
 */
static struct bnx_jslot *
bnx_jalloc(struct bnx_softc *sc)
{
	struct bnx_jslot *entry;

	lwkt_serialize_enter(&sc->bnx_jslot_serializer);
	entry = SLIST_FIRST(&sc->bnx_jfree_listhead);
	if (entry) {
		SLIST_REMOVE_HEAD(&sc->bnx_jfree_listhead, jslot_link);
		entry->bnx_inuse = 1;
	} else {
		if_printf(&sc->arpcom.ac_if, "no free jumbo buffers\n");
	}
	lwkt_serialize_exit(&sc->bnx_jslot_serializer);
	return(entry);
}

/*
 * Adjust usage count on a jumbo buffer.
 */
static void
bnx_jref(void *arg)
{
	struct bnx_jslot *entry = (struct bnx_jslot *)arg;
	struct bnx_softc *sc = entry->bnx_sc;

	if (sc == NULL)
		panic("bnx_jref: can't find softc pointer!");

	if (&sc->bnx_cdata.bnx_jslots[entry->bnx_slot] != entry) {
		panic("bnx_jref: asked to reference buffer "
		    "that we don't manage!");
	} else if (entry->bnx_inuse == 0) {
		panic("bnx_jref: buffer already free!");
	} else {
		atomic_add_int(&entry->bnx_inuse, 1);
	}
}

/*
 * Release a jumbo buffer.
 */
static void
bnx_jfree(void *arg)
{
	struct bnx_jslot *entry = (struct bnx_jslot *)arg;
	struct bnx_softc *sc = entry->bnx_sc;

	if (sc == NULL)
		panic("bnx_jfree: can't find softc pointer!");

	if (&sc->bnx_cdata.bnx_jslots[entry->bnx_slot] != entry) {
		panic("bnx_jfree: asked to free buffer that we don't manage!");
	} else if (entry->bnx_inuse == 0) {
		panic("bnx_jfree: buffer already free!");
	} else {
		/*
		 * Possible MP race to 0, use the serializer. The atomic insn
		 * is still needed for races against bnx_jref().
		 */
		lwkt_serialize_enter(&sc->bnx_jslot_serializer);
		atomic_subtract_int(&entry->bnx_inuse, 1);
		if (entry->bnx_inuse == 0) {
			SLIST_INSERT_HEAD(&sc->bnx_jfree_listhead,
			    entry, jslot_link);
		}
		lwkt_serialize_exit(&sc->bnx_jslot_serializer);
	}
}

/*
 * Initialize a standard receive ring descriptor.
 */
static int
bnx_newbuf_std(struct bnx_rx_ret_ring *ret, int i, int init)
{
	struct mbuf *m_new = NULL;
	bus_dma_segment_t seg;
	bus_dmamap_t map;
	int error, nsegs;
	struct bnx_rx_buf *rb;

	rb = &ret->bnx_std->bnx_rx_std_buf[i];
	KASSERT(!rb->bnx_rx_refilled, ("RX buf %dth has been refilled", i));

	m_new = m_getcl(init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m_new == NULL) {
		error = ENOBUFS;
		goto back;
	}
	m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
	m_adj(m_new, ETHER_ALIGN);

	error = bus_dmamap_load_mbuf_segment(ret->bnx_rx_mtag,
	    ret->bnx_rx_tmpmap, m_new, &seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m_new);
		goto back;
	}

	if (!init) {
		bus_dmamap_sync(ret->bnx_rx_mtag, rb->bnx_rx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(ret->bnx_rx_mtag, rb->bnx_rx_dmamap);
	}

	map = ret->bnx_rx_tmpmap;
	ret->bnx_rx_tmpmap = rb->bnx_rx_dmamap;

	rb->bnx_rx_dmamap = map;
	rb->bnx_rx_mbuf = m_new;
	rb->bnx_rx_paddr = seg.ds_addr;
back:
	rb->bnx_rx_refilled = 1;
	return error;
}

static void
bnx_setup_rxdesc_std(struct bnx_rx_std_ring *std, int i)
{
	struct bnx_rx_buf *rb;
	struct bge_rx_bd *r;

	rb = &std->bnx_rx_std_buf[i];
	KASSERT(rb->bnx_rx_refilled, ("RX buf %dth is not refilled", i));
	rb->bnx_rx_refilled = 0;

	r = &std->bnx_rx_std_ring[i];
	r->bge_addr.bge_addr_lo = BGE_ADDR_LO(rb->bnx_rx_paddr);
	r->bge_addr.bge_addr_hi = BGE_ADDR_HI(rb->bnx_rx_paddr);
	r->bge_len = rb->bnx_rx_mbuf->m_len;
	r->bge_idx = i;
	r->bge_flags = BGE_RXBDFLAG_END;
}

/*
 * Initialize a jumbo receive ring descriptor. This allocates
 * a jumbo buffer from the pool managed internally by the driver.
 */
static int
bnx_newbuf_jumbo(struct bnx_softc *sc, int i, int init)
{
	struct mbuf *m_new = NULL;
	struct bnx_jslot *buf;
	bus_addr_t paddr;

	/* Allocate the mbuf. */
	MGETHDR(m_new, init ? MB_WAIT : MB_DONTWAIT, MT_DATA);
	if (m_new == NULL)
		return ENOBUFS;

	/* Allocate the jumbo buffer */
	buf = bnx_jalloc(sc);
	if (buf == NULL) {
		m_freem(m_new);
		return ENOBUFS;
	}

	/* Attach the buffer to the mbuf. */
	m_new->m_ext.ext_arg = buf;
	m_new->m_ext.ext_buf = buf->bnx_buf;
	m_new->m_ext.ext_free = bnx_jfree;
	m_new->m_ext.ext_ref = bnx_jref;
	m_new->m_ext.ext_size = BNX_JUMBO_FRAMELEN;

	m_new->m_flags |= M_EXT;

	m_new->m_data = m_new->m_ext.ext_buf;
	m_new->m_len = m_new->m_pkthdr.len = m_new->m_ext.ext_size;

	paddr = buf->bnx_paddr;
	m_adj(m_new, ETHER_ALIGN);
	paddr += ETHER_ALIGN;

	/* Save necessary information */
	sc->bnx_cdata.bnx_rx_jumbo_chain[i].bnx_rx_mbuf = m_new;
	sc->bnx_cdata.bnx_rx_jumbo_chain[i].bnx_rx_paddr = paddr;

	/* Set up the descriptor. */
	bnx_setup_rxdesc_jumbo(sc, i);
	return 0;
}

static void
bnx_setup_rxdesc_jumbo(struct bnx_softc *sc, int i)
{
	struct bge_rx_bd *r;
	struct bnx_rx_buf *rc;

	r = &sc->bnx_ldata.bnx_rx_jumbo_ring[i];
	rc = &sc->bnx_cdata.bnx_rx_jumbo_chain[i];

	r->bge_addr.bge_addr_lo = BGE_ADDR_LO(rc->bnx_rx_paddr);
	r->bge_addr.bge_addr_hi = BGE_ADDR_HI(rc->bnx_rx_paddr);
	r->bge_len = rc->bnx_rx_mbuf->m_len;
	r->bge_idx = i;
	r->bge_flags = BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING;
}

static int
bnx_init_rx_ring_std(struct bnx_rx_std_ring *std)
{
	int i, error;

	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
		/* Use the first RX return ring's tmp RX mbuf DMA map */
		error = bnx_newbuf_std(&std->bnx_sc->bnx_rx_ret_ring[0], i, 1);
		if (error)
			return error;
		bnx_setup_rxdesc_std(std, i);
	}

	std->bnx_rx_std_refill = 0;
	std->bnx_rx_std_running = 0;

	lwkt_serialize_handler_enable(&std->bnx_rx_std_serialize);

	std->bnx_rx_std = BGE_STD_RX_RING_CNT - 1;
	bnx_writembx(std->bnx_sc, BGE_MBX_RX_STD_PROD_LO, std->bnx_rx_std);

	return(0);
}

static void
bnx_free_rx_ring_std(struct bnx_rx_std_ring *std)
{
	int i;

	lwkt_serialize_handler_disable(&std->bnx_rx_std_serialize);

	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
		struct bnx_rx_buf *rb = &std->bnx_rx_std_buf[i];

		rb->bnx_rx_refilled = 0;
		if (rb->bnx_rx_mbuf != NULL) {
			bus_dmamap_unload(std->bnx_rx_mtag, rb->bnx_rx_dmamap);
			m_freem(rb->bnx_rx_mbuf);
			rb->bnx_rx_mbuf = NULL;
		}
		bzero(&std->bnx_rx_std_ring[i], sizeof(struct bge_rx_bd));
	}
}

static int
bnx_init_rx_ring_jumbo(struct bnx_softc *sc)
{
	struct bge_rcb *rcb;
	int i, error;

	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
		error = bnx_newbuf_jumbo(sc, i, 1);
		if (error)
			return error;
	}

	sc->bnx_jumbo = BGE_JUMBO_RX_RING_CNT - 1;

	rcb = &sc->bnx_ldata.bnx_info.bnx_jumbo_rx_rcb;
	rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 0);
	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);

	bnx_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bnx_jumbo);

	return(0);
}

static void
bnx_free_rx_ring_jumbo(struct bnx_softc *sc)
{
	int i;

	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
		struct bnx_rx_buf *rc = &sc->bnx_cdata.bnx_rx_jumbo_chain[i];

		if (rc->bnx_rx_mbuf != NULL) {
			m_freem(rc->bnx_rx_mbuf);
			rc->bnx_rx_mbuf = NULL;
		}
		bzero(&sc->bnx_ldata.bnx_rx_jumbo_ring[i],
		    sizeof(struct bge_rx_bd));
	}
}

static void
bnx_free_tx_ring(struct bnx_tx_ring *txr)
{
	int i;

	for (i = 0; i < BGE_TX_RING_CNT; i++) {
		struct bnx_tx_buf *buf = &txr->bnx_tx_buf[i];

		if (buf->bnx_tx_mbuf != NULL) {
			bus_dmamap_unload(txr->bnx_tx_mtag,
			    buf->bnx_tx_dmamap);
			m_freem(buf->bnx_tx_mbuf);
			buf->bnx_tx_mbuf = NULL;
		}
		bzero(&txr->bnx_tx_ring[i], sizeof(struct bge_tx_bd));
	}
	txr->bnx_tx_saved_considx = BNX_TXCONS_UNSET;
}

static int
bnx_init_tx_ring(struct bnx_tx_ring *txr)
{
	txr->bnx_tx_cnt = 0;
	txr->bnx_tx_saved_considx = 0;
	txr->bnx_tx_prodidx = 0;

	/* Initialize transmit producer index for host-memory send ring. */
	bnx_writembx(txr->bnx_sc, txr->bnx_tx_mbx, txr->bnx_tx_prodidx);

	return(0);
}

static void
bnx_setmulti(struct bnx_softc *sc)
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	uint32_t hashes[4] = { 0, 0, 0, 0 };
	int h, i;

	ifp = &sc->arpcom.ac_if;

	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		for (i = 0; i < 4; i++)
			CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
		return;
	}

	/* First, zot all the existing filters. */
	for (i = 0; i < 4; i++)
		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);

	/* Now program new ones. */
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		h = ether_crc32_le(
		    LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
		    ETHER_ADDR_LEN) & 0x7f;
		hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
	}
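
	/*
	 * Hash example: the low 7 bits of the little-endian CRC32 of the
	 * MAC address select one of 128 filter bits; bits 6-5 pick one of
	 * the four BGE_MAR registers and bits 4-0 pick the bit within it.
	 * E.g. h = 0x4a sets bit 10 of BGE_MAR2.
	 */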
	for (i = 0; i < 4; i++)
		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
}

/*
 * Do endian, PCI and DMA initialization. Also check the on-board ROM
 * self-test results.
 */
static int
bnx_chipinit(struct bnx_softc *sc)
{
	uint32_t dma_rw_ctl, mode_ctl;
	int i;

	/* Set endian type before we access any non-PCI registers. */
	pci_write_config(sc->bnx_dev, BGE_PCI_MISC_CTL,
	    BGE_INIT | BGE_PCIMISCCTL_TAGGED_STATUS, 4);

	/* Clear the MAC control register */
	CSR_WRITE_4(sc, BGE_MAC_MODE, 0);

	/*
	 * Clear the MAC statistics block in the NIC's
	 * internal memory.
	 */
	for (i = BGE_STATS_BLOCK;
	    i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t))
		BNX_MEMWIN_WRITE(sc, i, 0);

	for (i = BGE_STATUS_BLOCK;
	    i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t))
		BNX_MEMWIN_WRITE(sc, i, 0);

	if (BNX_IS_57765_FAMILY(sc)) {
		uint32_t val;

		if (sc->bnx_chipid == BGE_CHIPID_BCM57765_A0) {
			mode_ctl = CSR_READ_4(sc, BGE_MODE_CTL);
			val = mode_ctl & ~BGE_MODECTL_PCIE_PORTS;

			/* Access the lower 1K of PL PCI-E block registers. */
			CSR_WRITE_4(sc, BGE_MODE_CTL,
			    val | BGE_MODECTL_PCIE_PL_SEL);

			val = CSR_READ_4(sc, BGE_PCIE_PL_LO_PHYCTL5);
			val |= BGE_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ;
			CSR_WRITE_4(sc, BGE_PCIE_PL_LO_PHYCTL5, val);

			CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl);
		}
		if (sc->bnx_chiprev != BGE_CHIPREV_57765_AX) {
			/* Fix transmit hangs */
			val = CSR_READ_4(sc, BGE_CPMU_PADRNG_CTL);
			val |= BGE_CPMU_PADRNG_CTL_RDIV2;
			CSR_WRITE_4(sc, BGE_CPMU_PADRNG_CTL, val);

			mode_ctl = CSR_READ_4(sc, BGE_MODE_CTL);
			val = mode_ctl & ~BGE_MODECTL_PCIE_PORTS;

			/* Access the lower 1K of DL PCI-E block registers. */
			CSR_WRITE_4(sc, BGE_MODE_CTL,
			    val | BGE_MODECTL_PCIE_DL_SEL);

			val = CSR_READ_4(sc, BGE_PCIE_DL_LO_FTSMAX);
			val &= ~BGE_PCIE_DL_LO_FTSMAX_MASK;
			val |= BGE_PCIE_DL_LO_FTSMAX_VAL;
			CSR_WRITE_4(sc, BGE_PCIE_DL_LO_FTSMAX, val);

			CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl);
		}

		val = CSR_READ_4(sc, BGE_CPMU_LSPD_10MB_CLK);
		val &= ~BGE_CPMU_LSPD_10MB_MACCLK_MASK;
		val |= BGE_CPMU_LSPD_10MB_MACCLK_6_25;
		CSR_WRITE_4(sc, BGE_CPMU_LSPD_10MB_CLK, val);
	}

	/*
	 * Set up the PCI DMA control register.
	 */
	dma_rw_ctl = pci_read_config(sc->bnx_dev, BGE_PCI_DMA_RW_CTL, 4);
	/*
	 * Disable 32bytes cache alignment for DMA write to host memory
	 *
	 * NOTE:
	 * 64bytes cache alignment for DMA write to host memory is still
	 * enabled.
	 */
	dma_rw_ctl |= BGE_PCIDMARWCTL_DIS_CACHE_ALIGNMENT;
	if (sc->bnx_chipid == BGE_CHIPID_BCM57765_A0)
		dma_rw_ctl &= ~BGE_PCIDMARWCTL_CRDRDR_RDMA_MRRS_MSK;
	/*
	 * Enable HW workaround for controllers that misinterpret
	 * a status tag update and leave interrupts permanently
	 * disabled.
	 */
	if (sc->bnx_asicrev != BGE_ASICREV_BCM5717 &&
	    sc->bnx_asicrev != BGE_ASICREV_BCM5762 &&
	    !BNX_IS_57765_FAMILY(sc))
		dma_rw_ctl |= BGE_PCIDMARWCTL_TAGGED_STATUS_WA;
	if (bootverbose) {
		if_printf(&sc->arpcom.ac_if, "DMA read/write %#x\n",
		    dma_rw_ctl);
	}
	pci_write_config(sc->bnx_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);

	/*
	 * Set up general mode register.
	 */
	mode_ctl = bnx_dma_swap_options(sc) | BGE_MODECTL_MAC_ATTN_INTR |
	    BGE_MODECTL_HOST_SEND_BDS | BGE_MODECTL_TX_NO_PHDR_CSUM;
	CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl);

	/*
	 * Disable memory write invalidate. Apparently it is not supported
	 * properly by these devices. Also ensure that INTx isn't disabled,
	 * as these chips need it even when using MSI.
	 */
	PCI_CLRBIT(sc->bnx_dev, BGE_PCI_CMD,
	    (PCIM_CMD_MWRICEN | PCIM_CMD_INTxDIS), 4);

	/* Set the timer prescaler (always 66MHz) */
	CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/);

	return(0);
}

static int
bnx_blockinit(struct bnx_softc *sc)
{
	struct bnx_tx_ring *txr = &sc->bnx_tx_ring[0];
	struct bnx_rx_ret_ring *ret = &sc->bnx_rx_ret_ring[0];
	struct bge_rcb *rcb;
	bus_size_t vrcb;
	bge_hostaddr taddr;
	uint32_t val;
	int i, limit;

	/*
	 * Initialize the memory window pointer register so that
	 * we can access the first 32K of internal NIC RAM. This will
	 * allow us to set up the TX send ring RCBs and the RX return
	 * ring RCBs, plus other things which live in NIC memory.
	 */
	CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);

	/* Configure mbuf pool watermarks */
	if (BNX_IS_57765_PLUS(sc)) {
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
		if (sc->arpcom.ac_if.if_mtu > ETHERMTU) {
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x7e);
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xea);
		} else {
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x2a);
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xa0);
		}
	} else {
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
	}

	/* Configure DMA resource watermarks */
	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);

	/* Enable buffer manager */
	val = BGE_BMANMODE_ENABLE | BGE_BMANMODE_LOMBUF_ATTN;
	/*
	 * Change the arbitration algorithm of TXMBUF read request to
	 * round-robin instead of priority based for BCM5719.  When
	 * TXFIFO is almost empty, RDMA will hold its request until
	 * TXFIFO is not almost empty.
	 */
	if (sc->bnx_asicrev == BGE_ASICREV_BCM5719)
		val |= BGE_BMANMODE_NO_TX_UNDERRUN;
	if (sc->bnx_asicrev == BGE_ASICREV_BCM5717 ||
	    sc->bnx_chipid == BGE_CHIPID_BCM5719_A0 ||
	    sc->bnx_chipid == BGE_CHIPID_BCM5720_A0)
		val |= BGE_BMANMODE_LOMBUF_ATTN;
	CSR_WRITE_4(sc, BGE_BMAN_MODE, val);

	/* Poll for buffer manager start indication */
	for (i = 0; i < BNX_TIMEOUT; i++) {
		if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
			break;
		DELAY(10);
	}

	if (i == BNX_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if,
		    "buffer manager failed to start\n");
		return(ENXIO);
	}

	/* Enable flow-through queues */
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);

	/* Wait until queue initialization is complete */
	for (i = 0; i < BNX_TIMEOUT; i++) {
		if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
			break;
		DELAY(10);
	}

	if (i == BNX_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if,
		    "flow-through queue init failed\n");
		return(ENXIO);
	}

	/*
	 * Summary of rings supported by the controller:
	 *
	 * Standard Receive Producer Ring
	 * - This ring is used to feed receive buffers for "standard"
	 *   sized frames (typically 1536 bytes) to the controller.
	 *
	 * Jumbo Receive Producer Ring
	 * - This ring is used to feed receive buffers for jumbo sized
	 *   frames (i.e. anything bigger than the "standard" frames)
	 *   to the controller.
	 *
	 * Mini Receive Producer Ring
	 * - This ring is used to feed receive buffers for "mini"
	 *   sized frames to the controller.
	 * - This feature required external memory for the controller
	 *   but was never used in a production system.  Should always
	 *   be disabled.
	 *
	 * Receive Return Ring
	 * - After the controller has placed an incoming frame into a
	 *   receive buffer that buffer is moved into a receive return
	 *   ring.  The driver is then responsible for passing the
	 *   buffer up to the stack.  Many versions of the controller
	 *   support multiple RR rings.
	 *
	 * Send Ring
	 * - This ring is used for outgoing frames.  Many versions of
	 *   the controller support multiple send rings.
	 */

	/* Initialize the standard receive producer ring control block. */
	rcb = &sc->bnx_ldata.bnx_info.bnx_std_rx_rcb;
	rcb->bge_hostaddr.bge_addr_lo =
	    BGE_ADDR_LO(sc->bnx_rx_std_ring.bnx_rx_std_ring_paddr);
	rcb->bge_hostaddr.bge_addr_hi =
	    BGE_ADDR_HI(sc->bnx_rx_std_ring.bnx_rx_std_ring_paddr);
	if (BNX_IS_57765_PLUS(sc)) {
		/*
		 * Bits 31-16: Programmable ring size (2048, 1024, 512, .., 32)
		 * Bits 15-2 : Maximum RX frame size
		 * Bit 1     : 1 = Ring Disabled, 0 = Ring Enabled
		 * Bit 0     : Reserved
		 */
		rcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(512, BNX_MAX_FRAMELEN << 2);
	} else {
		/*
		 * Bits 31-16: Programmable ring size (512, 256, 128, 64, 32)
		 * Bits 15-2 : Reserved (should be 0)
		 * Bit 1     : 1 = Ring Disabled, 0 = Ring Enabled
		 * Bit 0     : Reserved
		 */
		rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
	}
	if (BNX_IS_5717_PLUS(sc))
		rcb->bge_nicaddr = BGE_STD_RX_RINGS_5717;
	else
		rcb->bge_nicaddr = BGE_STD_RX_RINGS;
	/* Write the standard receive producer ring control block. */
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
	/* Reset the standard receive producer ring producer index. */
	bnx_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0);

	/*
	 * Initialize the jumbo RX producer ring control
	 * block.  We set the 'ring disabled' bit in the
	 * flags field until we're actually ready to start
	 * using this ring (i.e. once we set the MTU
	 * high enough to require it).
	 */
	if (BNX_IS_JUMBO_CAPABLE(sc)) {
		rcb = &sc->bnx_ldata.bnx_info.bnx_jumbo_rx_rcb;
		/* Get the jumbo receive producer ring RCB parameters. */
		rcb->bge_hostaddr.bge_addr_lo =
		    BGE_ADDR_LO(sc->bnx_ldata.bnx_rx_jumbo_ring_paddr);
		rcb->bge_hostaddr.bge_addr_hi =
		    BGE_ADDR_HI(sc->bnx_ldata.bnx_rx_jumbo_ring_paddr);
		rcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(BNX_MAX_FRAMELEN,
		    BGE_RCB_FLAG_RING_DISABLED);
		if (BNX_IS_5717_PLUS(sc))
			rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS_5717;
		else
			rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
		    rcb->bge_hostaddr.bge_addr_hi);
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
		    rcb->bge_hostaddr.bge_addr_lo);
		/* Program the jumbo receive producer ring RCB parameters. */
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
		    rcb->bge_maxlen_flags);
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
		/* Reset the jumbo receive producer ring producer index. */
		bnx_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
	}

	/*
	 * The BD ring replenish thresholds control how often the
	 * hardware fetches new BD's from the producer rings in host
	 * memory.  Setting the value too low on a busy system can
	 * starve the hardware and reduce the throughput.
	 *
	 * Set the BD ring replenish thresholds. The recommended
	 * values are 1/8th the number of descriptors allocated to
	 * each ring.
	 */
	val = BGE_STD_RX_RING_CNT / 8;
	CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, val);
	if (BNX_IS_JUMBO_CAPABLE(sc)) {
		CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH,
		    BGE_JUMBO_RX_RING_CNT/8);
	}
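	/*
	 * E.g. with the stock BGE_STD_RX_RING_CNT of 512 the standard ring
	 * threshold above works out to 64 BDs, and the jumbo one to
	 * 256 / 8 = 32, assuming the usual ring sizes from the bge headers.
	 */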
	if (BNX_IS_57765_PLUS(sc)) {
		CSR_WRITE_4(sc, BGE_STD_REPLENISH_LWM, 32);
		CSR_WRITE_4(sc, BGE_JMB_REPLENISH_LWM, 16);
	}

	/*
	 * Disable all send rings by setting the 'ring disabled' bit
	 * in the flags field of all the TX send ring control blocks,
	 * located in NIC memory.
	 */
	if (BNX_IS_5717_PLUS(sc))
		limit = 4;
	else if (BNX_IS_57765_FAMILY(sc) ||
	    sc->bnx_asicrev == BGE_ASICREV_BCM5762)
		limit = 2;
	else
		limit = 1;
	vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
	for (i = 0; i < limit; i++) {
		RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
		RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
		vrcb += sizeof(struct bge_rcb);
	}

	/* Configure send ring RCB 0 (we use only the first ring) */
	vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
	BGE_HOSTADDR(taddr, txr->bnx_tx_ring_paddr);
	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
	if (BNX_IS_5717_PLUS(sc)) {
		RCB_WRITE_4(sc, vrcb, bge_nicaddr, BGE_SEND_RING_5717);
	} else {
		RCB_WRITE_4(sc, vrcb, bge_nicaddr,
		    BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
	}
	RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
	    BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));

	/*
	 * Disable all receive return rings by setting the
	 * 'ring disabled' bit in the flags field of all the receive
	 * return ring control blocks, located in NIC memory.
	 */
	if (BNX_IS_5717_PLUS(sc)) {
		/* Should be 17, use 16 until we get an SRAM map. */
		limit = 16;
	} else if (BNX_IS_57765_FAMILY(sc) ||
	    sc->bnx_asicrev == BGE_ASICREV_BCM5762) {
		limit = 4;
	} else {
		limit = 1;
	}
	/* Disable all receive return rings. */
	vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
	for (i = 0; i < limit; i++) {
		RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0);
		RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0);
		RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
		    BGE_RCB_FLAG_RING_DISABLED);
		RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
		bnx_writembx(sc, BGE_MBX_RX_CONS0_LO +
		    (i * (sizeof(uint64_t))), 0);
		vrcb += sizeof(struct bge_rcb);
	}

	/*
	 * Set up receive return ring 0.  Note that the NIC address
	 * for RX return rings is 0x0.  The return rings live entirely
	 * within the host, so the nicaddr field in the RCB isn't used.
	 */
	vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
	BGE_HOSTADDR(taddr, ret->bnx_rx_ret_ring_paddr);
	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
	RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
	RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
	    BGE_RCB_MAXLEN_FLAGS(BNX_RETURN_RING_CNT, 0));

	/* Set random backoff seed for TX */
	CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
	    sc->arpcom.ac_enaddr[0] + sc->arpcom.ac_enaddr[1] +
	    sc->arpcom.ac_enaddr[2] + sc->arpcom.ac_enaddr[3] +
	    sc->arpcom.ac_enaddr[4] + sc->arpcom.ac_enaddr[5] +
	    BGE_TX_BACKOFF_SEED_MASK);
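	/*
	 * The seed is simply the sum of the six MAC address bytes plus the
	 * mask constant, so each NIC on a shared segment ends up with a
	 * different half-duplex retransmit backoff pattern.
	 */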

	/* Set inter-packet gap */
	val = 0x2620;
	if (sc->bnx_asicrev == BGE_ASICREV_BCM5720 ||
	    sc->bnx_asicrev == BGE_ASICREV_BCM5762) {
		val |= CSR_READ_4(sc, BGE_TX_LENGTHS) &
		    (BGE_TXLEN_JMB_FRM_LEN_MSK | BGE_TXLEN_CNT_DN_VAL_MSK);
	}
	CSR_WRITE_4(sc, BGE_TX_LENGTHS, val);

	/*
	 * Specify which ring to use for packets that don't match
	 * any RX rules.
	 */
	CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);

	/*
	 * Configure number of RX lists. One interrupt distribution
	 * list, sixteen active lists, one bad frames class.
	 */
	CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);

	/* Initialize RX list placement stats mask. */
	CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
	CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);

	/* Disable host coalescing until we get it set up */
	CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);

	/* Poll to make sure it's shut down. */
	for (i = 0; i < BNX_TIMEOUT; i++) {
		if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
			break;
		DELAY(10);
	}

	if (i == BNX_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if,
		    "host coalescing engine failed to idle\n");
		return(ENXIO);
	}

	/* Set up host coalescing defaults */
	CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bnx_rx_coal_ticks);
	CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bnx_tx_coal_ticks);
	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bnx_rx_coal_bds);
	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bnx_tx_coal_bds);
	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, sc->bnx_rx_coal_bds_int);
	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, sc->bnx_tx_coal_bds_int);

	/* Set up address of status block */
	bzero(sc->bnx_ldata.bnx_status_block, BGE_STATUS_BLK_SZ);
	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI,
	    BGE_ADDR_HI(sc->bnx_ldata.bnx_status_block_paddr));
	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
	    BGE_ADDR_LO(sc->bnx_ldata.bnx_status_block_paddr));

	/* Set up status block partial update size. */
	val = BGE_STATBLKSZ_32BYTE;
#if 0
	/*
	 * Does not seem to have visible effect in both
	 * bulk data (1472B UDP datagram) and tiny data
	 * (18B UDP datagram) TX tests.
	 */
	val |= BGE_HCCMODE_CLRTICK_TX;
#endif

	/* Turn on host coalescing state machine */
	CSR_WRITE_4(sc, BGE_HCC_MODE, val | BGE_HCCMODE_ENABLE);

	/* Turn on RX BD completion state machine and enable attentions */
	CSR_WRITE_4(sc, BGE_RBDC_MODE,
	    BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);

	/* Turn on RX list placement state machine */
	CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);

	val = BGE_MACMODE_TXDMA_ENB | BGE_MACMODE_RXDMA_ENB |
	    BGE_MACMODE_RX_STATS_CLEAR | BGE_MACMODE_TX_STATS_CLEAR |
	    BGE_MACMODE_RX_STATS_ENB | BGE_MACMODE_TX_STATS_ENB |
	    BGE_MACMODE_FRMHDR_DMA_ENB;

	if (sc->bnx_flags & BNX_FLAG_TBI)
		val |= BGE_PORTMODE_TBI;
	else if (sc->bnx_flags & BNX_FLAG_MII_SERDES)
		val |= BGE_PORTMODE_GMII;
	else
		val |= BGE_PORTMODE_MII;

	/* Turn on DMA, clear stats */
	CSR_WRITE_4(sc, BGE_MAC_MODE, val);

	/* Set misc. local control, enable interrupts on attentions */
	CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);

#ifdef notdef
	/* Assert GPIO pins for PHY reset */
	BNX_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
	    BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
	BNX_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
	    BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
#endif

	/* Turn on write DMA state machine */
	val = BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS;
	/* Enable host coalescing bug fix. */
	val |= BGE_WDMAMODE_STATUS_TAG_FIX;
	if (sc->bnx_asicrev == BGE_ASICREV_BCM5785) {
		/* Request larger DMA burst size to get better performance. */
		val |= BGE_WDMAMODE_BURST_ALL_DATA;
	}
	CSR_WRITE_4(sc, BGE_WDMA_MODE, val);
	DELAY(40);

	if (BNX_IS_57765_PLUS(sc)) {
		uint32_t dmactl, dmactl_reg;

		if (sc->bnx_asicrev == BGE_ASICREV_BCM5762)
			dmactl_reg = BGE_RDMA_RSRVCTRL2;
		else
			dmactl_reg = BGE_RDMA_RSRVCTRL;

		dmactl = CSR_READ_4(sc, dmactl_reg);
		/*
		 * Adjust tx margin to prevent TX data corruption and
		 * fix internal FIFO overflow.
		 */
		if (sc->bnx_asicrev == BGE_ASICREV_BCM5719 ||
		    sc->bnx_asicrev == BGE_ASICREV_BCM5720 ||
		    sc->bnx_asicrev == BGE_ASICREV_BCM5762) {
			dmactl &= ~(BGE_RDMA_RSRVCTRL_FIFO_LWM_MASK |
			    BGE_RDMA_RSRVCTRL_FIFO_HWM_MASK |
			    BGE_RDMA_RSRVCTRL_TXMRGN_MASK);
			dmactl |= BGE_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
			    BGE_RDMA_RSRVCTRL_FIFO_HWM_1_5K |
			    BGE_RDMA_RSRVCTRL_TXMRGN_320B;
		}
		/*
		 * Enable fix for read DMA FIFO overruns.
		 * The fix is to limit the number of RX BDs
		 * the hardware would fetch at a time.
		 */
		CSR_WRITE_4(sc, dmactl_reg,
		    dmactl | BGE_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
	}

	if (sc->bnx_asicrev == BGE_ASICREV_BCM5719) {
		CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL,
		    CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) |
		    BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_4K |
		    BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K);
	} else if (sc->bnx_asicrev == BGE_ASICREV_BCM5720 ||
	    sc->bnx_asicrev == BGE_ASICREV_BCM5762) {
		uint32_t ctrl_reg;

		if (sc->bnx_asicrev == BGE_ASICREV_BCM5762)
			ctrl_reg = BGE_RDMA_LSO_CRPTEN_CTRL2;
		else
			ctrl_reg = BGE_RDMA_LSO_CRPTEN_CTRL;

		/*
		 * Allow 4KB burst length reads for non-LSO frames.
		 * Enable 512B burst length reads for buffer descriptors.
		 */
		CSR_WRITE_4(sc, ctrl_reg,
		    CSR_READ_4(sc, ctrl_reg) |
		    BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_512 |
		    BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K);
	}

	/* Turn on read DMA state machine */
	val = BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS;
	if (sc->bnx_asicrev == BGE_ASICREV_BCM5717)
		val |= BGE_RDMAMODE_MULT_DMA_RD_DIS;
	if (sc->bnx_asicrev == BGE_ASICREV_BCM5784 ||
	    sc->bnx_asicrev == BGE_ASICREV_BCM5785 ||
	    sc->bnx_asicrev == BGE_ASICREV_BCM57780) {
		val |= BGE_RDMAMODE_BD_SBD_CRPT_ATTN |
		    BGE_RDMAMODE_MBUF_RBD_CRPT_ATTN |
		    BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN;
	}
	if (sc->bnx_asicrev == BGE_ASICREV_BCM5720 ||
	    sc->bnx_asicrev == BGE_ASICREV_BCM5762) {
		val |= CSR_READ_4(sc, BGE_RDMA_MODE) &
		    BGE_RDMAMODE_H2BNC_VLAN_DET;
		/*
		 * Allow multiple outstanding read requests from
		 * non-LSO read DMA engine.
		 */
		val &= ~BGE_RDMAMODE_MULT_DMA_RD_DIS;
	}
	if (sc->bnx_asicrev == BGE_ASICREV_BCM57766)
		val |= BGE_RDMAMODE_JMB_2K_MMRR;
	if (sc->bnx_flags & BNX_FLAG_TSO)
		val |= BGE_RDMAMODE_TSO4_ENABLE;
	val |= BGE_RDMAMODE_FIFO_LONG_BURST;
	CSR_WRITE_4(sc, BGE_RDMA_MODE, val);
	DELAY(40);

	/* Turn on RX data completion state machine */
	CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);

	/* Turn on RX BD initiator state machine */
	CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);

	/* Turn on RX data and RX BD initiator state machine */
	CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);

	/* Turn on send BD completion state machine */
	CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);

	/* Turn on send data completion state machine */
	val = BGE_SDCMODE_ENABLE;
	if (sc->bnx_asicrev == BGE_ASICREV_BCM5761)
		val |= BGE_SDCMODE_CDELAY;
	CSR_WRITE_4(sc, BGE_SDC_MODE, val);

	/* Turn on send data initiator state machine */
	if (sc->bnx_flags & BNX_FLAG_TSO) {
		CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE |
		    BGE_SDIMODE_HW_LSO_PRE_DMA);
	} else {
		CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
	}

	/* Turn on send BD initiator state machine */
	CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);

	/* Turn on send BD selector state machine */
	CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);

	CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
	CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
	    BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);

	/* ack/clear link change events */
	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
	    BGE_MACSTAT_LINK_CHANGED);
	CSR_WRITE_4(sc, BGE_MI_STS, 0);

	/*
	 * Enable attention when the link has changed state for
	 * devices that use auto polling.
	 */
	if (sc->bnx_flags & BNX_FLAG_TBI) {
		CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
	} else {
		if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) {
			CSR_WRITE_4(sc, BGE_MI_MODE, sc->bnx_mi_mode);
			DELAY(80);
		}
	}

	/*
	 * Clear any pending link state attention.
	 * Otherwise some link state change events may be lost until attention
	 * is cleared by bnx_intr() -> bnx_softc.bnx_link_upd() sequence.
	 * It's not necessary on newer BCM chips - perhaps enabling link
	 * state change attentions implies clearing pending attention.
	 */
	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
	    BGE_MACSTAT_LINK_CHANGED);

	/* Enable link state change attentions. */
	BNX_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);

	return(0);
}

/*
 * Probe for a Broadcom chip. Check the PCI vendor and device IDs
 * against our list and return its name if we find a match. Note
 * that since the Broadcom controller contains VPD support, we
 * can get the device name string from the controller itself instead
 * of the compiled-in string. This is a little slow, but it guarantees
 * we'll always announce the right product name.
 */
static int
bnx_probe(device_t dev)
{
	const struct bnx_type *t;
	uint16_t product, vendor;

	if (!pci_is_pcie(dev))
		return ENXIO;

	product = pci_get_device(dev);
	vendor = pci_get_vendor(dev);

	for (t = bnx_devs; t->bnx_name != NULL; t++) {
		if (vendor == t->bnx_vid && product == t->bnx_did)
			break;
	}
	if (t->bnx_name == NULL)
		return ENXIO;

	device_set_desc(dev, t->bnx_name);
	return 0;
}

static int
bnx_attach(device_t dev)
{
	struct ifnet *ifp;
	struct bnx_softc *sc;
	struct bnx_rx_std_ring *std;
	uint32_t hwcfg = 0;
	int error = 0, rid, capmask, i, std_cpuid, std_cpuid_def;
	uint8_t ether_addr[ETHER_ADDR_LEN];
	uint16_t product;
	uintptr_t mii_priv = 0;
#ifdef BNX_TSO_DEBUG
	char desc[32];
#endif
#ifdef IFPOLL_ENABLE
	int offset, offset_def;
#endif

	sc = device_get_softc(dev);
	sc->bnx_dev = dev;
	callout_init_mp(&sc->bnx_tick_timer);
	lwkt_serialize_init(&sc->bnx_jslot_serializer);
	lwkt_serialize_init(&sc->bnx_main_serialize);

	product = pci_get_device(dev);

#ifndef BURN_BRIDGES
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		uint32_t irq, mem;

		irq = pci_read_config(dev, PCIR_INTLINE, 4);
		mem = pci_read_config(dev, BGE_PCI_BAR0, 4);

		device_printf(dev, "chip is in D%d power mode "
		    "-- setting to D0\n", pci_get_powerstate(dev));

		pci_set_powerstate(dev, PCI_POWERSTATE_D0);

		pci_write_config(dev, PCIR_INTLINE, irq, 4);
		pci_write_config(dev, BGE_PCI_BAR0, mem, 4);
	}
#endif	/* !BURN_BRIDGES */

	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

	rid = BGE_PCI_BAR0;
	sc->bnx_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->bnx_res == NULL) {
		device_printf(dev, "couldn't map memory\n");
		return ENXIO;
	}

	sc->bnx_btag = rman_get_bustag(sc->bnx_res);
	sc->bnx_bhandle = rman_get_bushandle(sc->bnx_res);

	/* Save various chip information */
	sc->bnx_chipid =
	    pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >>
	    BGE_PCIMISCCTL_ASICREV_SHIFT;
	if (BGE_ASICREV(sc->bnx_chipid) == BGE_ASICREV_USE_PRODID_REG) {
		/* All chips having dedicated ASICREV register have CPMU */
		sc->bnx_flags |= BNX_FLAG_CPMU;

		switch (product) {
		case PCI_PRODUCT_BROADCOM_BCM5717:
		case PCI_PRODUCT_BROADCOM_BCM5717C:
		case PCI_PRODUCT_BROADCOM_BCM5718:
		case PCI_PRODUCT_BROADCOM_BCM5719:
		case PCI_PRODUCT_BROADCOM_BCM5720_ALT:
		case PCI_PRODUCT_BROADCOM_BCM5725:
		case PCI_PRODUCT_BROADCOM_BCM5727:
		case PCI_PRODUCT_BROADCOM_BCM5762:
			sc->bnx_chipid = pci_read_config(dev,
			    BGE_PCI_GEN2_PRODID_ASICREV, 4);
			break;

		case PCI_PRODUCT_BROADCOM_BCM57761:
		case PCI_PRODUCT_BROADCOM_BCM57762:
		case PCI_PRODUCT_BROADCOM_BCM57765:
		case PCI_PRODUCT_BROADCOM_BCM57766:
		case PCI_PRODUCT_BROADCOM_BCM57781:
		case PCI_PRODUCT_BROADCOM_BCM57782:
		case PCI_PRODUCT_BROADCOM_BCM57785:
		case PCI_PRODUCT_BROADCOM_BCM57786:
		case PCI_PRODUCT_BROADCOM_BCM57791:
		case PCI_PRODUCT_BROADCOM_BCM57795:
			sc->bnx_chipid = pci_read_config(dev,
			    BGE_PCI_GEN15_PRODID_ASICREV, 4);
			break;

		default:
			sc->bnx_chipid = pci_read_config(dev,
			    BGE_PCI_PRODID_ASICREV, 4);
			break;
		}
	}
	if (sc->bnx_chipid == BGE_CHIPID_BCM5717_C0)
		sc->bnx_chipid = BGE_CHIPID_BCM5720_A0;

	sc->bnx_asicrev = BGE_ASICREV(sc->bnx_chipid);
	sc->bnx_chiprev = BGE_CHIPREV(sc->bnx_chipid);

	switch (sc->bnx_asicrev) {
	case BGE_ASICREV_BCM5717:
	case BGE_ASICREV_BCM5719:
	case BGE_ASICREV_BCM5720:
		sc->bnx_flags |= BNX_FLAG_5717_PLUS | BNX_FLAG_57765_PLUS;
		break;

	case BGE_ASICREV_BCM5762:
		sc->bnx_flags |= BNX_FLAG_57765_PLUS;
		break;

	case BGE_ASICREV_BCM57765:
	case BGE_ASICREV_BCM57766:
		sc->bnx_flags |= BNX_FLAG_57765_FAMILY | BNX_FLAG_57765_PLUS;
		break;
	}

	sc->bnx_flags |= BNX_FLAG_TSO;
	if (sc->bnx_asicrev == BGE_ASICREV_BCM5719 &&
	    sc->bnx_chipid == BGE_CHIPID_BCM5719_A0)
		sc->bnx_flags &= ~BNX_FLAG_TSO;

	if (sc->bnx_asicrev == BGE_ASICREV_BCM5717 ||
	    BNX_IS_57765_FAMILY(sc)) {
		/*
		 * All chips of the BCM57785 and BCM5718 families have a
		 * bug that, under certain situations, the interrupt will
		 * not be enabled even if a status tag is written to the
		 * BGE_MBX_IRQ0_LO mailbox.
		 *
		 * While BCM5719 and BCM5720 have a hardware workaround
		 * which could fix the above bug.
		 * See the comment near BGE_PCIDMARWCTL_TAGGED_STATUS_WA in
		 * bnx_chipinit().
		 *
		 * For the rest of the chips in these two families, we will
		 * have to poll the status block at high rate (10ms currently)
		 * to check whether the interrupt is hosed or not.
		 * See bnx_check_intr() for details.
		 */
		sc->bnx_flags |= BNX_FLAG_STATUSTAG_BUG;
	}

	sc->bnx_pciecap = pci_get_pciecap_ptr(sc->bnx_dev);
	if (sc->bnx_asicrev == BGE_ASICREV_BCM5719 ||
	    sc->bnx_asicrev == BGE_ASICREV_BCM5720)
		pcie_set_max_readrq(dev, PCIEM_DEVCTL_MAX_READRQ_2048);
	else
		pcie_set_max_readrq(dev, PCIEM_DEVCTL_MAX_READRQ_4096);

	device_printf(dev, "CHIP ID 0x%08x; "
	    "ASIC REV 0x%02x; CHIP REV 0x%02x\n",
	    sc->bnx_chipid, sc->bnx_asicrev, sc->bnx_chiprev);

	/*
	 * Set various PHY quirk flags.
	 */
	capmask = MII_CAPMASK_DEFAULT;
	if (product == PCI_PRODUCT_BROADCOM_BCM57791 ||
	    product == PCI_PRODUCT_BROADCOM_BCM57795) {
		/* 10/100 only */
		capmask &= ~BMSR_EXTSTAT;
	}

	mii_priv |= BRGPHY_FLAG_WIRESPEED;
	if (sc->bnx_chipid == BGE_CHIPID_BCM5762_A0)
		mii_priv |= BRGPHY_FLAG_5762_A0;

	/* Initialize if_name earlier, so if_printf could be used */
	ifp = &sc->arpcom.ac_if;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));

	/* Try to reset the chip. */
	bnx_reset(sc);

	if (bnx_chipinit(sc)) {
		device_printf(dev, "chip initialization failed\n");
		error = ENXIO;
		goto fail;
	}

	/*
	 * Get station address
	 */
	error = bnx_get_eaddr(sc, ether_addr);
	if (error) {
		device_printf(dev, "failed to read station address\n");
		goto fail;
	}

	sc->bnx_tx_ringcnt = 1;
	sc->bnx_rx_retcnt = 1;

	if ((sc->bnx_rx_retcnt == 1 && sc->bnx_tx_ringcnt == 1) ||
	    (sc->bnx_rx_retcnt > 1 && sc->bnx_tx_ringcnt > 1)) {
		/*
		 * The RX ring and the corresponding TX ring processing
		 * should be on the same CPU, since they share the same
		 * status block.
		 */
		sc->bnx_flags |= BNX_FLAG_RXTX_BUNDLE;
		if (bootverbose)
			device_printf(dev, "RX/TX bundle\n");
	} else {
		KKASSERT(sc->bnx_rx_retcnt > 1 && sc->bnx_tx_ringcnt == 1);
	}

	error = bnx_dma_alloc(dev);
	if (error)
		goto fail;
1931 #ifdef IFPOLL_ENABLE
1932 if (sc->bnx_flags & BNX_FLAG_RXTX_BUNDLE) {
1934 * NPOLLING RX/TX CPU offset
1936 if (sc->bnx_rx_retcnt == ncpus2) {
offset_def = 0;
} else {
offset_def =
1940 (sc->bnx_rx_retcnt * device_get_unit(dev)) % ncpus2;
}
1941 offset = device_getenv_int(dev, "npoll.offset",
offset_def);
1943 if (offset >= ncpus2 ||
1944 offset % sc->bnx_rx_retcnt != 0) {
1945 device_printf(dev, "invalid npoll.offset %d, "
1946 "use %d\n", offset, offset_def);
1947 offset = offset_def;
}
1950 sc->bnx_npoll_rxoff = offset;
1951 sc->bnx_npoll_txoff = offset;
1954 * NPOLLING RX CPU offset
1956 if (sc->bnx_rx_retcnt == ncpus2) {
offset_def = 0;
} else {
offset_def =
1960 (sc->bnx_rx_retcnt * device_get_unit(dev)) % ncpus2;
}
1961 offset = device_getenv_int(dev, "npoll.rxoff",
offset_def);
1963 if (offset >= ncpus2 ||
1964 offset % sc->bnx_rx_retcnt != 0) {
1965 device_printf(dev, "invalid npoll.rxoff %d, "
1966 "use %d\n", offset, offset_def);
1967 offset = offset_def;
}
1970 sc->bnx_npoll_rxoff = offset;
1973 * NPOLLING TX CPU offset
1975 offset_def = device_get_unit(dev) % ncpus2;
1976 offset = device_getenv_int(dev, "npoll.txoff", offset_def);
1977 if (offset >= ncpus2) {
1978 device_printf(dev, "invalid npoll.txoff %d, use %d\n",
1979 offset, offset_def);
1980 offset = offset_def;
1982 sc->bnx_npoll_txoff = offset;
1984 #endif /* IFPOLL_ENABLE */
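/*
 * Worked example of the offset math above (CPU counts are
 * illustrative): with ncpus2 == 4 and bnx_rx_retcnt == 2, unit 1
 * defaults to offset (2 * 1) % 4 == 2, so its RX rings are polled
 * on CPUs 2 and 3. Any offset that is not a multiple of the ring
 * count, or is >= ncpus2, is rejected and replaced by the default.
 */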
1987 * Allocate interrupt
1989 error = bnx_alloc_intr(sc);
1993 /* Setup serializers */
1994 bnx_setup_serialize(sc);
1996 /* Set default tuneable values. */
1997 sc->bnx_rx_coal_ticks = BNX_RX_COAL_TICKS_DEF;
1998 sc->bnx_tx_coal_ticks = BNX_TX_COAL_TICKS_DEF;
1999 sc->bnx_rx_coal_bds = BNX_RX_COAL_BDS_DEF;
2000 sc->bnx_tx_coal_bds = BNX_TX_COAL_BDS_DEF;
2001 sc->bnx_rx_coal_bds_int = BNX_RX_COAL_BDS_INT_DEF;
2002 sc->bnx_tx_coal_bds_int = BNX_TX_COAL_BDS_INT_DEF;
2004 /* Set up ifnet structure */
ifp->if_softc = sc;
2006 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2007 ifp->if_ioctl = bnx_ioctl;
2008 ifp->if_start = bnx_start;
2009 #ifdef IFPOLL_ENABLE
2010 ifp->if_npoll = bnx_npoll;
#endif
2012 ifp->if_init = bnx_init;
2013 ifp->if_serialize = bnx_serialize;
2014 ifp->if_deserialize = bnx_deserialize;
2015 ifp->if_tryserialize = bnx_tryserialize;
#ifdef INVARIANTS
2017 ifp->if_serialize_assert = bnx_serialize_assert;
#endif
2019 ifp->if_mtu = ETHERMTU;
2020 ifp->if_capabilities = IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
2022 ifp->if_capabilities |= IFCAP_HWCSUM;
2023 ifp->if_hwassist = BNX_CSUM_FEATURES;
2024 if (sc->bnx_flags & BNX_FLAG_TSO) {
2025 ifp->if_capabilities |= IFCAP_TSO;
2026 ifp->if_hwassist |= CSUM_TSO;
2028 ifp->if_capenable = ifp->if_capabilities;
2030 ifq_set_maxlen(&ifp->if_snd, BGE_TX_RING_CNT - 1);
2031 ifq_set_ready(&ifp->if_snd);
2032 ifq_set_subq_cnt(&ifp->if_snd, sc->bnx_tx_ringcnt);
/*
2035 * Figure out what sort of media we have by checking the
2036 * hardware config word in the first 32k of NIC internal memory,
2037 * or fall back to examining the EEPROM if necessary.
2038 * Note: on some BCM5700 cards, this value appears to be unset.
2039 * If that's the case, we have to rely on identifying the NIC
2040 * by its PCI subsystem ID, as we do below for the SysKonnect SK-9D41.
 */
2043 if (bnx_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER) {
2044 hwcfg = bnx_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
} else {
2046 if (bnx_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
sizeof(hwcfg))) {
2048 device_printf(dev, "failed to read EEPROM\n");
2052 hwcfg = ntohl(hwcfg);
2055 /* The SysKonnect SK-9D41 is a 1000baseSX card. */
2056 if (pci_get_subvendor(dev) == PCI_PRODUCT_SCHNEIDERKOCH_SK_9D41 ||
2057 (hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER)
2058 sc->bnx_flags |= BNX_FLAG_TBI;
2061 if (sc->bnx_flags & BNX_FLAG_CPMU)
2062 sc->bnx_mi_mode = BGE_MIMODE_500KHZ_CONST;
else
2064 sc->bnx_mi_mode = BGE_MIMODE_BASE;
2066 /* Set up the link status update routine */
2067 if (sc->bnx_flags & BNX_FLAG_TBI) {
2068 sc->bnx_link_upd = bnx_tbi_link_upd;
2069 sc->bnx_link_chg = BGE_MACSTAT_LINK_CHANGED;
2070 } else if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) {
2071 sc->bnx_link_upd = bnx_autopoll_link_upd;
2072 sc->bnx_link_chg = BGE_MACSTAT_LINK_CHANGED;
} else {
2074 sc->bnx_link_upd = bnx_copper_link_upd;
2075 sc->bnx_link_chg = BGE_MACSTAT_LINK_CHANGED;
}
2078 /* Set default PHY address */
sc->bnx_phyno = 1;
/*
2082 * PHY address mapping for various devices.
2084 * | F0 Cu | F0 Sr | F1 Cu | F1 Sr |
2085 * ---------+-------+-------+-------+-------+
2086 * BCM57XX | 1 | X | X | X |
2087 * BCM5704 | 1 | X | 1 | X |
2088 * BCM5717 | 1 | 8 | 2 | 9 |
2089 * BCM5719 | 1 | 8 | 2 | 9 |
2090 * BCM5720 | 1 | 8 | 2 | 9 |
2092 * Other addresses may respond but they are not
2093 * IEEE compliant PHYs and should be ignored.
 */
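/*
 * Worked example of the table above: on a copper BCM5719, PCI
 * function 1 uses PHY address 2 (f + 1), while the SerDes strap
 * on the same function selects PHY address 9 (f + 8); this is
 * exactly the f + 1 / f + 8 selection implemented below.
 */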
2095 if (BNX_IS_5717_PLUS(sc)) {
2098 f = pci_get_function(dev);
2099 if (sc->bnx_chipid == BGE_CHIPID_BCM5717_A0) {
2100 if (CSR_READ_4(sc, BGE_SGDIG_STS) &
2101 BGE_SGDIGSTS_IS_SERDES)
2102 sc->bnx_phyno = f + 8;
else
2104 sc->bnx_phyno = f + 1;
} else {
2106 if (CSR_READ_4(sc, BGE_CPMU_PHY_STRAP) &
2107 BGE_CPMU_PHY_STRAP_IS_SERDES)
2108 sc->bnx_phyno = f + 8;
else
2110 sc->bnx_phyno = f + 1;
}
}
2114 if (sc->bnx_flags & BNX_FLAG_TBI) {
2115 ifmedia_init(&sc->bnx_ifmedia, IFM_IMASK,
2116 bnx_ifmedia_upd, bnx_ifmedia_sts);
2117 ifmedia_add(&sc->bnx_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
2118 ifmedia_add(&sc->bnx_ifmedia,
2119 IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL);
2120 ifmedia_add(&sc->bnx_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
2121 ifmedia_set(&sc->bnx_ifmedia, IFM_ETHER|IFM_AUTO);
2122 sc->bnx_ifmedia.ifm_media = sc->bnx_ifmedia.ifm_cur->ifm_media;
2124 struct mii_probe_args mii_args;
2126 mii_probe_args_init(&mii_args, bnx_ifmedia_upd, bnx_ifmedia_sts);
2127 mii_args.mii_probemask = 1 << sc->bnx_phyno;
2128 mii_args.mii_capmask = capmask;
2129 mii_args.mii_privtag = MII_PRIVTAG_BRGPHY;
2130 mii_args.mii_priv = mii_priv;
2132 error = mii_probe(dev, &sc->bnx_miibus, &mii_args);
2134 device_printf(dev, "MII without any PHY!\n");
2140 * Create sysctl nodes.
2142 sysctl_ctx_init(&sc->bnx_sysctl_ctx);
2143 sc->bnx_sysctl_tree = SYSCTL_ADD_NODE(&sc->bnx_sysctl_ctx,
2144 SYSCTL_STATIC_CHILDREN(_hw),
OID_AUTO,
2146 device_get_nameunit(dev),
CTLFLAG_RD, 0, "");
2148 if (sc->bnx_sysctl_tree == NULL) {
2149 device_printf(dev, "can't add sysctl node\n");
2154 SYSCTL_ADD_INT(&sc->bnx_sysctl_ctx,
2155 SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
2156 "rx_rings", CTLFLAG_RD, &sc->bnx_rx_retcnt, 0, "# of RX rings");
2157 SYSCTL_ADD_INT(&sc->bnx_sysctl_ctx,
2158 SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
2159 "tx_rings", CTLFLAG_RD, &sc->bnx_tx_ringcnt, 0, "# of TX rings");
2161 SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
2162 SYSCTL_CHILDREN(sc->bnx_sysctl_tree),
2163 OID_AUTO, "rx_coal_ticks",
2164 CTLTYPE_INT | CTLFLAG_RW,
2165 sc, 0, bnx_sysctl_rx_coal_ticks, "I",
2166 "Receive coalescing ticks (usec).");
2167 SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
2168 SYSCTL_CHILDREN(sc->bnx_sysctl_tree),
2169 OID_AUTO, "tx_coal_ticks",
2170 CTLTYPE_INT | CTLFLAG_RW,
2171 sc, 0, bnx_sysctl_tx_coal_ticks, "I",
2172 "Transmit coalescing ticks (usec).");
2173 SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
2174 SYSCTL_CHILDREN(sc->bnx_sysctl_tree),
2175 OID_AUTO, "rx_coal_bds",
2176 CTLTYPE_INT | CTLFLAG_RW,
2177 sc, 0, bnx_sysctl_rx_coal_bds, "I",
2178 "Receive max coalesced BD count.");
2179 SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
2180 SYSCTL_CHILDREN(sc->bnx_sysctl_tree),
2181 OID_AUTO, "tx_coal_bds",
2182 CTLTYPE_INT | CTLFLAG_RW,
2183 sc, 0, bnx_sysctl_tx_coal_bds, "I",
2184 "Transmit max coalesced BD count.");
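/*
 * The coalescing knobs above land under hw.<nameunit>; e.g., for
 * the first device (name assumed for illustration):
 *
 *   sysctl hw.bnx0.rx_coal_ticks=150
 *   sysctl hw.bnx0.rx_coal_bds=64
 *
 * Out-of-range values are rejected by bnx_sysctl_coal_chg(),
 * which bounds each knob by its BNX_*_MIN/BNX_*_MAX limits.
 */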
/*
2186 * A common design characteristic of many Broadcom
2187 * client controllers is that they only support a
2188 * single outstanding DMA read operation on the PCIe
2189 * bus. This means that it will take twice as long to
2190 * fetch a TX frame that is split into header and
2191 * payload buffers as it does to fetch a single,
2192 * contiguous TX frame (2 reads vs. 1 read). For these
2193 * controllers, coalescing buffers to reduce the number
2194 * of memory reads is an effective way to get maximum
2195 * performance (about 940Mbps). Without collapsing TX
2196 * buffers the maximum TCP bulk transfer performance
2197 * is about 850Mbps. However, forcibly coalescing mbufs
2198 * consumes a lot of CPU cycles, so leave it off by default.
 */
2201 SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
2202 SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
2203 "force_defrag", CTLTYPE_INT | CTLFLAG_RW,
2204 sc, 0, bnx_sysctl_force_defrag, "I",
2205 "Force defragment on TX path");
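/*
 * Usage note (device name assumed for illustration): setting
 * hw.bnx0.force_defrag=1 makes bnx_encap() m_defrag() multi-mbuf
 * chains into one contiguous buffer, trading the extra CPU cycles
 * for the ~940Mbps vs ~850Mbps bulk TCP difference described above.
 */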
2207 SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
2208 SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
2209 "tx_wreg", CTLTYPE_INT | CTLFLAG_RW,
2210 sc, 0, bnx_sysctl_tx_wreg, "I",
2211 "# of segments before writing to hardware register");
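/*
 * tx_wreg batches doorbell updates: bnx_start() defers writing the
 * TX producer index mailbox until at least bnx_tx_wreg segments
 * have been queued (or the send queue drains), reducing register
 * writes per burst of packets; see the nsegs check in bnx_start().
 */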
2213 SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
2214 SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
2215 "std_refill", CTLTYPE_INT | CTLFLAG_RW,
2216 sc, 0, bnx_sysctl_std_refill, "I",
2217 "# of packets received before scheduling standard refilling");
2219 SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
2220 SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
2221 "rx_coal_bds_int", CTLTYPE_INT | CTLFLAG_RW,
2222 sc, 0, bnx_sysctl_rx_coal_bds_int, "I",
2223 "Receive max coalesced BD count during interrupt.");
2224 SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
2225 SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
2226 "tx_coal_bds_int", CTLTYPE_INT | CTLFLAG_RW,
2227 sc, 0, bnx_sysctl_tx_coal_bds_int, "I",
2228 "Transmit max coalesced BD count during interrupt.");
2230 #ifdef IFPOLL_ENABLE
2231 if (sc->bnx_flags & BNX_FLAG_RXTX_BUNDLE) {
2232 SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
2233 SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
2234 "npoll_offset", CTLTYPE_INT | CTLFLAG_RW,
2235 sc, 0, bnx_sysctl_npoll_offset, "I",
2236 "NPOLLING cpu offset");
} else {
2238 SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
2239 SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
2240 "npoll_rxoff", CTLTYPE_INT | CTLFLAG_RW,
2241 sc, 0, bnx_sysctl_npoll_rxoff, "I",
2242 "NPOLLING RX cpu offset");
2243 SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
2244 SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
2245 "npoll_txoff", CTLTYPE_INT | CTLFLAG_RW,
2246 sc, 0, bnx_sysctl_npoll_txoff, "I",
2247 "NPOLLING TX cpu offset");
}
#endif /* IFPOLL_ENABLE */
2251 #ifdef BNX_TSO_DEBUG
2252 for (i = 0; i < BNX_TSO_NSTATS; ++i) {
2253 ksnprintf(desc, sizeof(desc), "tso%d", i + 1);
2254 SYSCTL_ADD_ULONG(&sc->bnx_sysctl_ctx,
2255 SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
2256 desc, CTLFLAG_RW, &sc->bnx_tsosegs[i], "");
}
#endif /* BNX_TSO_DEBUG */
2261 * Call MI attach routine.
2263 ether_ifattach(ifp, ether_addr, NULL);
2265 /* Setup TX rings and subqueues */
2266 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) {
2267 struct ifaltq_subque *ifsq = ifq_get_subq(&ifp->if_snd, i);
2268 struct bnx_tx_ring *txr = &sc->bnx_tx_ring[i];
2270 ifsq_set_cpuid(ifsq, txr->bnx_tx_cpuid);
2271 ifsq_set_hw_serialize(ifsq, &txr->bnx_tx_serialize);
2272 ifsq_set_priv(ifsq, txr);
2273 txr->bnx_ifsq = ifsq;
2275 ifsq_watchdog_init(&txr->bnx_tx_watchdog, ifsq, bnx_watchdog);
2278 error = bnx_setup_intr(sc);
2280 ether_ifdetach(ifp);
2283 bnx_set_tick_cpuid(sc, FALSE);
2286 * Create RX standard ring refilling thread
2288 std_cpuid_def = device_get_unit(dev) % ncpus;
2289 std_cpuid = device_getenv_int(dev, "std.cpuid", std_cpuid_def);
2290 if (std_cpuid < 0 || std_cpuid >= ncpus) {
2291 device_printf(dev, "invalid std.cpuid %d, use %d\n",
2292 std_cpuid, std_cpuid_def);
2293 std_cpuid = std_cpuid_def;
2296 std = &sc->bnx_rx_std_ring;
2297 lwkt_create(bnx_rx_std_refill_ithread, std, NULL,
2298 &std->bnx_rx_std_ithread, TDF_NOSTART | TDF_INTTHREAD, std_cpuid,
2299 "%s std", device_get_nameunit(dev));
2300 lwkt_setpri(&std->bnx_rx_std_ithread, TDPRI_INT_MED);
2301 std->bnx_rx_std_ithread.td_preemptable = lwkt_preempt;
2302 sc->bnx_flags |= BNX_FLAG_STD_THREAD;
2311 bnx_detach(device_t dev)
2313 struct bnx_softc *sc = device_get_softc(dev);
2315 if (device_is_attached(dev)) {
2316 struct ifnet *ifp = &sc->arpcom.ac_if;
2318 ifnet_serialize_all(ifp);
2321 bnx_teardown_intr(sc, sc->bnx_intr_cnt);
2322 ifnet_deserialize_all(ifp);
2324 ether_ifdetach(ifp);
2327 if (sc->bnx_flags & BNX_FLAG_STD_THREAD) {
2328 struct bnx_rx_std_ring *std = &sc->bnx_rx_std_ring;
2330 tsleep_interlock(std, 0);
2331 std->bnx_rx_std_stop = 1;
2333 lwkt_schedule(&std->bnx_rx_std_ithread);
2334 tsleep(std, PINTERLOCKED, "bnx_detach", 0);
2336 device_printf(dev, "RX std ithread exited\n");
2339 if (sc->bnx_flags & BNX_FLAG_TBI)
2340 ifmedia_removeall(&sc->bnx_ifmedia);
2342 device_delete_child(dev, sc->bnx_miibus);
2343 bus_generic_detach(dev);
2347 if (sc->bnx_res != NULL) {
2348 bus_release_resource(dev, SYS_RES_MEMORY,
2349 BGE_PCI_BAR0, sc->bnx_res);
2352 if (sc->bnx_sysctl_tree != NULL)
2353 sysctl_ctx_free(&sc->bnx_sysctl_ctx);
2357 if (sc->bnx_serialize != NULL)
2358 kfree(sc->bnx_serialize, M_DEVBUF);
2364 bnx_reset(struct bnx_softc *sc)
2367 uint32_t cachesize, command, pcistate, reset;
2368 void (*write_op)(struct bnx_softc *, uint32_t, uint32_t);
2374 write_op = bnx_writemem_direct;
2376 /* Save some important PCI state. */
2377 cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
2378 command = pci_read_config(dev, BGE_PCI_CMD, 4);
2379 pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);
2381 pci_write_config(dev, BGE_PCI_MISC_CTL,
2382 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2383 BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW|
2384 BGE_PCIMISCCTL_TAGGED_STATUS, 4);
2386 /* Disable fastboot on controllers that support it. */
2388 if_printf(&sc->arpcom.ac_if, "Disabling fastboot\n");
2389 CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0x0);
2392 * Write the magic number to SRAM at offset 0xB50.
2393 * When firmware finishes its initialization it will
2394 * write ~BGE_MAGIC_NUMBER to the same location.
2396 bnx_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
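/*
 * BGE_MAGIC_NUMBER is 0x4B657654 ("KevT" in ASCII), so the
 * firmware handshake poll further below waits for its one's
 * complement, 0xB49A89AB.
 */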
2398 reset = BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1);
2400 /* XXX: Broadcom Linux driver. */
2401 /* Force PCI-E 1.0a mode */
2402 if (!BNX_IS_57765_PLUS(sc) &&
2403 CSR_READ_4(sc, BGE_PCIE_PHY_TSTCTL) ==
2404 (BGE_PCIE_PHY_TSTCTL_PSCRAM |
2405 BGE_PCIE_PHY_TSTCTL_PCIE10)) {
2406 CSR_WRITE_4(sc, BGE_PCIE_PHY_TSTCTL,
2407 BGE_PCIE_PHY_TSTCTL_PSCRAM);
2409 if (sc->bnx_chipid != BGE_CHIPID_BCM5750_A0) {
2410 /* Prevent PCIE link training during global reset */
2411 CSR_WRITE_4(sc, BGE_MISC_CFG, (1<<29));
2416 * Set GPHY Power Down Override to leave GPHY
2417 * powered up in D0 uninitialized.
2419 if ((sc->bnx_flags & BNX_FLAG_CPMU) == 0)
2420 reset |= BGE_MISCCFG_GPHY_PD_OVERRIDE;
2422 /* Issue global reset */
2423 write_op(sc, BGE_MISC_CFG, reset);
2427 /* XXX: Broadcom Linux driver. */
2428 if (sc->bnx_chipid == BGE_CHIPID_BCM5750_A0) {
2431 DELAY(500000); /* wait for link training to complete */
2432 v = pci_read_config(dev, 0xc4, 4);
2433 pci_write_config(dev, 0xc4, v | (1<<15), 4);
2436 devctl = pci_read_config(dev, sc->bnx_pciecap + PCIER_DEVCTRL, 2);
2438 /* Disable no snoop and disable relaxed ordering. */
2439 devctl &= ~(PCIEM_DEVCTL_RELAX_ORDER | PCIEM_DEVCTL_NOSNOOP);
2441 /* Old PCI-E chips only support 128 bytes Max PayLoad Size. */
2442 if ((sc->bnx_flags & BNX_FLAG_CPMU) == 0) {
2443 devctl &= ~PCIEM_DEVCTL_MAX_PAYLOAD_MASK;
2444 devctl |= PCIEM_DEVCTL_MAX_PAYLOAD_128;
2447 pci_write_config(dev, sc->bnx_pciecap + PCIER_DEVCTRL,
devctl, 2);
2450 /* Clear error status. */
2451 pci_write_config(dev, sc->bnx_pciecap + PCIER_DEVSTS,
2452 PCIEM_DEVSTS_CORR_ERR |
2453 PCIEM_DEVSTS_NFATAL_ERR |
2454 PCIEM_DEVSTS_FATAL_ERR |
2455 PCIEM_DEVSTS_UNSUPP_REQ, 2);
2457 /* Reset some of the PCI state that got zapped by reset */
2458 pci_write_config(dev, BGE_PCI_MISC_CTL,
2459 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2460 BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW|
2461 BGE_PCIMISCCTL_TAGGED_STATUS, 4);
2462 pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
2463 pci_write_config(dev, BGE_PCI_CMD, command, 4);
2464 write_op(sc, BGE_MISC_CFG, (65 << 1));
2466 /* Enable memory arbiter */
2467 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
2470 * Poll until we see the 1's complement of the magic number.
2471 * This indicates that the firmware initialization is complete.
2473 for (i = 0; i < BNX_FIRMWARE_TIMEOUT; i++) {
2474 val = bnx_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
2475 if (val == ~BGE_MAGIC_NUMBER)
break;
DELAY(10);
}
2479 if (i == BNX_FIRMWARE_TIMEOUT) {
2480 if_printf(&sc->arpcom.ac_if, "firmware handshake "
2481 "timed out, found 0x%08x\n", val);
2484 /* BCM57765 A0 needs additional time before accessing. */
2485 if (sc->bnx_chipid == BGE_CHIPID_BCM57765_A0)
/*
2489 * XXX Wait for the value of the PCISTATE register to
2490 * return to its original pre-reset state. This is a
2491 * fairly good indicator of reset completion. If we don't
2492 * wait for the reset to fully complete, trying to read
2493 * from the device's non-PCI registers may yield garbage results.
 */
2496 for (i = 0; i < BNX_TIMEOUT; i++) {
2497 if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate)
break;
DELAY(10);
}
2502 /* Fix up byte swapping */
2503 CSR_WRITE_4(sc, BGE_MODE_CTL, bnx_dma_swap_options(sc));
2505 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
/*
2508 * The 5704 in TBI mode apparently needs some special
2509 * adjustment to ensure the SERDES drive level is set to 1.2V.
 */
2512 if (sc->bnx_asicrev == BGE_ASICREV_BCM5704 &&
2513 (sc->bnx_flags & BNX_FLAG_TBI)) {
2516 serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG);
2517 serdescfg = (serdescfg & ~0xFFF) | 0x880;
2518 CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg);
2521 CSR_WRITE_4(sc, BGE_MI_MODE,
2522 sc->bnx_mi_mode & ~BGE_MIMODE_AUTOPOLL);
2525 /* XXX: Broadcom Linux driver. */
2526 if (!BNX_IS_57765_PLUS(sc)) {
2529 /* Enable Data FIFO protection. */
2530 v = CSR_READ_4(sc, BGE_PCIE_TLDLPL_PORT);
2531 CSR_WRITE_4(sc, BGE_PCIE_TLDLPL_PORT, v | (1 << 25));
2536 if (sc->bnx_asicrev == BGE_ASICREV_BCM5720) {
2537 BNX_CLRBIT(sc, BGE_CPMU_CLCK_ORIDE,
2538 CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
2543 * Frame reception handling. This is called if there's a frame
2544 * on the receive return list.
2546 * Note: we have to be able to handle two possibilities here:
2547 * 1) the frame is from the jumbo receive ring
2548 * 2) the frame is from the standard receive ring
2552 bnx_rxeof(struct bnx_rx_ret_ring *ret, uint16_t rx_prod, int count)
2554 struct bnx_softc *sc = ret->bnx_sc;
2555 struct bnx_rx_std_ring *std = ret->bnx_std;
2556 struct ifnet *ifp = &sc->arpcom.ac_if;
2558 while (ret->bnx_rx_saved_considx != rx_prod && count != 0) {
2559 struct bge_rx_bd *cur_rx;
2560 struct bnx_rx_buf *rb;
2562 struct mbuf *m = NULL;
2563 uint16_t vlan_tag = 0;
2568 cur_rx = &ret->bnx_rx_ret_ring[ret->bnx_rx_saved_considx];
2570 rxidx = cur_rx->bge_idx;
2571 BNX_INC(ret->bnx_rx_saved_considx, BNX_RETURN_RING_CNT);
2573 if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
2575 vlan_tag = cur_rx->bge_vlan_tag;
2578 if (ret->bnx_rx_cnt >= ret->bnx_rx_cntmax) {
2579 ret->bnx_rx_cnt = 0;
2581 atomic_set_int(&std->bnx_rx_std_refill,
ret->bnx_rx_mask);
2583 if (atomic_poll_acquire_int(&std->bnx_rx_std_running))
2584 lwkt_schedule(&std->bnx_rx_std_ithread);
2588 rb = &std->bnx_rx_std_buf[rxidx];
2589 m = rb->bnx_rx_mbuf;
2590 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2591 IFNET_STAT_INC(ifp, ierrors, 1);
2593 rb->bnx_rx_refilled = 1;
2596 if (bnx_newbuf_std(ret, rxidx, 0)) {
2597 IFNET_STAT_INC(ifp, ierrors, 1);
2601 IFNET_STAT_INC(ifp, ipackets, 1);
2602 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
2603 m->m_pkthdr.rcvif = ifp;
2605 if ((ifp->if_capenable & IFCAP_RXCSUM) &&
2606 (cur_rx->bge_flags & BGE_RXBDFLAG_IPV6) == 0) {
2607 if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
2608 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2609 if ((cur_rx->bge_error_flag &
2610 BGE_RXERRFLAG_IP_CSUM_NOK) == 0)
2611 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2613 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) {
2614 m->m_pkthdr.csum_data =
2615 cur_rx->bge_tcp_udp_csum;
2616 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
CSUM_PSEUDO_HDR;
2622 * If we received a packet with a vlan tag, pass it
2623 * to vlan_input() instead of ether_input().
2626 m->m_flags |= M_VLANTAG;
2627 m->m_pkthdr.ether_vlantag = vlan_tag;
2629 ifp->if_input(ifp, m);
2631 bnx_writembx(sc, BGE_MBX_RX_CONS0_LO, ret->bnx_rx_saved_considx);
2635 bnx_txeof(struct bnx_tx_ring *txr, uint16_t tx_cons)
2637 struct ifnet *ifp = &txr->bnx_sc->arpcom.ac_if;
2640 * Go through our tx ring and free mbufs for those
2641 * frames that have been sent.
2643 while (txr->bnx_tx_saved_considx != tx_cons) {
2644 struct bnx_tx_buf *buf;
2647 idx = txr->bnx_tx_saved_considx;
2648 buf = &txr->bnx_tx_buf[idx];
2649 if (buf->bnx_tx_mbuf != NULL) {
2650 IFNET_STAT_INC(ifp, opackets, 1);
2651 bus_dmamap_unload(txr->bnx_tx_mtag,
2652 buf->bnx_tx_dmamap);
2653 m_freem(buf->bnx_tx_mbuf);
2654 buf->bnx_tx_mbuf = NULL;
2657 BNX_INC(txr->bnx_tx_saved_considx, BGE_TX_RING_CNT);
2660 if ((BGE_TX_RING_CNT - txr->bnx_tx_cnt) >=
2661 (BNX_NSEG_RSVD + BNX_NSEG_SPARE))
2662 ifsq_clr_oactive(txr->bnx_ifsq);
2664 if (txr->bnx_tx_cnt == 0)
2665 txr->bnx_tx_watchdog.wd_timer = 0;
2667 if (!ifsq_is_empty(txr->bnx_ifsq))
2668 ifsq_devstart(txr->bnx_ifsq);
2671 #ifdef IFPOLL_ENABLE
2674 bnx_npoll_rx(struct ifnet *ifp __unused, void *xret, int cycle)
2676 struct bnx_rx_ret_ring *ret = xret;
2679 ASSERT_SERIALIZED(&ret->bnx_rx_ret_serialize);
2681 ret->bnx_saved_status_tag = *ret->bnx_hw_status_tag;
2684 rx_prod = *ret->bnx_rx_considx;
2685 if (ret->bnx_rx_saved_considx != rx_prod)
2686 bnx_rxeof(ret, rx_prod, cycle);
2690 bnx_npoll_tx(struct ifnet *ifp __unused, void *xtxr, int cycle __unused)
2692 struct bnx_tx_ring *txr = xtxr;
2695 ASSERT_SERIALIZED(&txr->bnx_tx_serialize);
2697 tx_cons = *txr->bnx_tx_considx;
2698 if (txr->bnx_tx_saved_considx != tx_cons)
2699 bnx_txeof(txr, tx_cons);
2703 bnx_npoll_status(struct ifnet *ifp)
2705 struct bnx_softc *sc = ifp->if_softc;
2706 struct bge_status_block *sblk = sc->bnx_ldata.bnx_status_block;
2708 ASSERT_SERIALIZED(&sc->bnx_main_serialize);
2710 if ((sblk->bge_status & BGE_STATFLAG_LINKSTATE_CHANGED) ||
sc->bnx_link_evt)
bnx_link_poll(sc);
2716 bnx_npoll(struct ifnet *ifp, struct ifpoll_info *info)
2718 struct bnx_softc *sc = ifp->if_softc;
2721 ASSERT_IFNET_SERIALIZED_ALL(ifp);
2725 * TODO handle RXTX bundle and non-bundle
2727 info->ifpi_status.status_func = bnx_npoll_status;
2728 info->ifpi_status.serializer = &sc->bnx_main_serialize;
2730 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) {
2731 struct bnx_tx_ring *txr = &sc->bnx_tx_ring[i];
2732 int idx = i + sc->bnx_npoll_txoff;
2734 KKASSERT(idx < ncpus2);
2735 info->ifpi_tx[idx].poll_func = bnx_npoll_tx;
2736 info->ifpi_tx[idx].arg = txr;
2737 info->ifpi_tx[idx].serializer = &txr->bnx_tx_serialize;
2738 ifsq_set_cpuid(txr->bnx_ifsq, idx);
2741 for (i = 0; i < sc->bnx_rx_retcnt; ++i) {
2742 struct bnx_rx_ret_ring *ret = &sc->bnx_rx_ret_ring[i];
2743 int idx = i + sc->bnx_npoll_rxoff;
2745 KKASSERT(idx < ncpus2);
2746 info->ifpi_rx[idx].poll_func = bnx_npoll_rx;
2747 info->ifpi_rx[idx].arg = ret;
2748 info->ifpi_rx[idx].serializer =
2749 &ret->bnx_rx_ret_serialize;
2752 if (ifp->if_flags & IFF_RUNNING) {
2753 bnx_disable_intr(sc);
2754 bnx_set_tick_cpuid(sc, TRUE);
2757 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) {
2758 ifsq_set_cpuid(sc->bnx_tx_ring[i].bnx_ifsq,
2759 sc->bnx_tx_ring[i].bnx_tx_cpuid);
2761 if (ifp->if_flags & IFF_RUNNING) {
2762 bnx_enable_intr(sc);
2763 bnx_set_tick_cpuid(sc, FALSE);
2768 #endif /* IFPOLL_ENABLE */
2771 bnx_intr_legacy(void *xsc)
2773 struct bnx_softc *sc = xsc;
2774 struct bnx_rx_ret_ring *ret = &sc->bnx_rx_ret_ring[0];
2776 if (ret->bnx_saved_status_tag == *ret->bnx_hw_status_tag) {
2779 val = pci_read_config(sc->bnx_dev, BGE_PCI_PCISTATE, 4);
2780 if (val & BGE_PCISTAT_INTR_NOTACT)
/*
2786 * The interrupt will have to be disabled if tagged status
2787 * is used; otherwise the interrupt will always be asserted on
2788 * certain chips (at least on BCM5750 AX/BX).
 */
2790 bnx_writembx(sc, BGE_MBX_IRQ0_LO, 1);
2798 struct bnx_softc *sc = xsc;
2800 /* Disable interrupt first */
2801 bnx_writembx(sc, BGE_MBX_IRQ0_LO, 1);
2806 bnx_msi_oneshot(void *xsc)
2812 bnx_intr(struct bnx_softc *sc)
2814 struct ifnet *ifp = &sc->arpcom.ac_if;
2815 struct bnx_rx_ret_ring *ret = &sc->bnx_rx_ret_ring[0];
2816 struct bge_status_block *sblk = sc->bnx_ldata.bnx_status_block;
2819 ASSERT_SERIALIZED(&sc->bnx_main_serialize);
2821 ret->bnx_saved_status_tag = *ret->bnx_hw_status_tag;
/*
2823 * Use a load fence to ensure that status_tag is saved
2824 * before rx_prod, tx_cons and status.
 */
cpu_lfence();
2828 status = sblk->bge_status;
2830 if ((status & BGE_STATFLAG_LINKSTATE_CHANGED) || sc->bnx_link_evt)
bnx_link_poll(sc);
2833 if (ifp->if_flags & IFF_RUNNING) {
2834 struct bnx_tx_ring *txr = &sc->bnx_tx_ring[0];
2835 uint16_t rx_prod, tx_cons;
2837 lwkt_serialize_enter(&ret->bnx_rx_ret_serialize);
2838 rx_prod = *ret->bnx_rx_considx;
2839 if (ret->bnx_rx_saved_considx != rx_prod)
2840 bnx_rxeof(ret, rx_prod, -1);
2841 lwkt_serialize_exit(&ret->bnx_rx_ret_serialize);
2843 lwkt_serialize_enter(&txr->bnx_tx_serialize);
2844 tx_cons = *txr->bnx_tx_considx;
2845 if (txr->bnx_tx_saved_considx != tx_cons)
2846 bnx_txeof(txr, tx_cons);
2847 lwkt_serialize_exit(&txr->bnx_tx_serialize);
2850 bnx_writembx(sc, BGE_MBX_IRQ0_LO, ret->bnx_saved_status_tag << 24);
2856 struct bnx_softc *sc = xsc;
2858 lwkt_serialize_enter(&sc->bnx_main_serialize);
2860 bnx_stats_update_regs(sc);
2862 if (sc->bnx_flags & BNX_FLAG_TBI) {
/*
2864 * Since in TBI mode auto-polling can't be used, we should poll
2865 * the link status manually. Here we register a pending link event
2866 * and trigger an interrupt.
 */
sc->bnx_link_evt = 1;
2869 BNX_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
2870 } else if (!sc->bnx_link) {
2871 mii_tick(device_get_softc(sc->bnx_miibus));
}
2874 callout_reset_bycpu(&sc->bnx_tick_timer, hz, bnx_tick, sc,
2875 sc->bnx_tick_cpuid);
2877 lwkt_serialize_exit(&sc->bnx_main_serialize);
2881 bnx_stats_update_regs(struct bnx_softc *sc)
2883 struct ifnet *ifp = &sc->arpcom.ac_if;
2884 struct bge_mac_stats_regs stats;
2888 s = (uint32_t *)&stats;
2889 for (i = 0; i < sizeof(struct bge_mac_stats_regs); i += 4) {
2890 *s++ = CSR_READ_4(sc, BGE_RX_STATS + i);
}
2894 IFNET_STAT_SET(ifp, collisions,
2895 (stats.dot3StatsSingleCollisionFrames +
2896 stats.dot3StatsMultipleCollisionFrames +
2897 stats.dot3StatsExcessiveCollisions +
2898 stats.dot3StatsLateCollisions));
2902 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
2903 * pointers to descriptors.
2906 bnx_encap(struct bnx_tx_ring *txr, struct mbuf **m_head0, uint32_t *txidx,
2909 struct bge_tx_bd *d = NULL;
2910 uint16_t csum_flags = 0, vlan_tag = 0, mss = 0;
2911 bus_dma_segment_t segs[BNX_NSEG_NEW];
2913 int error, maxsegs, nsegs, idx, i;
2914 struct mbuf *m_head = *m_head0, *m_new;
2916 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
2917 #ifdef BNX_TSO_DEBUG
2921 error = bnx_setup_tso(txr, m_head0, &mss, &csum_flags);
2926 #ifdef BNX_TSO_DEBUG
2927 tso_nsegs = (m_head->m_pkthdr.len /
2928 m_head->m_pkthdr.tso_segsz) - 1;
2929 if (tso_nsegs > (BNX_TSO_NSTATS - 1))
2930 tso_nsegs = BNX_TSO_NSTATS - 1;
2931 else if (tso_nsegs < 0)
2933 txr->bnx_sc->bnx_tsosegs[tso_nsegs]++;
2935 } else if (m_head->m_pkthdr.csum_flags & BNX_CSUM_FEATURES) {
2936 if (m_head->m_pkthdr.csum_flags & CSUM_IP)
2937 csum_flags |= BGE_TXBDFLAG_IP_CSUM;
2938 if (m_head->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
2939 csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
2940 if (m_head->m_flags & M_LASTFRAG)
2941 csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
2942 else if (m_head->m_flags & M_FRAG)
2943 csum_flags |= BGE_TXBDFLAG_IP_FRAG;
2945 if (m_head->m_flags & M_VLANTAG) {
2946 csum_flags |= BGE_TXBDFLAG_VLAN_TAG;
2947 vlan_tag = m_head->m_pkthdr.ether_vlantag;
2951 map = txr->bnx_tx_buf[idx].bnx_tx_dmamap;
2953 maxsegs = (BGE_TX_RING_CNT - txr->bnx_tx_cnt) - BNX_NSEG_RSVD;
2954 KASSERT(maxsegs >= BNX_NSEG_SPARE,
2955 ("not enough segments %d", maxsegs));
2957 if (maxsegs > BNX_NSEG_NEW)
2958 maxsegs = BNX_NSEG_NEW;
2961 * Pad outbound frame to BGE_MIN_FRAMELEN for an unusual reason.
2962 * The bge hardware will pad out Tx runts to BGE_MIN_FRAMELEN,
2963 * but when such padded frames employ the bge IP/TCP checksum
2964 * offload, the hardware checksum assist gives incorrect results
2965 * (possibly from incorporating its own padding into the UDP/TCP
2966 * checksum; who knows). If we pad such runts with zeros, the
2967 * onboard checksum comes out correct.
2969 if ((csum_flags & BGE_TXBDFLAG_TCP_UDP_CSUM) &&
2970 m_head->m_pkthdr.len < BNX_MIN_FRAMELEN) {
2971 error = m_devpad(m_head, BNX_MIN_FRAMELEN);
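/*
 * Worked example, assuming the usual BNX_MIN_FRAMELEN of
 * ETHER_MIN_LEN - ETHER_CRC_LEN (60 bytes): a checksum-offloaded
 * frame that is only 42 bytes long is zero-padded to 60 bytes
 * here, so the checksum engine never folds its own pad bytes
 * into the TCP/UDP checksum.
 */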
2976 if ((txr->bnx_tx_flags & BNX_TX_FLAG_SHORTDMA) &&
2977 m_head->m_next != NULL) {
2978 m_new = bnx_defrag_shortdma(m_head);
2979 if (m_new == NULL) {
2983 *m_head0 = m_head = m_new;
2985 if ((m_head->m_pkthdr.csum_flags & CSUM_TSO) == 0 &&
2986 (txr->bnx_tx_flags & BNX_TX_FLAG_FORCE_DEFRAG) &&
2987 m_head->m_next != NULL) {
/*
2989 * Forcefully defragment the mbuf chain to overcome a hardware
2990 * limitation which only supports a single outstanding
2991 * DMA read operation. If it fails, keep moving on using
2992 * the original mbuf chain.
 */
2994 m_new = m_defrag(m_head, MB_DONTWAIT);
2996 *m_head0 = m_head = m_new;
2999 error = bus_dmamap_load_mbuf_defrag(txr->bnx_tx_mtag, map,
3000 m_head0, segs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
3003 *segs_used += nsegs;
3006 bus_dmamap_sync(txr->bnx_tx_mtag, map, BUS_DMASYNC_PREWRITE);
3008 for (i = 0; ; i++) {
3009 d = &txr->bnx_tx_ring[idx];
3011 d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr);
3012 d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr);
3013 d->bge_len = segs[i].ds_len;
3014 d->bge_flags = csum_flags;
3015 d->bge_vlan_tag = vlan_tag;
3020 BNX_INC(idx, BGE_TX_RING_CNT);
3022 /* Mark the last segment as end of packet... */
3023 d->bge_flags |= BGE_TXBDFLAG_END;
/*
3026 * Ensure that the map for this transmission is placed at
3027 * the array index of the last descriptor in this chain.
 */
3029 txr->bnx_tx_buf[*txidx].bnx_tx_dmamap = txr->bnx_tx_buf[idx].bnx_tx_dmamap;
3030 txr->bnx_tx_buf[idx].bnx_tx_dmamap = map;
3031 txr->bnx_tx_buf[idx].bnx_tx_mbuf = m_head;
3032 txr->bnx_tx_cnt += nsegs;
3034 BNX_INC(idx, BGE_TX_RING_CNT);
3045 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
3046 * to the mbuf data regions directly in the transmit descriptors.
3049 bnx_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
3051 struct bnx_tx_ring *txr = ifsq_get_priv(ifsq);
3052 struct mbuf *m_head = NULL;
3056 KKASSERT(txr->bnx_ifsq == ifsq);
3057 ASSERT_SERIALIZED(&txr->bnx_tx_serialize);
3059 if ((ifp->if_flags & IFF_RUNNING) == 0 || ifsq_is_oactive(ifsq))
3062 prodidx = txr->bnx_tx_prodidx;
3064 while (txr->bnx_tx_buf[prodidx].bnx_tx_mbuf == NULL) {
/*
3066 * Sanity check: avoid coming within BNX_NSEG_RSVD
3067 * descriptors of the end of the ring. Also make
3068 * sure there are BNX_NSEG_SPARE descriptors for
3069 * jumbo buffers' or TSO segments' defragmentation.
 */
3071 if ((BGE_TX_RING_CNT - txr->bnx_tx_cnt) <
3072 (BNX_NSEG_RSVD + BNX_NSEG_SPARE)) {
3073 ifsq_set_oactive(ifsq);
3077 m_head = ifsq_dequeue(ifsq, NULL);
3082 * Pack the data into the transmit ring. If we
3083 * don't have room, set the OACTIVE flag and wait
3084 * for the NIC to drain the ring.
3086 if (bnx_encap(txr, &m_head, &prodidx, &nsegs)) {
3087 ifsq_set_oactive(ifsq);
3088 IFNET_STAT_INC(ifp, oerrors, 1);
3092 if (nsegs >= txr->bnx_tx_wreg) {
3094 bnx_writembx(txr->bnx_sc, txr->bnx_tx_mbx, prodidx);
3098 ETHER_BPF_MTAP(ifp, m_head);
3101 * Set a timeout in case the chip goes out to lunch.
3103 txr->bnx_tx_watchdog.wd_timer = 5;
3108 bnx_writembx(txr->bnx_sc, txr->bnx_tx_mbx, prodidx);
3110 txr->bnx_tx_prodidx = prodidx;
3116 struct bnx_softc *sc = xsc;
3117 struct ifnet *ifp = &sc->arpcom.ac_if;
3123 ASSERT_IFNET_SERIALIZED_ALL(ifp);
3125 /* Cancel pending I/O and flush buffers. */
3131 * Init the various state machines, ring
3132 * control blocks and firmware.
3134 if (bnx_blockinit(sc)) {
3135 if_printf(ifp, "initialization failure\n");
3141 CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
3142 ETHER_HDR_LEN + ETHER_CRC_LEN + EVL_ENCAPLEN);
3144 /* Load our MAC address. */
3145 m = (uint16_t *)&sc->arpcom.ac_enaddr[0];
3146 CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
3147 CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
3149 /* Enable or disable promiscuous mode as needed. */
3152 /* Program multicast filter. */
3156 if (bnx_init_rx_ring_std(&sc->bnx_rx_std_ring)) {
3157 if_printf(ifp, "RX ring initialization failed\n");
3162 /* Init jumbo RX ring. */
3163 if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN)) {
3164 if (bnx_init_rx_ring_jumbo(sc)) {
3165 if_printf(ifp, "Jumbo RX ring initialization failed\n");
3171 /* Init our RX return ring index */
3172 for (i = 0; i < sc->bnx_rx_retcnt; ++i) {
3173 struct bnx_rx_ret_ring *ret = &sc->bnx_rx_ret_ring[i];
3175 ret->bnx_rx_saved_considx = 0;
3176 ret->bnx_rx_cnt = 0;
3180 for (i = 0; i < sc->bnx_tx_ringcnt; ++i)
3181 bnx_init_tx_ring(&sc->bnx_tx_ring[i]);
3183 /* Enable TX MAC state machine lockup fix. */
3184 mode = CSR_READ_4(sc, BGE_TX_MODE);
3185 mode |= BGE_TXMODE_MBUF_LOCKUP_FIX;
3186 if (sc->bnx_asicrev == BGE_ASICREV_BCM5720 ||
3187 sc->bnx_asicrev == BGE_ASICREV_BCM5762) {
3188 mode &= ~(BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE);
3189 mode |= CSR_READ_4(sc, BGE_TX_MODE) &
3190 (BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE);
3192 /* Turn on transmitter */
3193 CSR_WRITE_4(sc, BGE_TX_MODE, mode | BGE_TXMODE_ENABLE);
3195 /* Turn on receiver */
3196 BNX_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
3199 * Set the number of good frames to receive after RX MBUF
3200 * Low Watermark has been reached. After the RX MAC receives
3201 * this number of frames, it will drop subsequent incoming
3202 * frames until the MBUF High Watermark is reached.
3204 if (BNX_IS_57765_FAMILY(sc))
3205 CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 1);
3207 CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 2);
3209 if (sc->bnx_intr_type == PCI_INTR_TYPE_MSI) {
3211 if_printf(ifp, "MSI_MODE: %#x\n",
3212 CSR_READ_4(sc, BGE_MSI_MODE));
3216 /* Tell firmware we're alive. */
3217 BNX_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3219 /* Enable host interrupts if polling(4) is not enabled. */
3220 PCI_SETBIT(sc->bnx_dev, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA, 4);
3223 #ifdef IFPOLL_ENABLE
3224 if (ifp->if_flags & IFF_NPOLLING)
polling = TRUE;
#endif
if (polling)
3228 bnx_disable_intr(sc);
else
3230 bnx_enable_intr(sc);
3231 bnx_set_tick_cpuid(sc, polling);
3233 bnx_ifmedia_upd(ifp);
3235 ifp->if_flags |= IFF_RUNNING;
3236 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) {
3237 struct bnx_tx_ring *txr = &sc->bnx_tx_ring[i];
3239 ifsq_clr_oactive(txr->bnx_ifsq);
3240 ifsq_watchdog_start(&txr->bnx_tx_watchdog);
3243 callout_reset_bycpu(&sc->bnx_tick_timer, hz, bnx_tick, sc,
3244 sc->bnx_tick_cpuid);
3248 * Set media options.
3251 bnx_ifmedia_upd(struct ifnet *ifp)
3253 struct bnx_softc *sc = ifp->if_softc;
3255 /* If this is a 1000baseX NIC, enable the TBI port. */
3256 if (sc->bnx_flags & BNX_FLAG_TBI) {
3257 struct ifmedia *ifm = &sc->bnx_ifmedia;
3259 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
3262 switch(IFM_SUBTYPE(ifm->ifm_media)) {
3267 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
3268 BNX_CLRBIT(sc, BGE_MAC_MODE,
3269 BGE_MACMODE_HALF_DUPLEX);
3271 BNX_SETBIT(sc, BGE_MAC_MODE,
3272 BGE_MACMODE_HALF_DUPLEX);
3279 struct mii_data *mii = device_get_softc(sc->bnx_miibus);
3283 if (mii->mii_instance) {
3284 struct mii_softc *miisc;
3286 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
3287 mii_phy_reset(miisc);
3292 * Force an interrupt so that we will call bnx_link_upd
3293 * if needed and clear any pending link state attention.
3294 * Without this we are not getting any further interrupts
3295 * for link state changes and thus will not UP the link and
3296 * not be able to send in bnx_start. The only way to get
3297 * things working was to receive a packet and get an RX
3300 * bnx_tick should help for fiber cards and we might not
3301 * need to do this here if BNX_FLAG_TBI is set but as
3302 * we poll for fiber anyway it should not harm.
3304 BNX_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
3310 * Report current media status.
3313 bnx_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
3315 struct bnx_softc *sc = ifp->if_softc;
3317 if (sc->bnx_flags & BNX_FLAG_TBI) {
3318 ifmr->ifm_status = IFM_AVALID;
3319 ifmr->ifm_active = IFM_ETHER;
3320 if (CSR_READ_4(sc, BGE_MAC_STS) &
3321 BGE_MACSTAT_TBI_PCS_SYNCHED) {
3322 ifmr->ifm_status |= IFM_ACTIVE;
3324 ifmr->ifm_active |= IFM_NONE;
3328 ifmr->ifm_active |= IFM_1000_SX;
3329 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
3330 ifmr->ifm_active |= IFM_HDX;
3332 ifmr->ifm_active |= IFM_FDX;
3334 struct mii_data *mii = device_get_softc(sc->bnx_miibus);
3337 ifmr->ifm_active = mii->mii_media_active;
3338 ifmr->ifm_status = mii->mii_media_status;
3343 bnx_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
3345 struct bnx_softc *sc = ifp->if_softc;
3346 struct ifreq *ifr = (struct ifreq *)data;
3347 int mask, error = 0;
3349 ASSERT_IFNET_SERIALIZED_ALL(ifp);
3353 if ((!BNX_IS_JUMBO_CAPABLE(sc) && ifr->ifr_mtu > ETHERMTU) ||
3354 (BNX_IS_JUMBO_CAPABLE(sc) &&
3355 ifr->ifr_mtu > BNX_JUMBO_MTU)) {
3357 } else if (ifp->if_mtu != ifr->ifr_mtu) {
3358 ifp->if_mtu = ifr->ifr_mtu;
3359 if (ifp->if_flags & IFF_RUNNING)
3364 if (ifp->if_flags & IFF_UP) {
3365 if (ifp->if_flags & IFF_RUNNING) {
3366 mask = ifp->if_flags ^ sc->bnx_if_flags;
3369 * If only the state of the PROMISC flag
3370 * changed, then just use the 'set promisc
3371 * mode' command instead of reinitializing
3372 * the entire NIC. Doing a full re-init
3373 * means reloading the firmware and waiting
3374 * for it to start up, which may take a
3375 * second or two. Similarly for ALLMULTI.
3377 if (mask & IFF_PROMISC)
3379 if (mask & IFF_ALLMULTI)
3384 } else if (ifp->if_flags & IFF_RUNNING) {
3387 sc->bnx_if_flags = ifp->if_flags;
3391 if (ifp->if_flags & IFF_RUNNING)
3396 if (sc->bnx_flags & BNX_FLAG_TBI) {
3397 error = ifmedia_ioctl(ifp, ifr,
3398 &sc->bnx_ifmedia, command);
3400 struct mii_data *mii;
3402 mii = device_get_softc(sc->bnx_miibus);
3403 error = ifmedia_ioctl(ifp, ifr,
3404 &mii->mii_media, command);
3408 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
3409 if (mask & IFCAP_HWCSUM) {
3410 ifp->if_capenable ^= (mask & IFCAP_HWCSUM);
3411 if (ifp->if_capenable & IFCAP_TXCSUM)
3412 ifp->if_hwassist |= BNX_CSUM_FEATURES;
3414 ifp->if_hwassist &= ~BNX_CSUM_FEATURES;
3416 if (mask & IFCAP_TSO) {
3417 ifp->if_capenable ^= (mask & IFCAP_TSO);
3418 if (ifp->if_capenable & IFCAP_TSO)
3419 ifp->if_hwassist |= CSUM_TSO;
3421 ifp->if_hwassist &= ~CSUM_TSO;
3425 error = ether_ioctl(ifp, command, data);
3432 bnx_watchdog(struct ifaltq_subque *ifsq)
3434 struct ifnet *ifp = ifsq_get_ifp(ifsq);
3435 struct bnx_softc *sc = ifp->if_softc;
3438 ASSERT_IFNET_SERIALIZED_ALL(ifp);
3440 if_printf(ifp, "watchdog timeout -- resetting\n");
3444 IFNET_STAT_INC(ifp, oerrors, 1);
3446 for (i = 0; i < sc->bnx_tx_ringcnt; ++i)
3447 ifsq_devstart_sched(sc->bnx_tx_ring[i].bnx_ifsq);
3451 * Stop the adapter and free any mbufs allocated to the
3455 bnx_stop(struct bnx_softc *sc)
3457 struct ifnet *ifp = &sc->arpcom.ac_if;
3460 ASSERT_IFNET_SERIALIZED_ALL(ifp);
3462 callout_stop(&sc->bnx_tick_timer);
3465 * Disable all of the receiver blocks
3467 bnx_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
3468 bnx_stop_block(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
3469 bnx_stop_block(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
3470 bnx_stop_block(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
3471 bnx_stop_block(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
3472 bnx_stop_block(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
3475 * Disable all of the transmit blocks
3477 bnx_stop_block(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
3478 bnx_stop_block(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
3479 bnx_stop_block(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
3480 bnx_stop_block(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
3481 bnx_stop_block(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
3482 bnx_stop_block(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
/*
3485 * Shut down all of the memory managers and related state machines.
 */
3488 bnx_stop_block(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
3489 bnx_stop_block(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
3490 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
3491 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
3493 /* Disable host interrupts. */
3494 bnx_disable_intr(sc);
3497 * Tell firmware we're shutting down.
3499 BNX_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3501 /* Free the RX lists. */
3502 bnx_free_rx_ring_std(&sc->bnx_rx_std_ring);
3504 /* Free jumbo RX list. */
3505 if (BNX_IS_JUMBO_CAPABLE(sc))
3506 bnx_free_rx_ring_jumbo(sc);
3508 /* Free TX buffers. */
3509 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) {
3510 struct bnx_tx_ring *txr = &sc->bnx_tx_ring[i];
3512 txr->bnx_saved_status_tag = 0;
3513 bnx_free_tx_ring(txr);
3516 /* Clear saved status tag */
3517 for (i = 0; i < sc->bnx_rx_retcnt; ++i)
3518 sc->bnx_rx_ret_ring[i].bnx_saved_status_tag = 0;
3521 sc->bnx_coal_chg = 0;
3523 ifp->if_flags &= ~IFF_RUNNING;
3524 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) {
3525 struct bnx_tx_ring *txr = &sc->bnx_tx_ring[i];
3527 ifsq_clr_oactive(txr->bnx_ifsq);
3528 ifsq_watchdog_stop(&txr->bnx_tx_watchdog);
3533 * Stop all chip I/O so that the kernel's probe routines don't
3534 * get confused by errant DMAs when rebooting.
3537 bnx_shutdown(device_t dev)
3539 struct bnx_softc *sc = device_get_softc(dev);
3540 struct ifnet *ifp = &sc->arpcom.ac_if;
3542 ifnet_serialize_all(ifp);
3545 ifnet_deserialize_all(ifp);
3549 bnx_suspend(device_t dev)
3551 struct bnx_softc *sc = device_get_softc(dev);
3552 struct ifnet *ifp = &sc->arpcom.ac_if;
3554 ifnet_serialize_all(ifp);
3556 ifnet_deserialize_all(ifp);
3562 bnx_resume(device_t dev)
3564 struct bnx_softc *sc = device_get_softc(dev);
3565 struct ifnet *ifp = &sc->arpcom.ac_if;
3567 ifnet_serialize_all(ifp);
3569 if (ifp->if_flags & IFF_UP) {
3573 for (i = 0; i < sc->bnx_tx_ringcnt; ++i)
3574 ifsq_devstart_sched(sc->bnx_tx_ring[i].bnx_ifsq);
3577 ifnet_deserialize_all(ifp);
3583 bnx_setpromisc(struct bnx_softc *sc)
3585 struct ifnet *ifp = &sc->arpcom.ac_if;
3587 if (ifp->if_flags & IFF_PROMISC)
3588 BNX_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
3590 BNX_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
3594 bnx_dma_free(struct bnx_softc *sc)
3596 struct bnx_rx_std_ring *std = &sc->bnx_rx_std_ring;
3599 /* Destroy RX return rings */
3600 if (sc->bnx_rx_ret_ring != NULL) {
3601 for (i = 0; i < sc->bnx_rx_retcnt; ++i)
3602 bnx_destroy_rx_ret_ring(&sc->bnx_rx_ret_ring[i]);
3603 kfree(sc->bnx_rx_ret_ring, M_DEVBUF);
3606 /* Destroy the RX mbuf DMA tag and maps. */
3607 if (std->bnx_rx_mtag != NULL) {
3608 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
3609 KKASSERT(std->bnx_rx_std_buf[i].bnx_rx_mbuf == NULL);
3610 bus_dmamap_destroy(std->bnx_rx_mtag,
3611 std->bnx_rx_std_buf[i].bnx_rx_dmamap);
3613 bus_dma_tag_destroy(std->bnx_rx_mtag);
3616 /* Destroy standard RX ring */
3617 bnx_dma_block_free(std->bnx_rx_std_ring_tag,
3618 std->bnx_rx_std_ring_map, std->bnx_rx_std_ring);
3620 /* Destroy TX rings */
3621 if (sc->bnx_tx_ring != NULL) {
3622 for (i = 0; i < sc->bnx_tx_ringcnt; ++i)
3623 bnx_destroy_tx_ring(&sc->bnx_tx_ring[i]);
3624 kfree(sc->bnx_tx_ring, M_DEVBUF);
3627 if (BNX_IS_JUMBO_CAPABLE(sc))
3628 bnx_free_jumbo_mem(sc);
3630 /* Destroy status block */
3631 bnx_dma_block_free(sc->bnx_cdata.bnx_status_tag,
3632 sc->bnx_cdata.bnx_status_map,
3633 sc->bnx_ldata.bnx_status_block);
3635 /* Destroy the parent tag */
3636 if (sc->bnx_cdata.bnx_parent_tag != NULL)
3637 bus_dma_tag_destroy(sc->bnx_cdata.bnx_parent_tag);
3641 bnx_dma_alloc(device_t dev)
3643 struct bnx_softc *sc = device_get_softc(dev);
3644 struct bnx_rx_std_ring *std = &sc->bnx_rx_std_ring;
/*
3648 * Allocate the parent bus DMA tag appropriate for PCI.
3650 * All of the NetExtreme/NetLink controllers have a 4GB boundary DMA bug.
3652 * Whenever an address crosses a multiple of the 4GB boundary
3653 * (including 4GB, 8GB, 12GB, etc.) and makes the transition
3654 * from 0xX_FFFF_FFFF to 0x(X+1)_0000_0000, an internal DMA
3655 * state machine will lock up and cause the device to hang.
 */
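/*
 * Worked example: a 16KB transfer starting at physical address
 * 0x0_FFFF_C000 would end at 0x1_0000_0000 and cross the 4GB
 * line, triggering the bug; the BGE_DMA_BOUNDARY_4G restriction
 * below makes busdma split such a request into segments that
 * never cross the boundary.
 */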
3657 error = bus_dma_tag_create(NULL, 1, BGE_DMA_BOUNDARY_4G,
3658 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
3659 BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT,
3660 0, &sc->bnx_cdata.bnx_parent_tag);
3662 device_printf(dev, "could not create parent DMA tag\n");
/*
3667 * Create DMA memory for the status block.
 */
3669 error = bnx_dma_block_alloc(sc, BGE_STATUS_BLK_SZ,
3670 &sc->bnx_cdata.bnx_status_tag,
3671 &sc->bnx_cdata.bnx_status_map,
3672 (void *)&sc->bnx_ldata.bnx_status_block,
3673 &sc->bnx_ldata.bnx_status_block_paddr);
3675 device_printf(dev, "could not create status block\n");
3680 * Create DMA tag and maps for RX mbufs.
3683 lwkt_serialize_init(&std->bnx_rx_std_serialize);
3684 error = bus_dma_tag_create(sc->bnx_cdata.bnx_parent_tag, 1, 0,
3685 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3686 NULL, NULL, MCLBYTES, 1, MCLBYTES,
3687 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK, &std->bnx_rx_mtag);
3689 device_printf(dev, "could not create RX mbuf DMA tag\n");
3693 for (i = 0; i < BGE_STD_RX_RING_CNT; ++i) {
3694 error = bus_dmamap_create(std->bnx_rx_mtag, BUS_DMA_WAITOK,
3695 &std->bnx_rx_std_buf[i].bnx_rx_dmamap);
3699 for (j = 0; j < i; ++j) {
3700 bus_dmamap_destroy(std->bnx_rx_mtag,
3701 std->bnx_rx_std_buf[j].bnx_rx_dmamap);
3703 bus_dma_tag_destroy(std->bnx_rx_mtag);
3704 std->bnx_rx_mtag = NULL;
3707 "could not create %dth RX mbuf DMA map\n", i);
/*
3713 * Create DMA memory for the standard RX ring.
 */
3715 error = bnx_dma_block_alloc(sc, BGE_STD_RX_RING_SZ,
3716 &std->bnx_rx_std_ring_tag,
3717 &std->bnx_rx_std_ring_map,
3718 (void *)&std->bnx_rx_std_ring,
3719 &std->bnx_rx_std_ring_paddr);
3721 device_printf(dev, "could not create std RX ring\n");
3726 * Create RX return rings
3728 sc->bnx_rx_ret_ring = kmalloc_cachealign(
3729 sizeof(struct bnx_rx_ret_ring) * sc->bnx_rx_retcnt, M_DEVBUF,
3731 for (i = 0; i < sc->bnx_rx_retcnt; ++i) {
3732 struct bnx_rx_ret_ring *ret = &sc->bnx_rx_ret_ring[i];
3736 ret->bnx_rx_cntmax = (BGE_STD_RX_RING_CNT / 4) /
sc->bnx_rx_retcnt;
3738 ret->bnx_rx_mask = 1 << i;
3741 ret->bnx_rx_considx =
3742 &sc->bnx_ldata.bnx_status_block->bge_idx[0].bge_rx_prod_idx;
3743 ret->bnx_hw_status_tag =
3744 &sc->bnx_ldata.bnx_status_block->bge_status_tag;
3746 error = bnx_create_rx_ret_ring(ret);
3749 "could not create %dth RX ret ring\n", i);
3757 sc->bnx_tx_ring = kmalloc_cachealign(
3758 sizeof(struct bnx_tx_ring) * sc->bnx_tx_ringcnt, M_DEVBUF,
3760 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) {
3761 struct bnx_tx_ring *txr = &sc->bnx_tx_ring[i];
3764 txr->bnx_tx_mbx = bnx_tx_mailbox[i];
3767 txr->bnx_tx_considx =
3768 &sc->bnx_ldata.bnx_status_block->bge_idx[0].bge_tx_cons_idx;
3770 error = bnx_create_tx_ring(txr);
3773 "could not create %dth TX ring\n", i);
3779 * Create jumbo buffer pool.
3781 if (BNX_IS_JUMBO_CAPABLE(sc)) {
3782 error = bnx_alloc_jumbo_mem(sc);
3785 "could not create jumbo buffer pool\n");
3794 bnx_dma_block_alloc(struct bnx_softc *sc, bus_size_t size, bus_dma_tag_t *tag,
3795 bus_dmamap_t *map, void **addr, bus_addr_t *paddr)
3800 error = bus_dmamem_coherent(sc->bnx_cdata.bnx_parent_tag, PAGE_SIZE, 0,
3801 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3802 size, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
3806 *tag = dmem.dmem_tag;
3807 *map = dmem.dmem_map;
3808 *addr = dmem.dmem_addr;
3809 *paddr = dmem.dmem_busaddr;
3815 bnx_dma_block_free(bus_dma_tag_t tag, bus_dmamap_t map, void *addr)
3818 bus_dmamap_unload(tag, map);
3819 bus_dmamem_free(tag, addr, map);
3820 bus_dma_tag_destroy(tag);
3825 bnx_tbi_link_upd(struct bnx_softc *sc, uint32_t status)
3827 struct ifnet *ifp = &sc->arpcom.ac_if;
3829 #define PCS_ENCODE_ERR (BGE_MACSTAT_PORT_DECODE_ERROR|BGE_MACSTAT_MI_COMPLETE)
3832 * Sometimes PCS encoding errors are detected in
3833 * TBI mode (on fiber NICs), and for some reason
3834 * the chip will signal them as link changes.
3835 * If we get a link change event, but the 'PCS
3836 * encoding error' bit in the MAC status register
3837 * is set, don't bother doing a link check.
3838 * This avoids spurious "gigabit link up" messages
3839 * that sometimes appear on fiber NICs during
3840 * periods of heavy traffic.
3842 if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
3843 if (!sc->bnx_link) {
3845 if (sc->bnx_asicrev == BGE_ASICREV_BCM5704) {
3846 BNX_CLRBIT(sc, BGE_MAC_MODE,
3847 BGE_MACMODE_TBI_SEND_CFGS);
3849 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
3852 if_printf(ifp, "link UP\n");
3854 ifp->if_link_state = LINK_STATE_UP;
3855 if_link_state_change(ifp);
3857 } else if ((status & PCS_ENCODE_ERR) != PCS_ENCODE_ERR) {
3862 if_printf(ifp, "link DOWN\n");
3864 ifp->if_link_state = LINK_STATE_DOWN;
3865 if_link_state_change(ifp);
3869 #undef PCS_ENCODE_ERR
3871 /* Clear the attention. */
3872 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
3873 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
3874 BGE_MACSTAT_LINK_CHANGED);
3878 bnx_copper_link_upd(struct bnx_softc *sc, uint32_t status __unused)
3880 struct ifnet *ifp = &sc->arpcom.ac_if;
3881 struct mii_data *mii = device_get_softc(sc->bnx_miibus);
3884 bnx_miibus_statchg(sc->bnx_dev);
3888 if_printf(ifp, "link UP\n");
3890 if_printf(ifp, "link DOWN\n");
3893 /* Clear the attention. */
3894 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
3895 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
3896 BGE_MACSTAT_LINK_CHANGED);
3900 bnx_autopoll_link_upd(struct bnx_softc *sc, uint32_t status __unused)
3902 struct ifnet *ifp = &sc->arpcom.ac_if;
3903 struct mii_data *mii = device_get_softc(sc->bnx_miibus);
3907 if (!sc->bnx_link &&
3908 (mii->mii_media_status & IFM_ACTIVE) &&
3909 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
3912 if_printf(ifp, "link UP\n");
3913 } else if (sc->bnx_link &&
3914 (!(mii->mii_media_status & IFM_ACTIVE) ||
3915 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
3918 if_printf(ifp, "link DOWN\n");
3921 /* Clear the attention. */
3922 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
3923 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
3924 BGE_MACSTAT_LINK_CHANGED);
3928 bnx_sysctl_rx_coal_ticks(SYSCTL_HANDLER_ARGS)
3930 struct bnx_softc *sc = arg1;
3932 return bnx_sysctl_coal_chg(oidp, arg1, arg2, req,
3933 &sc->bnx_rx_coal_ticks,
3934 BNX_RX_COAL_TICKS_MIN, BNX_RX_COAL_TICKS_MAX,
3935 BNX_RX_COAL_TICKS_CHG);
3939 bnx_sysctl_tx_coal_ticks(SYSCTL_HANDLER_ARGS)
3941 struct bnx_softc *sc = arg1;
3943 return bnx_sysctl_coal_chg(oidp, arg1, arg2, req,
3944 &sc->bnx_tx_coal_ticks,
3945 BNX_TX_COAL_TICKS_MIN, BNX_TX_COAL_TICKS_MAX,
3946 BNX_TX_COAL_TICKS_CHG);
3950 bnx_sysctl_rx_coal_bds(SYSCTL_HANDLER_ARGS)
3952 struct bnx_softc *sc = arg1;
3954 return bnx_sysctl_coal_chg(oidp, arg1, arg2, req,
3955 &sc->bnx_rx_coal_bds,
3956 BNX_RX_COAL_BDS_MIN, BNX_RX_COAL_BDS_MAX,
3957 BNX_RX_COAL_BDS_CHG);
3961 bnx_sysctl_tx_coal_bds(SYSCTL_HANDLER_ARGS)
3963 struct bnx_softc *sc = arg1;
3965 return bnx_sysctl_coal_chg(oidp, arg1, arg2, req,
3966 &sc->bnx_tx_coal_bds,
3967 BNX_TX_COAL_BDS_MIN, BNX_TX_COAL_BDS_MAX,
3968 BNX_TX_COAL_BDS_CHG);
3972 bnx_sysctl_rx_coal_bds_int(SYSCTL_HANDLER_ARGS)
3974 struct bnx_softc *sc = arg1;
3976 return bnx_sysctl_coal_chg(oidp, arg1, arg2, req,
3977 &sc->bnx_rx_coal_bds_int,
3978 BNX_RX_COAL_BDS_MIN, BNX_RX_COAL_BDS_MAX,
3979 BNX_RX_COAL_BDS_INT_CHG);
3983 bnx_sysctl_tx_coal_bds_int(SYSCTL_HANDLER_ARGS)
3985 struct bnx_softc *sc = arg1;
3987 return bnx_sysctl_coal_chg(oidp, arg1, arg2, req,
3988 &sc->bnx_tx_coal_bds_int,
3989 BNX_TX_COAL_BDS_MIN, BNX_TX_COAL_BDS_MAX,
3990 BNX_TX_COAL_BDS_INT_CHG);
3994 bnx_sysctl_coal_chg(SYSCTL_HANDLER_ARGS, uint32_t *coal,
3995 int coal_min, int coal_max, uint32_t coal_chg_mask)
3997 struct bnx_softc *sc = arg1;
3998 struct ifnet *ifp = &sc->arpcom.ac_if;
4001 ifnet_serialize_all(ifp);
v = *coal;
4004 error = sysctl_handle_int(oidp, &v, 0, req);
4005 if (!error && req->newptr != NULL) {
4006 if (v < coal_min || v > coal_max) {
error = EINVAL;
} else {
*coal = v;
4010 sc->bnx_coal_chg |= coal_chg_mask;
4012 /* Commit changes */
4013 bnx_coal_change(sc);
}
}
4017 ifnet_deserialize_all(ifp);
4022 bnx_coal_change(struct bnx_softc *sc)
4024 struct ifnet *ifp = &sc->arpcom.ac_if;
4026 ASSERT_IFNET_SERIALIZED_ALL(ifp);
4028 if (sc->bnx_coal_chg & BNX_RX_COAL_TICKS_CHG) {
4029 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS,
4030 sc->bnx_rx_coal_ticks);
4032 CSR_READ_4(sc, BGE_HCC_RX_COAL_TICKS);
4035 if_printf(ifp, "rx_coal_ticks -> %u\n",
4036 sc->bnx_rx_coal_ticks);
4040 if (sc->bnx_coal_chg & BNX_TX_COAL_TICKS_CHG) {
4041 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS,
4042 sc->bnx_tx_coal_ticks);
4044 CSR_READ_4(sc, BGE_HCC_TX_COAL_TICKS);
4047 if_printf(ifp, "tx_coal_ticks -> %u\n",
4048 sc->bnx_tx_coal_ticks);
4052 if (sc->bnx_coal_chg & BNX_RX_COAL_BDS_CHG) {
4053 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS,
4054 sc->bnx_rx_coal_bds);
4056 CSR_READ_4(sc, BGE_HCC_RX_MAX_COAL_BDS);
4059 if_printf(ifp, "rx_coal_bds -> %u\n",
4060 sc->bnx_rx_coal_bds);
4064 if (sc->bnx_coal_chg & BNX_TX_COAL_BDS_CHG) {
4065 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS,
4066 sc->bnx_tx_coal_bds);
4068 CSR_READ_4(sc, BGE_HCC_TX_MAX_COAL_BDS);
4071 if_printf(ifp, "tx_coal_bds -> %u\n",
4072 sc->bnx_tx_coal_bds);
4076 if (sc->bnx_coal_chg & BNX_RX_COAL_BDS_INT_CHG) {
4077 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT,
4078 sc->bnx_rx_coal_bds_int);
4080 CSR_READ_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT);
4083 if_printf(ifp, "rx_coal_bds_int -> %u\n",
4084 sc->bnx_rx_coal_bds_int);
4088 if (sc->bnx_coal_chg & BNX_TX_COAL_BDS_INT_CHG) {
4089 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT,
4090 sc->bnx_tx_coal_bds_int);
4092 CSR_READ_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT);
4095 if_printf(ifp, "tx_coal_bds_int -> %u\n",
4096 sc->bnx_tx_coal_bds_int);
4100 sc->bnx_coal_chg = 0;
4104 bnx_check_intr(void *xintr)
4106 struct bnx_intr_data *intr = xintr;
4107 struct bnx_rx_ret_ring *ret;
4108 struct bnx_tx_ring *txr;
4111 lwkt_serialize_enter(intr->bnx_intr_serialize);
4113 KKASSERT(mycpuid == intr->bnx_intr_cpuid);
4115 ifp = &intr->bnx_sc->arpcom.ac_if;
4116 if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) != IFF_RUNNING) {
4117 lwkt_serialize_exit(intr->bnx_intr_serialize);
4121 txr = intr->bnx_txr;
4122 ret = intr->bnx_ret;
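/*
 * Lost-interrupt heuristic (a sketch of the logic below): if the
 * hardware indices show pending work but the driver's consumer
 * indices did not advance between two consecutive 10ms checks,
 * assume the interrupt was lost and invoke the interrupt handler
 * manually.
 */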
4124 if (*ret->bnx_rx_considx != ret->bnx_rx_saved_considx ||
4125 *txr->bnx_tx_considx != txr->bnx_tx_saved_considx) {
4126 if (intr->bnx_rx_check_considx == ret->bnx_rx_saved_considx &&
4127 intr->bnx_tx_check_considx == txr->bnx_tx_saved_considx) {
4128 if (!intr->bnx_intr_maylose) {
4129 intr->bnx_intr_maylose = TRUE;
4133 if_printf(ifp, "lost interrupt\n");
4134 intr->bnx_intr_func(intr->bnx_intr_arg);
4137 intr->bnx_intr_maylose = FALSE;
4138 intr->bnx_rx_check_considx = ret->bnx_rx_saved_considx;
4139 intr->bnx_tx_check_considx = txr->bnx_tx_saved_considx;
4142 callout_reset(&intr->bnx_intr_timer, BNX_INTR_CKINTVL,
4143 intr->bnx_intr_check, intr);
4144 lwkt_serialize_exit(intr->bnx_intr_serialize);
4148 bnx_enable_intr(struct bnx_softc *sc)
4150 struct ifnet *ifp = &sc->arpcom.ac_if;
4151 struct bnx_intr_data *intr;
4154 for (i = 0; i < sc->bnx_intr_cnt; ++i) {
4155 lwkt_serialize_handler_enable(
4156 sc->bnx_intr_data[i].bnx_intr_serialize);
4162 intr = &sc->bnx_intr_data[0]; /* XXX */
4163 bnx_writembx(sc, BGE_MBX_IRQ0_LO, (*intr->bnx_saved_status_tag) << 24);
4164 if (sc->bnx_flags & BNX_FLAG_ONESHOT_MSI) {
4165 /* XXX Linux driver */
4166 bnx_writembx(sc, BGE_MBX_IRQ0_LO,
4167 (*intr->bnx_saved_status_tag) << 24);
4171 * Unmask the interrupt when we stop polling.
4173 PCI_CLRBIT(sc->bnx_dev, BGE_PCI_MISC_CTL,
4174 BGE_PCIMISCCTL_MASK_PCI_INTR, 4);
4177 * Trigger another interrupt, since above writing
4178 * to interrupt mailbox0 may acknowledge pending
4181 BNX_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
4183 if (sc->bnx_flags & BNX_FLAG_STATUSTAG_BUG) {
4185 if_printf(ifp, "status tag bug workaround\n");
4187 for (i = 0; i < sc->bnx_intr_cnt; ++i) {
4188 intr = &sc->bnx_intr_data[i];
4189 intr->bnx_intr_maylose = FALSE;
4190 intr->bnx_rx_check_considx = 0;
4191 intr->bnx_tx_check_considx = 0;
4192 callout_reset_bycpu(&intr->bnx_intr_timer,
4193 BNX_INTR_CKINTVL, intr->bnx_intr_check, intr,
4194 intr->bnx_intr_cpuid);
4200 bnx_disable_intr(struct bnx_softc *sc)
4204 for (i = 0; i < sc->bnx_intr_cnt; ++i) {
4205 struct bnx_intr_data *intr = &sc->bnx_intr_data[i];
4207 callout_stop(&intr->bnx_intr_timer);
4208 intr->bnx_intr_maylose = FALSE;
4209 intr->bnx_rx_check_considx = 0;
4210 intr->bnx_tx_check_considx = 0;
4214 * Mask the interrupt when we start polling.
4216 PCI_SETBIT(sc->bnx_dev, BGE_PCI_MISC_CTL,
4217 BGE_PCIMISCCTL_MASK_PCI_INTR, 4);
4220 * Acknowledge possible asserted interrupt.
4222 bnx_writembx(sc, BGE_MBX_IRQ0_LO, 1);
4224 for (i = 0; i < sc->bnx_intr_cnt; ++i) {
4225 lwkt_serialize_handler_disable(
4226 sc->bnx_intr_data[i].bnx_intr_serialize);
4231 bnx_get_eaddr_mem(struct bnx_softc *sc, uint8_t ether_addr[])
4236 mac_addr = bnx_readmem_ind(sc, 0x0c14);
4237 if ((mac_addr >> 16) == 0x484b) {
4238 ether_addr[0] = (uint8_t)(mac_addr >> 8);
4239 ether_addr[1] = (uint8_t)mac_addr;
4240 mac_addr = bnx_readmem_ind(sc, 0x0c18);
4241 ether_addr[2] = (uint8_t)(mac_addr >> 24);
4242 ether_addr[3] = (uint8_t)(mac_addr >> 16);
4243 ether_addr[4] = (uint8_t)(mac_addr >> 8);
4244 ether_addr[5] = (uint8_t)mac_addr;
4251 bnx_get_eaddr_nvram(struct bnx_softc *sc, uint8_t ether_addr[])
4253 int mac_offset = BGE_EE_MAC_OFFSET;
4255 if (BNX_IS_5717_PLUS(sc)) {
4258 f = pci_get_function(sc->bnx_dev);
4260 mac_offset = BGE_EE_MAC_OFFSET_5717;
4262 mac_offset += BGE_EE_MAC_OFFSET_5717_OFF;
4265 return bnx_read_nvram(sc, ether_addr, mac_offset + 2, ETHER_ADDR_LEN);
static int
bnx_get_eaddr_eeprom(struct bnx_softc *sc, uint8_t ether_addr[])
{
	if (sc->bnx_flags & BNX_FLAG_NO_EEPROM)
		return 1;

	return bnx_read_eeprom(sc, ether_addr, BGE_EE_MAC_OFFSET + 2,
	    ETHER_ADDR_LEN);
}

static int
bnx_get_eaddr(struct bnx_softc *sc, uint8_t eaddr[])
{
	static const bnx_eaddr_fcn_t bnx_eaddr_funcs[] = {
		/* NOTE: Order is critical */
		bnx_get_eaddr_mem,
		bnx_get_eaddr_nvram,
		bnx_get_eaddr_eeprom,
		NULL
	};
	const bnx_eaddr_fcn_t *func;

	for (func = bnx_eaddr_funcs; *func != NULL; ++func) {
		if ((*func)(sc, eaddr) == 0)
			break;
	}
	return (*func == NULL ? ENXIO : 0);
}

/*
 * NOTE: 'm' is not freed upon failure
 */
static struct mbuf *
bnx_defrag_shortdma(struct mbuf *m)
{
	struct mbuf *n;
	int found;

	/*
	 * If the device receives two back-to-back send BDs with less
	 * than or equal to 8 total bytes, then the device may hang.
	 * The two back-to-back send BDs must be in the same frame for
	 * this failure to occur.  Scan the mbuf chain and see whether
	 * two such back-to-back send BDs exist.  If this is the case,
	 * allocate a new mbuf and copy the frame to work around the
	 * silicon bug.
	 */
	for (n = m, found = 0; n != NULL; n = n->m_next) {
		if (n->m_len < 8) {
			found++;
			if (found > 1)
				break;
			continue;
		}
		found = 0;
	}

	if (found > 1)
		n = m_defrag(m, MB_DONTWAIT);
	else
		n = m;
	return n;
}

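/*
 * Example: an mbuf chain "5-byte mbuf -> 3-byte mbuf -> cluster"
 * makes the scan above see two consecutive mbufs shorter than 8
 * bytes ('found' reaches 2), so the chain is collapsed with
 * m_defrag().  This is a conservative approximation of the hardware
 * erratum, which is stated in terms of two back-to-back send BDs
 * totalling at most 8 bytes.  On allocation failure m_defrag()
 * returns NULL and, per the NOTE above, the original chain is left
 * for the caller to free.
 */
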
static void
bnx_stop_block(struct bnx_softc *sc, bus_size_t reg, uint32_t bit)
{
	int i;

	BNX_CLRBIT(sc, reg, bit);
	for (i = 0; i < BNX_TIMEOUT; i++) {
		if ((CSR_READ_4(sc, reg) & bit) == 0)
			return;
		DELAY(100);
	}
}

static void
bnx_link_poll(struct bnx_softc *sc)
{
	uint32_t status;

	status = CSR_READ_4(sc, BGE_MAC_STS);
	if ((status & sc->bnx_link_chg) || sc->bnx_link_evt) {
		sc->bnx_link_evt = 0;
		sc->bnx_link_upd(sc, status);
	}
}

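/*
 * NOTE:
 * In "one shot" MSI mode the chip is expected to mask further MSI
 * messages on its own after firing one, until the driver re-arms it
 * by writing the saved status tag back to interrupt mailbox0 (see
 * bnx_enable_intr() above), saving an explicit mask register access
 * per interrupt.  bnx_enable_msi() below merely clears the chip's
 * ONESHOT_DISABLE override when that mode is in use.
 */
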
static void
bnx_enable_msi(struct bnx_softc *sc)
{
	uint32_t msi_mode;

	msi_mode = CSR_READ_4(sc, BGE_MSI_MODE);
	msi_mode |= BGE_MSIMODE_ENABLE;
	if (sc->bnx_flags & BNX_FLAG_ONESHOT_MSI) {
		/*
		 * NOTE:
		 * 5718-PG105-R says that "one shot" mode does not
		 * work if MSI is used; however, it obviously works.
		 */
		msi_mode &= ~BGE_MSIMODE_ONESHOT_DISABLE;
	}
	CSR_WRITE_4(sc, BGE_MSI_MODE, msi_mode);
}

static uint32_t
bnx_dma_swap_options(struct bnx_softc *sc)
{
	uint32_t dma_options;

	dma_options = BGE_MODECTL_WORDSWAP_NONFRAME |
	    BGE_MODECTL_BYTESWAP_DATA | BGE_MODECTL_WORDSWAP_DATA;
#if BYTE_ORDER == BIG_ENDIAN
	dma_options |= BGE_MODECTL_BYTESWAP_NONFRAME;
#endif
	if (sc->bnx_asicrev == BGE_ASICREV_BCM5720 ||
	    sc->bnx_asicrev == BGE_ASICREV_BCM5762) {
		dma_options |= BGE_MODECTL_BYTESWAP_B2HRX_DATA |
		    BGE_MODECTL_WORDSWAP_B2HRX_DATA |
		    BGE_MODECTL_B2HRX_ENABLE | BGE_MODECTL_HTX2B_ENABLE;
	}
	return dma_options;
}

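/*
 * bnx_setup_tso() below hands the controller its TSO parameters
 * in-band: the per-segment MSS and the combined IP+TCP header length.
 * The header length, expressed in 32-bit words ('hlen'), is split
 * across spare bits of the send BD: hlen bits 0-1 go into mss bits
 * 14-15, hlen bit 2 into flags bit 4, and hlen bits 3-7 into flags
 * bits 10-14 (the placement is inferred from the shifts below).
 * Worked example for a plain 20-byte IP header and 20-byte TCP
 * header: hlen = (20 + 20) >> 2 = 10, so
 * mss |= (10 & 0x3) << 14 = 0x8000 and
 * flags |= ((10 & 0xf8) << 7) | ((10 & 0x4) << 2) = 0x400.
 */
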
static int
bnx_setup_tso(struct bnx_tx_ring *txr, struct mbuf **mp,
    uint16_t *mss0, uint16_t *flags0)
{
	struct mbuf *m;
	struct ip *ip;
	struct tcphdr *th;
	int thoff, iphlen, hoff, hlen;
	uint16_t flags, mss;

	m = *mp;
	KASSERT(M_WRITABLE(m), ("TSO mbuf not writable"));

	hoff = m->m_pkthdr.csum_lhlen;
	iphlen = m->m_pkthdr.csum_iphlen;
	thoff = m->m_pkthdr.csum_thlen;

	KASSERT(hoff > 0, ("invalid ether header len"));
	KASSERT(iphlen > 0, ("invalid ip header len"));
	KASSERT(thoff > 0, ("invalid tcp header len"));

	if (__predict_false(m->m_len < hoff + iphlen + thoff)) {
		m = m_pullup(m, hoff + iphlen + thoff);
		if (m == NULL) {
			*mp = NULL;
			return ENOBUFS;
		}
		*mp = m;
	}
	ip = mtodoff(m, struct ip *, hoff);
	th = mtodoff(m, struct tcphdr *, hoff + iphlen);

	mss = m->m_pkthdr.tso_segsz;
	flags = BGE_TXBDFLAG_CPU_PRE_DMA | BGE_TXBDFLAG_CPU_POST_DMA;

	ip->ip_len = htons(mss + iphlen + thoff);
	th->th_sum = 0;

	hlen = (iphlen + thoff) >> 2;
	mss |= ((hlen & 0x3) << 14);
	flags |= ((hlen & 0xf8) << 7) | ((hlen & 0x4) << 2);

	*mss0 = mss;
	*flags0 = flags;

	return 0;
}

static int
bnx_create_tx_ring(struct bnx_tx_ring *txr)
{
	bus_size_t txmaxsz, txmaxsegsz;
	int i, error;

	lwkt_serialize_init(&txr->bnx_tx_serialize);

	/*
	 * Create DMA tag and maps for TX mbufs.
	 */
	if (txr->bnx_sc->bnx_flags & BNX_FLAG_TSO)
		txmaxsz = IP_MAXPACKET + sizeof(struct ether_vlan_header);
	else
		txmaxsz = BNX_JUMBO_FRAMELEN;
	if (txr->bnx_sc->bnx_asicrev == BGE_ASICREV_BCM57766)
		txmaxsegsz = MCLBYTES;
	else
		txmaxsegsz = PAGE_SIZE;
	error = bus_dma_tag_create(txr->bnx_sc->bnx_cdata.bnx_parent_tag,
	    1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    txmaxsz, BNX_NSEG_NEW, txmaxsegsz,
	    BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
	    &txr->bnx_tx_mtag);
	if (error) {
		device_printf(txr->bnx_sc->bnx_dev,
		    "could not create TX mbuf DMA tag\n");
		return error;
	}

	for (i = 0; i < BGE_TX_RING_CNT; i++) {
		error = bus_dmamap_create(txr->bnx_tx_mtag,
		    BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
		    &txr->bnx_tx_buf[i].bnx_tx_dmamap);
		if (error) {
			int j;

			for (j = 0; j < i; ++j) {
				bus_dmamap_destroy(txr->bnx_tx_mtag,
				    txr->bnx_tx_buf[j].bnx_tx_dmamap);
			}
			bus_dma_tag_destroy(txr->bnx_tx_mtag);
			txr->bnx_tx_mtag = NULL;

			device_printf(txr->bnx_sc->bnx_dev,
			    "could not create TX mbuf DMA map\n");
			return error;
		}
	}

	/*
	 * Create DMA resources for the TX ring.
	 */
	error = bnx_dma_block_alloc(txr->bnx_sc, BGE_TX_RING_SZ,
	    &txr->bnx_tx_ring_tag,
	    &txr->bnx_tx_ring_map,
	    (void *)&txr->bnx_tx_ring,
	    &txr->bnx_tx_ring_paddr);
	if (error) {
		device_printf(txr->bnx_sc->bnx_dev,
		    "could not create TX ring\n");
		return error;
	}

	txr->bnx_tx_flags |= BNX_TX_FLAG_SHORTDMA;
	txr->bnx_tx_wreg = BNX_TX_WREG_NSEGS;

	return 0;
}

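/*
 * NOTE:
 * The maximum DMA segment size above is capped at MCLBYTES on the
 * BCM57766, which reads like a workaround for a DMA limitation of
 * that ASIC; other chips allow PAGE_SIZE segments.  BUS_DMA_ONEBPAGE
 * asks the platform to try to keep each segment within a single
 * page, reducing the segment count for typical TSO bursts.
 */
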
static void
bnx_destroy_tx_ring(struct bnx_tx_ring *txr)
{
	/* Destroy TX mbuf DMA resources. */
	if (txr->bnx_tx_mtag != NULL) {
		int i;

		for (i = 0; i < BGE_TX_RING_CNT; i++) {
			KKASSERT(txr->bnx_tx_buf[i].bnx_tx_mbuf == NULL);
			bus_dmamap_destroy(txr->bnx_tx_mtag,
			    txr->bnx_tx_buf[i].bnx_tx_dmamap);
		}
		bus_dma_tag_destroy(txr->bnx_tx_mtag);
	}

	/* Destroy TX ring */
	bnx_dma_block_free(txr->bnx_tx_ring_tag,
	    txr->bnx_tx_ring_map, txr->bnx_tx_ring);
}

static int
bnx_sysctl_force_defrag(SYSCTL_HANDLER_ARGS)
{
	struct bnx_softc *sc = (void *)arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct bnx_tx_ring *txr = &sc->bnx_tx_ring[0];
	int error, defrag, i;

	if (txr->bnx_tx_flags & BNX_TX_FLAG_FORCE_DEFRAG)
		defrag = 1;
	else
		defrag = 0;

	error = sysctl_handle_int(oidp, &defrag, 0, req);
	if (error || req->newptr == NULL)
		return error;

	ifnet_serialize_all(ifp);
	for (i = 0; i < sc->bnx_tx_ringcnt; ++i) {
		txr = &sc->bnx_tx_ring[i];
		if (defrag)
			txr->bnx_tx_flags |= BNX_TX_FLAG_FORCE_DEFRAG;
		else
			txr->bnx_tx_flags &= ~BNX_TX_FLAG_FORCE_DEFRAG;
	}
	ifnet_deserialize_all(ifp);

	return 0;
}

static int
bnx_sysctl_tx_wreg(SYSCTL_HANDLER_ARGS)
{
	struct bnx_softc *sc = (void *)arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct bnx_tx_ring *txr = &sc->bnx_tx_ring[0];
	int error, tx_wreg, i;

	tx_wreg = txr->bnx_tx_wreg;
	error = sysctl_handle_int(oidp, &tx_wreg, 0, req);
	if (error || req->newptr == NULL)
		return error;

	ifnet_serialize_all(ifp);
	for (i = 0; i < sc->bnx_tx_ringcnt; ++i)
		sc->bnx_tx_ring[i].bnx_tx_wreg = tx_wreg;
	ifnet_deserialize_all(ifp);

	return 0;
}

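/*
 * The two sysctl handlers above follow the usual pattern: read the
 * current value from ring 0, let userland overwrite it, then apply
 * it to every TX ring under ifnet serialization.  Assuming the nodes
 * are attached under the device's sysctl tree elsewhere in the
 * driver (the node names here are illustrative only), usage would
 * look like:
 *
 *	sysctl dev.bnx.0.force_defrag=1
 *	sysctl dev.bnx.0.tx_wreg=16
 */
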
static int
bnx_create_rx_ret_ring(struct bnx_rx_ret_ring *ret)
{
	int error;

	lwkt_serialize_init(&ret->bnx_rx_ret_serialize);

	/*
	 * Create DMA resources for the RX return ring.
	 */
	error = bnx_dma_block_alloc(ret->bnx_sc,
	    BGE_RX_RTN_RING_SZ(BNX_RETURN_RING_CNT),
	    &ret->bnx_rx_ret_ring_tag,
	    &ret->bnx_rx_ret_ring_map,
	    (void *)&ret->bnx_rx_ret_ring,
	    &ret->bnx_rx_ret_ring_paddr);
	if (error) {
		device_printf(ret->bnx_sc->bnx_dev,
		    "could not create RX ret ring\n");
		return error;
	}

	/* Shadow standard ring's RX mbuf DMA tag */
	ret->bnx_rx_mtag = ret->bnx_std->bnx_rx_mtag;

	/*
	 * Create tmp DMA map for RX mbufs.
	 */
	error = bus_dmamap_create(ret->bnx_rx_mtag, BUS_DMA_WAITOK,
	    &ret->bnx_rx_tmpmap);
	if (error) {
		device_printf(ret->bnx_sc->bnx_dev,
		    "could not create tmp RX mbuf DMA map\n");
		ret->bnx_rx_mtag = NULL;
		return error;
	}
	return 0;
}

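/*
 * NOTE:
 * The return ring owns no mbuf DMA tag of its own; it shadows the
 * standard ring's tag so that maps are interchangeable between the
 * two.  The temporary map created above is presumably what allows
 * the RX path to load a replacement mbuf first and then swap maps
 * with the ring slot, instead of unloading the slot's map up front.
 */
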
static void
bnx_destroy_rx_ret_ring(struct bnx_rx_ret_ring *ret)
{
	/* Destroy tmp RX mbuf DMA map */
	if (ret->bnx_rx_mtag != NULL)
		bus_dmamap_destroy(ret->bnx_rx_mtag, ret->bnx_rx_tmpmap);

	/* Destroy RX return ring */
	bnx_dma_block_free(ret->bnx_rx_ret_ring_tag,
	    ret->bnx_rx_ret_ring_map, ret->bnx_rx_ret_ring);
}

static int
bnx_alloc_intr(struct bnx_softc *sc)
{
	struct bnx_intr_data *intr;
	u_int intr_flags;

	sc->bnx_intr_cnt = 1;

	intr = &sc->bnx_intr_data[0];
	intr->bnx_sc = sc;
	intr->bnx_ret = &sc->bnx_rx_ret_ring[0];
	intr->bnx_txr = &sc->bnx_tx_ring[0];
	intr->bnx_intr_serialize = &sc->bnx_main_serialize;
	callout_init_mp(&intr->bnx_intr_timer);
	intr->bnx_intr_check = bnx_check_intr;
	intr->bnx_saved_status_tag = &intr->bnx_ret->bnx_saved_status_tag;

	sc->bnx_intr_type = pci_alloc_1intr(sc->bnx_dev, bnx_msi_enable,
	    &intr->bnx_intr_rid, &intr_flags);

	intr->bnx_intr_res = bus_alloc_resource_any(sc->bnx_dev, SYS_RES_IRQ,
	    &intr->bnx_intr_rid, intr_flags);
	if (intr->bnx_intr_res == NULL) {
		device_printf(sc->bnx_dev, "could not alloc interrupt\n");
		return ENXIO;
	}

	if (sc->bnx_intr_type == PCI_INTR_TYPE_MSI) {
		sc->bnx_flags |= BNX_FLAG_ONESHOT_MSI;
		bnx_enable_msi(sc);

		if (sc->bnx_flags & BNX_FLAG_ONESHOT_MSI) {
			intr->bnx_intr_func = bnx_msi_oneshot;
			if (bootverbose)
				device_printf(sc->bnx_dev, "oneshot MSI\n");
		} else {
			intr->bnx_intr_func = bnx_msi;
		}
	} else {
		intr->bnx_intr_func = bnx_intr_legacy;
	}
	intr->bnx_intr_arg = sc;
	intr->bnx_intr_cpuid = rman_get_cpuid(intr->bnx_intr_res);

	intr->bnx_txr->bnx_tx_cpuid = intr->bnx_intr_cpuid;

	return 0;
}

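/*
 * NOTE:
 * pci_alloc_1intr() is DragonFly's single-vector helper: depending
 * on the bnx_msi_enable tunable it either sets up one MSI message or
 * falls back to the legacy INTx line, returns the resulting
 * interrupt type, and fills in the resource id and flags consumed by
 * the bus_alloc_resource_any() call above.
 */
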
static int
bnx_setup_intr(struct bnx_softc *sc)
{
	int error, i;

	for (i = 0; i < sc->bnx_intr_cnt; ++i) {
		struct bnx_intr_data *intr = &sc->bnx_intr_data[i];

		error = bus_setup_intr_descr(sc->bnx_dev, intr->bnx_intr_res,
		    INTR_MPSAFE, intr->bnx_intr_func, intr->bnx_intr_arg,
		    &intr->bnx_intr_hand, intr->bnx_intr_serialize,
		    intr->bnx_intr_desc);
		if (error) {
			device_printf(sc->bnx_dev,
			    "could not set up %dth intr\n", i);
			bnx_teardown_intr(sc, i);
			return error;
		}
	}
	return 0;
}

static void
bnx_teardown_intr(struct bnx_softc *sc, int cnt)
{
	int i;

	for (i = 0; i < cnt; ++i) {
		struct bnx_intr_data *intr = &sc->bnx_intr_data[i];

		bus_teardown_intr(sc->bnx_dev, intr->bnx_intr_res,
		    intr->bnx_intr_hand);
	}
}

static void
bnx_free_intr(struct bnx_softc *sc)
{
	struct bnx_intr_data *intr;

	KKASSERT(sc->bnx_intr_cnt <= 1);
	intr = &sc->bnx_intr_data[0];

	if (intr->bnx_intr_res != NULL) {
		bus_release_resource(sc->bnx_dev, SYS_RES_IRQ,
		    intr->bnx_intr_rid, intr->bnx_intr_res);
	}
	if (sc->bnx_intr_type == PCI_INTR_TYPE_MSI)
		pci_release_msi(sc->bnx_dev);
}

static void
bnx_setup_serialize(struct bnx_softc *sc)
{
	int i = 0, j;

	/*
	 * Allocate serializer array
	 */

	/* Main + RX STD + TX + RX RET */
	sc->bnx_serialize_cnt = 1 + 1 + sc->bnx_tx_ringcnt + sc->bnx_rx_retcnt;

	sc->bnx_serialize =
	    kmalloc(sc->bnx_serialize_cnt * sizeof(struct lwkt_serialize *),
		M_DEVBUF, M_WAITOK | M_ZERO);

	/*
	 * Setup serializers
	 *
	 * NOTE: Order is critical
	 */

	KKASSERT(i < sc->bnx_serialize_cnt);
	sc->bnx_serialize[i++] = &sc->bnx_main_serialize;

	KKASSERT(i < sc->bnx_serialize_cnt);
	sc->bnx_serialize[i++] = &sc->bnx_rx_std_ring.bnx_rx_std_serialize;

	for (j = 0; j < sc->bnx_rx_retcnt; ++j) {
		KKASSERT(i < sc->bnx_serialize_cnt);
		sc->bnx_serialize[i++] =
		    &sc->bnx_rx_ret_ring[j].bnx_rx_ret_serialize;
	}

	for (j = 0; j < sc->bnx_tx_ringcnt; ++j) {
		KKASSERT(i < sc->bnx_serialize_cnt);
		sc->bnx_serialize[i++] =
		    &sc->bnx_tx_ring[j].bnx_tx_serialize;
	}

	KKASSERT(i == sc->bnx_serialize_cnt);
}

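/*
 * With the default single TX ring and single RX return ring the
 * array built above is:
 *
 *	bnx_serialize[0] = &sc->bnx_main_serialize
 *	bnx_serialize[1] = &sc->bnx_rx_std_ring.bnx_rx_std_serialize
 *	bnx_serialize[2] = &sc->bnx_rx_ret_ring[0].bnx_rx_ret_serialize
 *	bnx_serialize[3] = &sc->bnx_tx_ring[0].bnx_tx_serialize
 *
 * The ifnet_serialize_array_*() helpers below always acquire the
 * serializers in array order, which is why the order is critical:
 * every path must take them in the same order to avoid deadlock.
 */
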
static void
bnx_serialize(struct ifnet *ifp, enum ifnet_serialize slz)
{
	struct bnx_softc *sc = ifp->if_softc;

	ifnet_serialize_array_enter(sc->bnx_serialize,
	    sc->bnx_serialize_cnt, slz);
}

static void
bnx_deserialize(struct ifnet *ifp, enum ifnet_serialize slz)
{
	struct bnx_softc *sc = ifp->if_softc;

	ifnet_serialize_array_exit(sc->bnx_serialize,
	    sc->bnx_serialize_cnt, slz);
}

static int
bnx_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz)
{
	struct bnx_softc *sc = ifp->if_softc;

	return ifnet_serialize_array_try(sc->bnx_serialize,
	    sc->bnx_serialize_cnt, slz);
}

#ifdef INVARIANTS

static void
bnx_serialize_assert(struct ifnet *ifp, enum ifnet_serialize slz,
    boolean_t serialized)
{
	struct bnx_softc *sc = ifp->if_softc;

	ifnet_serialize_array_assert(sc->bnx_serialize, sc->bnx_serialize_cnt,
	    slz, serialized);
}

#endif	/* INVARIANTS */

#ifdef IFPOLL_ENABLE

static int
bnx_sysctl_npoll_offset(SYSCTL_HANDLER_ARGS)
{
	struct bnx_softc *sc = (void *)arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error, off;

	off = sc->bnx_npoll_rxoff;
	error = sysctl_handle_int(oidp, &off, 0, req);
	if (error || req->newptr == NULL)
		return error;
	if (off < 0)
		return EINVAL;

	ifnet_serialize_all(ifp);
	if (off >= ncpus2 || off % sc->bnx_rx_retcnt != 0) {
		error = EINVAL;
	} else {
		error = 0;
		sc->bnx_npoll_txoff = off;
		sc->bnx_npoll_rxoff = off;
	}
	ifnet_deserialize_all(ifp);

	return error;
}

static int
bnx_sysctl_npoll_rxoff(SYSCTL_HANDLER_ARGS)
{
	struct bnx_softc *sc = (void *)arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error, off;

	off = sc->bnx_npoll_rxoff;
	error = sysctl_handle_int(oidp, &off, 0, req);
	if (error || req->newptr == NULL)
		return error;
	if (off < 0)
		return EINVAL;

	ifnet_serialize_all(ifp);
	if (off >= ncpus2 || off % sc->bnx_rx_retcnt != 0) {
		error = EINVAL;
	} else {
		error = 0;
		sc->bnx_npoll_rxoff = off;
	}
	ifnet_deserialize_all(ifp);

	return error;
}

static int
bnx_sysctl_npoll_txoff(SYSCTL_HANDLER_ARGS)
{
	struct bnx_softc *sc = (void *)arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error, off;

	off = sc->bnx_npoll_txoff;
	error = sysctl_handle_int(oidp, &off, 0, req);
	if (error || req->newptr == NULL)
		return error;
	if (off < 0)
		return EINVAL;

	ifnet_serialize_all(ifp);
	if (off >= ncpus2) {
		error = EINVAL;
	} else {
		error = 0;
		sc->bnx_npoll_txoff = off;
	}
	ifnet_deserialize_all(ifp);

	return error;
}

#endif	/* IFPOLL_ENABLE */

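/*
 * Example of the offset constraints above: ncpus2 is the CPU count
 * rounded down to a power of 2, and the RX polling offset must be a
 * multiple of the return-ring count so that the rings map onto
 * disjoint CPUs.  With bnx_rx_retcnt == 4 on an 8-CPU (ncpus2 == 8)
 * machine, the only accepted rxoff values are 0 and 4.
 */
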
static void
bnx_set_tick_cpuid(struct bnx_softc *sc, boolean_t polling)
{
	if (polling)
		sc->bnx_tick_cpuid = 0; /* XXX */
	else
		sc->bnx_tick_cpuid = sc->bnx_intr_data[0].bnx_intr_cpuid;
}

static void
bnx_rx_std_refill_ithread(void *xstd)
{
	struct bnx_rx_std_ring *std = xstd;
	struct globaldata *gd = mycpu;

	crit_enter_gd(gd);

	while (!std->bnx_rx_std_stop) {
		if (std->bnx_rx_std_refill) {
			lwkt_serialize_handler_call(
			    &std->bnx_rx_std_serialize,
			    bnx_rx_std_refill, std, NULL);
		}

		crit_exit_gd(gd);
		crit_enter_gd(gd);

		if (!std->bnx_rx_std_refill && !std->bnx_rx_std_stop) {
			lwkt_deschedule_self(gd->gd_curthread);
			lwkt_switch();
		}
	}

	crit_exit_gd(gd);

	wakeup(std);

	lwkt_exit();
}

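/*
 * NOTE:
 * The refill ithread above sleeps by descheduling itself inside a
 * critical section and re-checks the refill/stop flags after the
 * crit_exit/crit_enter window, so a producer that sets a flag and
 * reschedules the thread at that point is not missed.  The final
 * wakeup(std) presumably pairs with a tsleep() in the stop path,
 * letting the stopping thread wait until the ithread has exited.
 */
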
static void
bnx_rx_std_refill(void *xstd, void *frame __unused)
{
	struct bnx_rx_std_ring *std = xstd;
	uint16_t check_idx;
	int cnt, refill;

again:
	cnt = 0;
	check_idx = std->bnx_rx_std;

	cpu_lfence();
	refill = std->bnx_rx_std_refill;
	atomic_clear_int(&std->bnx_rx_std_refill, refill);

	for (;;) {
		struct bnx_rx_buf *rb;

		BNX_INC(check_idx, BGE_STD_RX_RING_CNT);
		rb = &std->bnx_rx_std_buf[check_idx];

		if (rb->bnx_rx_refilled) {
			cpu_lfence();
			bnx_setup_rxdesc_std(std, check_idx);
			std->bnx_rx_std = check_idx;
			++cnt;
		} else {
			break;
		}
	}

	if (cnt) {
		bnx_writembx(std->bnx_sc, BGE_MBX_RX_STD_PROD_LO,
		    std->bnx_rx_std);
	}

	if (std->bnx_rx_std_refill)
		goto again;

	atomic_poll_release_int(&std->bnx_rx_std_running);
	cpu_mfence();

	if (std->bnx_rx_std_refill)
		goto again;
}

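/*
 * NOTE:
 * The tail of bnx_rx_std_refill() above closes a classic lost-wakeup
 * race.  A producer (an RX return ring) is expected to mark slots
 * refilled, set bnx_rx_std_refill, and only schedule the refill
 * ithread when it wins atomic_poll_acquire_int(&std->bnx_rx_std_running).
 * The consumer therefore re-checks bnx_rx_std_refill after releasing
 * bnx_rx_std_running, with a memory fence in between: a request that
 * arrived between the first check and the release would otherwise be
 * stranded, since its producer lost the acquire race and did not
 * schedule the ithread.
 */
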
static int
bnx_sysctl_std_refill(SYSCTL_HANDLER_ARGS)
{
	struct bnx_softc *sc = (void *)arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct bnx_rx_ret_ring *ret = &sc->bnx_rx_ret_ring[0];
	int error, cntmax, i;

	cntmax = ret->bnx_rx_cntmax;
	error = sysctl_handle_int(oidp, &cntmax, 0, req);
	if (error || req->newptr == NULL)
		return error;

	ifnet_serialize_all(ifp);

	if ((cntmax * sc->bnx_rx_retcnt) > BGE_STD_RX_RING_CNT / 2) {
		error = EINVAL;
		goto back;
	}

	/* NB: iterate over the RX return rings, not the TX rings */
	for (i = 0; i < sc->bnx_rx_retcnt; ++i)
		sc->bnx_rx_ret_ring[i].bnx_rx_cntmax = cntmax;
	error = 0;

back:
	ifnet_deserialize_all(ifp);

	return error;
}