/*
 * Copyright (c) 2001 Wind River Systems
 * Copyright (c) 1997, 1998, 1999, 2001
 *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/bge/if_bge.c,v 1.3.2.39 2005/07/03 03:41:18 silby Exp $
 */
#include "opt_ifpoll.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/interrupt.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/serialize.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <netinet/ip.h>
#include <netinet/tcp.h>

#include <net/ethernet.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_poll.h>
#include <net/if_types.h>
#include <net/ifq_var.h>
#include <net/toeplitz.h>
#include <net/toeplitz2.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>

#include <dev/netif/mii_layer/mii.h>
#include <dev/netif/mii_layer/miivar.h>
#include <dev/netif/mii_layer/brgphyreg.h>

#include <bus/pci/pcidevs.h>
#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>

#include <dev/netif/bge/if_bgereg.h>
#include <dev/netif/bnx/if_bnxvar.h>

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"
#define BNX_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)

#define BNX_INTR_CKINTVL	((10 * hz) / 1000)	/* 10ms */
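/*
 * hz is the kernel tick rate (ticks per second), so (10 * hz) / 1000
 * is just 10ms converted to callout ticks: e.g. hz = 1000 yields 10
 * ticks, hz = 100 yields 1 tick.
 */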
#ifdef BNX_RSS_DEBUG
#define BNX_RSS_DPRINTF(sc, lvl, fmt, ...) \
do { \
	if (sc->bnx_rss_debug >= lvl) \
		if_printf(&sc->arpcom.ac_if, fmt, __VA_ARGS__); \
} while (0)
#else	/* !BNX_RSS_DEBUG */
#define BNX_RSS_DPRINTF(sc, lvl, fmt, ...)	((void)0)
#endif	/* BNX_RSS_DEBUG */
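/*
 * The do { ... } while (0) wrapper above is the standard C idiom for
 * making a multi-statement macro behave as a single statement, so a
 * use such as the following (hypothetical) fragment parses correctly:
 *
 *	if (error)
 *		BNX_RSS_DPRINTF(sc, 1, "refill failed: %d\n", error);
 *	else
 *		bnx_rx_std_refill_sched(ret, std);
 */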
static const struct bnx_type {
	uint16_t	bnx_vid;
	uint16_t	bnx_did;
	char		*bnx_name;
} bnx_devs[] = {
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5717,
		"Broadcom BCM5717 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5717C,
		"Broadcom BCM5717C Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5718,
		"Broadcom BCM5718 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5719,
		"Broadcom BCM5719 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5720_ALT,
		"Broadcom BCM5720 Gigabit Ethernet" },

	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5725,
		"Broadcom BCM5725 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5727,
		"Broadcom BCM5727 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5762,
		"Broadcom BCM5762 Gigabit Ethernet" },

	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57761,
		"Broadcom BCM57761 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57762,
		"Broadcom BCM57762 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57765,
		"Broadcom BCM57765 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57766,
		"Broadcom BCM57766 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57781,
		"Broadcom BCM57781 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57782,
		"Broadcom BCM57782 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57785,
		"Broadcom BCM57785 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57786,
		"Broadcom BCM57786 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57791,
		"Broadcom BCM57791 Fast Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57795,
		"Broadcom BCM57795 Fast Ethernet" },

	{ 0, 0, NULL }
};
static const int bnx_tx_mailbox[BNX_TX_RING_MAX] = {
	BGE_MBX_TX_HOST_PROD0_LO,
	BGE_MBX_TX_HOST_PROD0_HI,
	BGE_MBX_TX_HOST_PROD1_LO,
	BGE_MBX_TX_HOST_PROD1_HI
};
#define BNX_IS_JUMBO_CAPABLE(sc)	((sc)->bnx_flags & BNX_FLAG_JUMBO)
#define BNX_IS_5717_PLUS(sc)		((sc)->bnx_flags & BNX_FLAG_5717_PLUS)
#define BNX_IS_57765_PLUS(sc)		((sc)->bnx_flags & BNX_FLAG_57765_PLUS)
#define BNX_IS_57765_FAMILY(sc)	\
	((sc)->bnx_flags & BNX_FLAG_57765_FAMILY)

typedef int	(*bnx_eaddr_fcn_t)(struct bnx_softc *, uint8_t[]);
static int	bnx_probe(device_t);
static int	bnx_attach(device_t);
static int	bnx_detach(device_t);
static void	bnx_shutdown(device_t);
static int	bnx_suspend(device_t);
static int	bnx_resume(device_t);
static int	bnx_miibus_readreg(device_t, int, int);
static int	bnx_miibus_writereg(device_t, int, int, int);
static void	bnx_miibus_statchg(device_t);

static int	bnx_handle_status(struct bnx_softc *);

static void	bnx_npoll(struct ifnet *, struct ifpoll_info *);
static void	bnx_npoll_rx(struct ifnet *, void *, int);
static void	bnx_npoll_tx(struct ifnet *, void *, int);
static void	bnx_npoll_tx_notag(struct ifnet *, void *, int);
static void	bnx_npoll_status(struct ifnet *);
static void	bnx_npoll_status_notag(struct ifnet *);

static void	bnx_intr_legacy(void *);
static void	bnx_msi(void *);
static void	bnx_intr(struct bnx_softc *);
static void	bnx_msix_status(void *);
static void	bnx_msix_tx_status(void *);
static void	bnx_msix_rx(void *);
static void	bnx_msix_rxtx(void *);
static void	bnx_enable_intr(struct bnx_softc *);
static void	bnx_disable_intr(struct bnx_softc *);
static void	bnx_txeof(struct bnx_tx_ring *, uint16_t);
static void	bnx_rxeof(struct bnx_rx_ret_ring *, uint16_t, int);
static int	bnx_alloc_intr(struct bnx_softc *);
static int	bnx_setup_intr(struct bnx_softc *);
static void	bnx_free_intr(struct bnx_softc *);
static void	bnx_teardown_intr(struct bnx_softc *, int);
static int	bnx_alloc_msix(struct bnx_softc *);
static void	bnx_free_msix(struct bnx_softc *, boolean_t);
static void	bnx_check_intr_rxtx(void *);
static void	bnx_check_intr_rx(void *);
static void	bnx_check_intr_tx(void *);
static void	bnx_rx_std_refill_ithread(void *);
static void	bnx_rx_std_refill(void *, void *);
static void	bnx_rx_std_refill_sched_ipi(void *);
static void	bnx_rx_std_refill_stop(void *);
static void	bnx_rx_std_refill_sched(struct bnx_rx_ret_ring *,
		    struct bnx_rx_std_ring *);

static void	bnx_start(struct ifnet *, struct ifaltq_subque *);
static int	bnx_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	bnx_init(void *);
static void	bnx_stop(struct bnx_softc *);
static void	bnx_watchdog(struct ifaltq_subque *);
static int	bnx_ifmedia_upd(struct ifnet *);
static void	bnx_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static void	bnx_tick(void *);
static void	bnx_serialize(struct ifnet *, enum ifnet_serialize);
static void	bnx_deserialize(struct ifnet *, enum ifnet_serialize);
static int	bnx_tryserialize(struct ifnet *, enum ifnet_serialize);
#ifdef INVARIANTS
static void	bnx_serialize_assert(struct ifnet *, enum ifnet_serialize,
		    boolean_t);
#endif
static void	bnx_serialize_skipmain(struct bnx_softc *);
static void	bnx_deserialize_skipmain(struct bnx_softc *sc);

static int	bnx_alloc_jumbo_mem(struct bnx_softc *);
static void	bnx_free_jumbo_mem(struct bnx_softc *);
static struct bnx_jslot
		*bnx_jalloc(struct bnx_softc *);
static void	bnx_jfree(void *);
static void	bnx_jref(void *);
static int	bnx_newbuf_std(struct bnx_rx_ret_ring *, int, int);
static int	bnx_newbuf_jumbo(struct bnx_softc *, int, int);
static void	bnx_setup_rxdesc_std(struct bnx_rx_std_ring *, int);
static void	bnx_setup_rxdesc_jumbo(struct bnx_softc *, int);
static int	bnx_init_rx_ring_std(struct bnx_rx_std_ring *);
static void	bnx_free_rx_ring_std(struct bnx_rx_std_ring *);
static int	bnx_init_rx_ring_jumbo(struct bnx_softc *);
static void	bnx_free_rx_ring_jumbo(struct bnx_softc *);
static void	bnx_free_tx_ring(struct bnx_tx_ring *);
static int	bnx_init_tx_ring(struct bnx_tx_ring *);
static int	bnx_create_tx_ring(struct bnx_tx_ring *);
static void	bnx_destroy_tx_ring(struct bnx_tx_ring *);
static int	bnx_create_rx_ret_ring(struct bnx_rx_ret_ring *);
static void	bnx_destroy_rx_ret_ring(struct bnx_rx_ret_ring *);
static int	bnx_dma_alloc(device_t);
static void	bnx_dma_free(struct bnx_softc *);
static int	bnx_dma_block_alloc(struct bnx_softc *, bus_size_t,
		    bus_dma_tag_t *, bus_dmamap_t *, void **, bus_addr_t *);
static void	bnx_dma_block_free(bus_dma_tag_t, bus_dmamap_t, void *);
static struct mbuf *
		bnx_defrag_shortdma(struct mbuf *);
static int	bnx_encap(struct bnx_tx_ring *, struct mbuf **,
		    uint32_t *, int *);
static int	bnx_setup_tso(struct bnx_tx_ring *, struct mbuf **,
		    uint16_t *, uint16_t *);
static void	bnx_setup_serialize(struct bnx_softc *);
static void	bnx_set_tick_cpuid(struct bnx_softc *, boolean_t);
static void	bnx_setup_ring_cnt(struct bnx_softc *);

static struct pktinfo *bnx_rss_info(struct pktinfo *,
		    const struct bge_rx_bd *);
static void	bnx_init_rss(struct bnx_softc *);
static void	bnx_reset(struct bnx_softc *);
static int	bnx_chipinit(struct bnx_softc *);
static int	bnx_blockinit(struct bnx_softc *);
static void	bnx_stop_block(struct bnx_softc *, bus_size_t, uint32_t);
static void	bnx_enable_msi(struct bnx_softc *, boolean_t);
static void	bnx_setmulti(struct bnx_softc *);
static void	bnx_setpromisc(struct bnx_softc *);
static void	bnx_stats_update_regs(struct bnx_softc *);
static uint32_t	bnx_dma_swap_options(struct bnx_softc *);

static uint32_t	bnx_readmem_ind(struct bnx_softc *, uint32_t);
static void	bnx_writemem_ind(struct bnx_softc *, uint32_t, uint32_t);
static uint32_t	bnx_readreg_ind(struct bnx_softc *, uint32_t);
static void	bnx_writemem_direct(struct bnx_softc *, uint32_t, uint32_t);
static void	bnx_writembx(struct bnx_softc *, int, int);
static int	bnx_read_nvram(struct bnx_softc *, caddr_t, int, int);
static uint8_t	bnx_eeprom_getbyte(struct bnx_softc *, uint32_t, uint8_t *);
static int	bnx_read_eeprom(struct bnx_softc *, caddr_t, uint32_t, size_t);

static void	bnx_tbi_link_upd(struct bnx_softc *, uint32_t);
static void	bnx_copper_link_upd(struct bnx_softc *, uint32_t);
static void	bnx_autopoll_link_upd(struct bnx_softc *, uint32_t);
static void	bnx_link_poll(struct bnx_softc *);

static int	bnx_get_eaddr_mem(struct bnx_softc *, uint8_t[]);
static int	bnx_get_eaddr_nvram(struct bnx_softc *, uint8_t[]);
static int	bnx_get_eaddr_eeprom(struct bnx_softc *, uint8_t[]);
static int	bnx_get_eaddr(struct bnx_softc *, uint8_t[]);

static void	bnx_coal_change(struct bnx_softc *);
static int	bnx_sysctl_force_defrag(SYSCTL_HANDLER_ARGS);
static int	bnx_sysctl_tx_wreg(SYSCTL_HANDLER_ARGS);
static int	bnx_sysctl_rx_coal_ticks(SYSCTL_HANDLER_ARGS);
static int	bnx_sysctl_tx_coal_ticks(SYSCTL_HANDLER_ARGS);
static int	bnx_sysctl_rx_coal_bds(SYSCTL_HANDLER_ARGS);
static int	bnx_sysctl_rx_coal_bds_poll(SYSCTL_HANDLER_ARGS);
static int	bnx_sysctl_tx_coal_bds(SYSCTL_HANDLER_ARGS);
static int	bnx_sysctl_tx_coal_bds_poll(SYSCTL_HANDLER_ARGS);
static int	bnx_sysctl_rx_coal_bds_int(SYSCTL_HANDLER_ARGS);
static int	bnx_sysctl_tx_coal_bds_int(SYSCTL_HANDLER_ARGS);
static int	bnx_sysctl_coal_chg(SYSCTL_HANDLER_ARGS, uint32_t *,
		    int, int, uint32_t);

static int	bnx_sysctl_npoll_offset(SYSCTL_HANDLER_ARGS);
static int	bnx_sysctl_npoll_rxoff(SYSCTL_HANDLER_ARGS);
static int	bnx_sysctl_npoll_txoff(SYSCTL_HANDLER_ARGS);

static int	bnx_sysctl_std_refill(SYSCTL_HANDLER_ARGS);
static int	bnx_msi_enable = 1;
static int	bnx_msix_enable = 1;

static int	bnx_rx_rings = 0;	/* auto */
static int	bnx_tx_rings = 0;	/* auto */

TUNABLE_INT("hw.bnx.msi.enable", &bnx_msi_enable);
TUNABLE_INT("hw.bnx.msix.enable", &bnx_msix_enable);
TUNABLE_INT("hw.bnx.rx_rings", &bnx_rx_rings);
TUNABLE_INT("hw.bnx.tx_rings", &bnx_tx_rings);
static device_method_t bnx_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		bnx_probe),
	DEVMETHOD(device_attach,	bnx_attach),
	DEVMETHOD(device_detach,	bnx_detach),
	DEVMETHOD(device_shutdown,	bnx_shutdown),
	DEVMETHOD(device_suspend,	bnx_suspend),
	DEVMETHOD(device_resume,	bnx_resume),

	/* Bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	bnx_miibus_readreg),
	DEVMETHOD(miibus_writereg,	bnx_miibus_writereg),
	DEVMETHOD(miibus_statchg,	bnx_miibus_statchg),

	DEVMETHOD_END
};

static DEFINE_CLASS_0(bnx, bnx_driver, bnx_methods, sizeof(struct bnx_softc));
static devclass_t bnx_devclass;

DECLARE_DUMMY_MODULE(if_bnx);
DRIVER_MODULE(if_bnx, pci, bnx_driver, bnx_devclass, NULL, NULL);
DRIVER_MODULE(miibus, bnx, miibus_driver, miibus_devclass, NULL, NULL);
static uint32_t
bnx_readmem_ind(struct bnx_softc *sc, uint32_t off)
{
	device_t dev = sc->bnx_dev;
	uint32_t val;

	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
	val = pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4);
	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
	return (val);
}

static void
bnx_writemem_ind(struct bnx_softc *sc, uint32_t off, uint32_t val)
{
	device_t dev = sc->bnx_dev;

	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
	pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
}

static void
bnx_writemem_direct(struct bnx_softc *sc, uint32_t off, uint32_t val)
{
	CSR_WRITE_4(sc, off, val);
}

static void
bnx_writembx(struct bnx_softc *sc, int off, int val)
{
	CSR_WRITE_4(sc, off, val);
}
/*
 * Read a sequence of bytes from NVRAM.
 */
static int
bnx_read_nvram(struct bnx_softc *sc, caddr_t dest, int off, int cnt)
{
	/* Not supported; fail so the caller falls back to other methods. */
	return (1);
}
/*
 * Read a byte of data stored in the EEPROM at address 'addr.'  The
 * BCM570x supports both the traditional bitbang interface and an
 * auto access interface for reading the EEPROM.  We use the auto
 * access method.
 */
static uint8_t
bnx_eeprom_getbyte(struct bnx_softc *sc, uint32_t addr, uint8_t *dest)
{
	int i;
	uint32_t byte = 0;

	/*
	 * Enable use of auto EEPROM access so we can avoid
	 * having to use the bitbang method.
	 */
	BNX_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);

	/* Reset the EEPROM, load the clock period. */
	CSR_WRITE_4(sc, BGE_EE_ADDR,
	    BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
	DELAY(20);

	/* Issue the read EEPROM command. */
	CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);

	/* Wait for completion */
	for (i = 0; i < BNX_TIMEOUT * 10; i++) {
		DELAY(10);
		if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
			break;
	}

	if (i == BNX_TIMEOUT * 10) {
		if_printf(&sc->arpcom.ac_if, "eeprom read timed out\n");
		return (1);
	}

	/* Get result. */
	byte = CSR_READ_4(sc, BGE_EE_DATA);

	*dest = (byte >> ((addr % 4) * 8)) & 0xFF;

	return (0);
}
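/*
 * BGE_EE_DATA returns a whole 32-bit word, so the shift above selects
 * the byte lane corresponding to the requested address.  For example,
 * addr = 6 gives addr % 4 == 2, hence a 16-bit right shift, leaving
 * byte 6 in the low 8 bits before the 0xFF mask is applied.
 */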
/*
 * Read a sequence of bytes from the EEPROM.
 */
static int
bnx_read_eeprom(struct bnx_softc *sc, caddr_t dest, uint32_t off, size_t len)
{
	size_t i;
	int err;
	uint8_t byte;

	for (byte = 0, err = 0, i = 0; i < len; i++) {
		err = bnx_eeprom_getbyte(sc, off + i, &byte);
		if (err)
			break;
		*(dest + i) = byte;
	}

	return (err ? 1 : 0);
}
static int
bnx_miibus_readreg(device_t dev, int phy, int reg)
{
	struct bnx_softc *sc = device_get_softc(dev);
	uint32_t val;
	int i;

	KASSERT(phy == sc->bnx_phyno,
	    ("invalid phyno %d, should be %d", phy, sc->bnx_phyno));

	/* Clear the autopoll bit if set, otherwise may trigger PCI errors. */
	if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) {
		CSR_WRITE_4(sc, BGE_MI_MODE,
		    sc->bnx_mi_mode & ~BGE_MIMODE_AUTOPOLL);
		DELAY(80);
	}

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ | BGE_MICOMM_BUSY |
	    BGE_MIPHY(phy) | BGE_MIREG(reg));

	/* Poll for the PHY register access to complete. */
	for (i = 0; i < BNX_TIMEOUT; i++) {
		DELAY(10);
		val = CSR_READ_4(sc, BGE_MI_COMM);
		if ((val & BGE_MICOMM_BUSY) == 0) {
			DELAY(5);
			val = CSR_READ_4(sc, BGE_MI_COMM);
			break;
		}
	}
	if (i == BNX_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if, "PHY read timed out "
		    "(phy %d, reg %d, val 0x%08x)\n", phy, reg, val);
		val = 0;
	}

	/* Restore the autopoll bit if necessary. */
	if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) {
		CSR_WRITE_4(sc, BGE_MI_MODE, sc->bnx_mi_mode);
		DELAY(80);
	}

	if (val & BGE_MICOMM_READFAIL)
		return 0;

	return (val & 0xFFFF);
}
static int
bnx_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct bnx_softc *sc = device_get_softc(dev);
	int i;

	KASSERT(phy == sc->bnx_phyno,
	    ("invalid phyno %d, should be %d", phy, sc->bnx_phyno));

	/* Clear the autopoll bit if set, otherwise may trigger PCI errors. */
	if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) {
		CSR_WRITE_4(sc, BGE_MI_MODE,
		    sc->bnx_mi_mode & ~BGE_MIMODE_AUTOPOLL);
		DELAY(80);
	}

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE | BGE_MICOMM_BUSY |
	    BGE_MIPHY(phy) | BGE_MIREG(reg) | val);

	for (i = 0; i < BNX_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) {
			DELAY(5);
			CSR_READ_4(sc, BGE_MI_COMM); /* dummy read */
			break;
		}
	}
	if (i == BNX_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if, "PHY write timed out "
		    "(phy %d, reg %d, val %d)\n", phy, reg, val);
	}

	/* Restore the autopoll bit if necessary. */
	if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) {
		CSR_WRITE_4(sc, BGE_MI_MODE, sc->bnx_mi_mode);
		DELAY(80);
	}

	return 0;
}
static void
bnx_miibus_statchg(device_t dev)
{
	struct bnx_softc *sc;
	struct mii_data *mii;

	sc = device_get_softc(dev);
	mii = device_get_softc(sc->bnx_miibus);

	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
		case IFM_1000_T:
		case IFM_1000_SX:
			sc->bnx_link = 1;
			break;

		default:
			sc->bnx_link = 0;
			break;
		}
	} else {
		sc->bnx_link = 0;
	}
	if (sc->bnx_link == 0)
		return;

	BNX_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
	    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) {
		BNX_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
	} else {
		BNX_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
	}

	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
		BNX_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
	} else {
		BNX_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
	}
}
/*
 * Memory management for jumbo frames.
 */
static int
bnx_alloc_jumbo_mem(struct bnx_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct bnx_jslot *entry;
	uint8_t *ptr;
	bus_addr_t paddr;
	int i, error;

	/*
	 * Create tag for jumbo mbufs.
	 * This is really a bit of a kludge.  We allocate a special
	 * jumbo buffer pool which (thanks to the way our DMA
	 * memory allocation works) will consist of contiguous
	 * pages.  This means that even though a jumbo buffer might
	 * be larger than a page size, we don't really need to
	 * map it into more than one DMA segment.  However, the
	 * default mbuf tag will result in multi-segment mappings,
	 * so we have to create a special jumbo mbuf tag that
	 * lets us get away with mapping the jumbo buffers as
	 * a single segment.  I think eventually the driver should
	 * be changed so that it uses ordinary mbufs and cluster
	 * buffers, i.e. jumbo frames can span multiple DMA
	 * descriptors.  But that's a project for another day.
	 */

	/*
	 * Create DMA stuffs for jumbo RX ring.
	 */
	error = bnx_dma_block_alloc(sc, BGE_JUMBO_RX_RING_SZ,
				    &sc->bnx_cdata.bnx_rx_jumbo_ring_tag,
				    &sc->bnx_cdata.bnx_rx_jumbo_ring_map,
				    (void *)&sc->bnx_ldata.bnx_rx_jumbo_ring,
				    &sc->bnx_ldata.bnx_rx_jumbo_ring_paddr);
	if (error) {
		if_printf(ifp, "could not create jumbo RX ring\n");
		return error;
	}

	/*
	 * Create DMA stuffs for jumbo buffer block.
	 */
	error = bnx_dma_block_alloc(sc, BNX_JMEM,
				    &sc->bnx_cdata.bnx_jumbo_tag,
				    &sc->bnx_cdata.bnx_jumbo_map,
				    (void **)&sc->bnx_ldata.bnx_jumbo_buf,
				    &paddr);
	if (error) {
		if_printf(ifp, "could not create jumbo buffer\n");
		return error;
	}

	SLIST_INIT(&sc->bnx_jfree_listhead);

	/*
	 * Now divide it up into 9K pieces and save the addresses
	 * in an array.  Note that we play an evil trick here by using
	 * the first few bytes in the buffer to hold the address
	 * of the softc structure for this interface.  This is because
	 * bnx_jfree() needs it, but it is called by the mbuf management
	 * code which will not pass it to us explicitly.
	 */
	for (i = 0, ptr = sc->bnx_ldata.bnx_jumbo_buf; i < BNX_JSLOTS; i++) {
		entry = &sc->bnx_cdata.bnx_jslots[i];
		entry->bnx_sc = sc;
		entry->bnx_buf = ptr;
		entry->bnx_paddr = paddr;
		entry->bnx_inuse = 0;
		entry->bnx_slot = i;
		SLIST_INSERT_HEAD(&sc->bnx_jfree_listhead, entry, jslot_link);
		ptr += BNX_JLEN;
		paddr += BNX_JLEN;
	}
	return 0;
}
static void
bnx_free_jumbo_mem(struct bnx_softc *sc)
{
	/* Destroy jumbo RX ring. */
	bnx_dma_block_free(sc->bnx_cdata.bnx_rx_jumbo_ring_tag,
			   sc->bnx_cdata.bnx_rx_jumbo_ring_map,
			   sc->bnx_ldata.bnx_rx_jumbo_ring);

	/* Destroy jumbo buffer block. */
	bnx_dma_block_free(sc->bnx_cdata.bnx_jumbo_tag,
			   sc->bnx_cdata.bnx_jumbo_map,
			   sc->bnx_ldata.bnx_jumbo_buf);
}
/*
 * Allocate a jumbo buffer.
 */
static struct bnx_jslot *
bnx_jalloc(struct bnx_softc *sc)
{
	struct bnx_jslot *entry;

	lwkt_serialize_enter(&sc->bnx_jslot_serializer);
	entry = SLIST_FIRST(&sc->bnx_jfree_listhead);
	if (entry) {
		SLIST_REMOVE_HEAD(&sc->bnx_jfree_listhead, jslot_link);
		entry->bnx_inuse = 1;
	} else {
		if_printf(&sc->arpcom.ac_if, "no free jumbo buffers\n");
	}
	lwkt_serialize_exit(&sc->bnx_jslot_serializer);
	return (entry);
}
/*
 * Adjust usage count on a jumbo buffer.
 */
static void
bnx_jref(void *arg)
{
	struct bnx_jslot *entry = (struct bnx_jslot *)arg;
	struct bnx_softc *sc = entry->bnx_sc;

	if (sc == NULL)
		panic("bnx_jref: can't find softc pointer!");

	if (&sc->bnx_cdata.bnx_jslots[entry->bnx_slot] != entry) {
		panic("bnx_jref: asked to reference buffer "
		    "that we don't manage!");
	} else if (entry->bnx_inuse == 0) {
		panic("bnx_jref: buffer already free!");
	} else {
		atomic_add_int(&entry->bnx_inuse, 1);
	}
}
/*
 * Release a jumbo buffer.
 */
static void
bnx_jfree(void *arg)
{
	struct bnx_jslot *entry = (struct bnx_jslot *)arg;
	struct bnx_softc *sc = entry->bnx_sc;

	if (sc == NULL)
		panic("bnx_jfree: can't find softc pointer!");

	if (&sc->bnx_cdata.bnx_jslots[entry->bnx_slot] != entry) {
		panic("bnx_jfree: asked to free buffer that we don't manage!");
	} else if (entry->bnx_inuse == 0) {
		panic("bnx_jfree: buffer already free!");
	} else {
		/*
		 * Possible MP race to 0, use the serializer.  The atomic insn
		 * is still needed for races against bnx_jref().
		 */
		lwkt_serialize_enter(&sc->bnx_jslot_serializer);
		atomic_subtract_int(&entry->bnx_inuse, 1);
		if (entry->bnx_inuse == 0) {
			SLIST_INSERT_HEAD(&sc->bnx_jfree_listhead,
					  entry, jslot_link);
		}
		lwkt_serialize_exit(&sc->bnx_jslot_serializer);
	}
}
/*
 * Initialize a standard receive ring descriptor.
 */
static int
bnx_newbuf_std(struct bnx_rx_ret_ring *ret, int i, int init)
{
	struct mbuf *m_new = NULL;
	bus_dma_segment_t seg;
	bus_dmamap_t map;
	int error, nsegs;
	struct bnx_rx_buf *rb;

	rb = &ret->bnx_std->bnx_rx_std_buf[i];
	KASSERT(!rb->bnx_rx_refilled, ("RX buf %dth has been refilled", i));

	m_new = m_getcl(init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m_new == NULL) {
		error = ENOBUFS;
		goto back;
	}
	m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
	m_adj(m_new, ETHER_ALIGN);

	error = bus_dmamap_load_mbuf_segment(ret->bnx_rx_mtag,
	    ret->bnx_rx_tmpmap, m_new, &seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m_new);
		goto back;
	}

	if (!init) {
		bus_dmamap_sync(ret->bnx_rx_mtag, rb->bnx_rx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(ret->bnx_rx_mtag, rb->bnx_rx_dmamap);
	}

	map = ret->bnx_rx_tmpmap;
	ret->bnx_rx_tmpmap = rb->bnx_rx_dmamap;

	rb->bnx_rx_dmamap = map;
	rb->bnx_rx_mbuf = m_new;
	rb->bnx_rx_paddr = seg.ds_addr;
	rb->bnx_rx_len = m_new->m_len;
back:
	rb->bnx_rx_refilled = 1;
	return error;
}
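/*
 * Note the map juggling above: the new mbuf is loaded into the ring's
 * spare map (bnx_rx_tmpmap) first, and only after the load succeeds are
 * the spare map and the slot's map swapped.  A load failure therefore
 * leaves the slot's previous DMA mapping intact, which is what lets the
 * error path still mark the old buffer as refilled.
 */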
static void
bnx_setup_rxdesc_std(struct bnx_rx_std_ring *std, int i)
{
	struct bnx_rx_buf *rb;
	struct bge_rx_bd *r;
	bus_addr_t paddr;
	int len;

	rb = &std->bnx_rx_std_buf[i];
	KASSERT(rb->bnx_rx_refilled, ("RX buf %dth is not refilled", i));

	paddr = rb->bnx_rx_paddr;
	len = rb->bnx_rx_len;

	rb->bnx_rx_refilled = 0;

	r = &std->bnx_rx_std_ring[i];
	r->bge_addr.bge_addr_lo = BGE_ADDR_LO(paddr);
	r->bge_addr.bge_addr_hi = BGE_ADDR_HI(paddr);
	r->bge_len = len;
	r->bge_idx = i;
	r->bge_flags = BGE_RXBDFLAG_END;
}
/*
 * Initialize a jumbo receive ring descriptor.  This allocates
 * a jumbo buffer from the pool managed internally by the driver.
 */
static int
bnx_newbuf_jumbo(struct bnx_softc *sc, int i, int init)
{
	struct mbuf *m_new = NULL;
	struct bnx_jslot *buf;
	bus_addr_t paddr;

	/* Allocate the mbuf. */
	MGETHDR(m_new, init ? MB_WAIT : MB_DONTWAIT, MT_DATA);
	if (m_new == NULL)
		return ENOBUFS;

	/* Allocate the jumbo buffer */
	buf = bnx_jalloc(sc);
	if (buf == NULL) {
		m_freem(m_new);
		return ENOBUFS;
	}

	/* Attach the buffer to the mbuf. */
	m_new->m_ext.ext_arg = buf;
	m_new->m_ext.ext_buf = buf->bnx_buf;
	m_new->m_ext.ext_free = bnx_jfree;
	m_new->m_ext.ext_ref = bnx_jref;
	m_new->m_ext.ext_size = BNX_JUMBO_FRAMELEN;

	m_new->m_flags |= M_EXT;

	m_new->m_data = m_new->m_ext.ext_buf;
	m_new->m_len = m_new->m_pkthdr.len = m_new->m_ext.ext_size;

	paddr = buf->bnx_paddr;
	m_adj(m_new, ETHER_ALIGN);
	paddr += ETHER_ALIGN;

	/* Save necessary information */
	sc->bnx_cdata.bnx_rx_jumbo_chain[i].bnx_rx_mbuf = m_new;
	sc->bnx_cdata.bnx_rx_jumbo_chain[i].bnx_rx_paddr = paddr;

	/* Set up the descriptor. */
	bnx_setup_rxdesc_jumbo(sc, i);
	return 0;
}
static void
bnx_setup_rxdesc_jumbo(struct bnx_softc *sc, int i)
{
	struct bge_rx_bd *r;
	struct bnx_rx_buf *rc;

	r = &sc->bnx_ldata.bnx_rx_jumbo_ring[i];
	rc = &sc->bnx_cdata.bnx_rx_jumbo_chain[i];

	r->bge_addr.bge_addr_lo = BGE_ADDR_LO(rc->bnx_rx_paddr);
	r->bge_addr.bge_addr_hi = BGE_ADDR_HI(rc->bnx_rx_paddr);
	r->bge_len = rc->bnx_rx_mbuf->m_len;
	r->bge_idx = i;
	r->bge_flags = BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING;
}
static int
bnx_init_rx_ring_std(struct bnx_rx_std_ring *std)
{
	int i, error;

	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
		/* Use the first RX return ring's tmp RX mbuf DMA map */
		error = bnx_newbuf_std(&std->bnx_sc->bnx_rx_ret_ring[0], i, 1);
		if (error)
			return error;
		bnx_setup_rxdesc_std(std, i);
	}

	std->bnx_rx_std_used = 0;
	std->bnx_rx_std_refill = 0;
	std->bnx_rx_std_running = 0;
	lwkt_serialize_handler_enable(&std->bnx_rx_std_serialize);

	std->bnx_rx_std = BGE_STD_RX_RING_CNT - 1;
	bnx_writembx(std->bnx_sc, BGE_MBX_RX_STD_PROD_LO, std->bnx_rx_std);

	return (0);
}
static void
bnx_free_rx_ring_std(struct bnx_rx_std_ring *std)
{
	int i;

	lwkt_serialize_handler_disable(&std->bnx_rx_std_serialize);

	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
		struct bnx_rx_buf *rb = &std->bnx_rx_std_buf[i];

		rb->bnx_rx_refilled = 0;
		if (rb->bnx_rx_mbuf != NULL) {
			bus_dmamap_unload(std->bnx_rx_mtag, rb->bnx_rx_dmamap);
			m_freem(rb->bnx_rx_mbuf);
			rb->bnx_rx_mbuf = NULL;
		}
		bzero(&std->bnx_rx_std_ring[i], sizeof(struct bge_rx_bd));
	}
}
static int
bnx_init_rx_ring_jumbo(struct bnx_softc *sc)
{
	struct bge_rcb *rcb;
	int i, error;

	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
		error = bnx_newbuf_jumbo(sc, i, 1);
		if (error)
			return error;
	}

	sc->bnx_jumbo = BGE_JUMBO_RX_RING_CNT - 1;

	rcb = &sc->bnx_ldata.bnx_info.bnx_jumbo_rx_rcb;
	rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 0);
	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);

	bnx_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bnx_jumbo);

	return (0);
}
static void
bnx_free_rx_ring_jumbo(struct bnx_softc *sc)
{
	int i;

	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
		struct bnx_rx_buf *rc = &sc->bnx_cdata.bnx_rx_jumbo_chain[i];

		if (rc->bnx_rx_mbuf != NULL) {
			m_freem(rc->bnx_rx_mbuf);
			rc->bnx_rx_mbuf = NULL;
		}
		bzero(&sc->bnx_ldata.bnx_rx_jumbo_ring[i],
		      sizeof(struct bge_rx_bd));
	}
}
static void
bnx_free_tx_ring(struct bnx_tx_ring *txr)
{
	int i;

	for (i = 0; i < BGE_TX_RING_CNT; i++) {
		struct bnx_tx_buf *buf = &txr->bnx_tx_buf[i];

		if (buf->bnx_tx_mbuf != NULL) {
			bus_dmamap_unload(txr->bnx_tx_mtag,
			    buf->bnx_tx_dmamap);
			m_freem(buf->bnx_tx_mbuf);
			buf->bnx_tx_mbuf = NULL;
		}
		bzero(&txr->bnx_tx_ring[i], sizeof(struct bge_tx_bd));
	}
	txr->bnx_tx_saved_considx = BNX_TXCONS_UNSET;
}
static int
bnx_init_tx_ring(struct bnx_tx_ring *txr)
{
	txr->bnx_tx_cnt = 0;
	txr->bnx_tx_saved_considx = 0;
	txr->bnx_tx_prodidx = 0;

	/* Initialize transmit producer index for host-memory send ring. */
	bnx_writembx(txr->bnx_sc, txr->bnx_tx_mbx, txr->bnx_tx_prodidx);

	return (0);
}
static void
bnx_setmulti(struct bnx_softc *sc)
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	uint32_t hashes[4] = { 0, 0, 0, 0 };
	int h, i;

	ifp = &sc->arpcom.ac_if;

	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		for (i = 0; i < 4; i++)
			CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
		return;
	}

	/* First, zot all the existing filters. */
	for (i = 0; i < 4; i++)
		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);

	/* Now program new ones. */
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		h = ether_crc32_le(
		    LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
		    ETHER_ADDR_LEN) & 0x7f;
		hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
	}

	for (i = 0; i < 4; i++)
		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
}
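/*
 * The hash above folds the CRC of the multicast address into a 7-bit
 * value (0-127), which indexes a single bit across the four 32-bit
 * BGE_MAR registers: bits 6-5 pick the register, bits 4-0 pick the
 * bit within it.  For example, h = 0x4a (binary 1001010) selects
 * hashes[2] bit 10.
 */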
/*
 * Do endian, PCI and DMA initialization.  Also check the on-board ROM
 * self-test results.
 */
static int
bnx_chipinit(struct bnx_softc *sc)
{
	uint32_t dma_rw_ctl, mode_ctl;
	int i;

	/* Set endian type before we access any non-PCI registers. */
	pci_write_config(sc->bnx_dev, BGE_PCI_MISC_CTL,
	    BGE_INIT | BGE_PCIMISCCTL_TAGGED_STATUS, 4);

	/* Clear the MAC control register */
	CSR_WRITE_4(sc, BGE_MAC_MODE, 0);

	/*
	 * Clear the MAC statistics block in the NIC's
	 * internal memory.
	 */
	for (i = BGE_STATS_BLOCK;
	    i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t))
		BNX_MEMWIN_WRITE(sc, i, 0);

	for (i = BGE_STATUS_BLOCK;
	    i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t))
		BNX_MEMWIN_WRITE(sc, i, 0);

	if (BNX_IS_57765_FAMILY(sc)) {
		uint32_t val;

		if (sc->bnx_chipid == BGE_CHIPID_BCM57765_A0) {
			mode_ctl = CSR_READ_4(sc, BGE_MODE_CTL);
			val = mode_ctl & ~BGE_MODECTL_PCIE_PORTS;

			/* Access the lower 1K of PL PCI-E block registers. */
			CSR_WRITE_4(sc, BGE_MODE_CTL,
			    val | BGE_MODECTL_PCIE_PL_SEL);

			val = CSR_READ_4(sc, BGE_PCIE_PL_LO_PHYCTL5);
			val |= BGE_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ;
			CSR_WRITE_4(sc, BGE_PCIE_PL_LO_PHYCTL5, val);

			CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl);
		}
		if (sc->bnx_chiprev != BGE_CHIPREV_57765_AX) {
			/* Fix transmit hangs */
			val = CSR_READ_4(sc, BGE_CPMU_PADRNG_CTL);
			val |= BGE_CPMU_PADRNG_CTL_RDIV2;
			CSR_WRITE_4(sc, BGE_CPMU_PADRNG_CTL, val);

			mode_ctl = CSR_READ_4(sc, BGE_MODE_CTL);
			val = mode_ctl & ~BGE_MODECTL_PCIE_PORTS;

			/* Access the lower 1K of DL PCI-E block registers. */
			CSR_WRITE_4(sc, BGE_MODE_CTL,
			    val | BGE_MODECTL_PCIE_DL_SEL);

			val = CSR_READ_4(sc, BGE_PCIE_DL_LO_FTSMAX);
			val &= ~BGE_PCIE_DL_LO_FTSMAX_MASK;
			val |= BGE_PCIE_DL_LO_FTSMAX_VAL;
			CSR_WRITE_4(sc, BGE_PCIE_DL_LO_FTSMAX, val);

			CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl);
		}

		val = CSR_READ_4(sc, BGE_CPMU_LSPD_10MB_CLK);
		val &= ~BGE_CPMU_LSPD_10MB_MACCLK_MASK;
		val |= BGE_CPMU_LSPD_10MB_MACCLK_6_25;
		CSR_WRITE_4(sc, BGE_CPMU_LSPD_10MB_CLK, val);
	}

	/*
	 * Set up the PCI DMA control register.
	 */
	dma_rw_ctl = pci_read_config(sc->bnx_dev, BGE_PCI_DMA_RW_CTL, 4);
	/*
	 * Disable 32bytes cache alignment for DMA write to host memory
	 *
	 * NOTE:
	 * 64bytes cache alignment for DMA write to host memory is still
	 * used.
	 */
	dma_rw_ctl |= BGE_PCIDMARWCTL_DIS_CACHE_ALIGNMENT;
	if (sc->bnx_chipid == BGE_CHIPID_BCM57765_A0)
		dma_rw_ctl &= ~BGE_PCIDMARWCTL_CRDRDR_RDMA_MRRS_MSK;
	/*
	 * Enable HW workaround for controllers that misinterpret
	 * a status tag update and leave interrupts permanently
	 * disabled.
	 */
	if (sc->bnx_asicrev != BGE_ASICREV_BCM5717 &&
	    sc->bnx_asicrev != BGE_ASICREV_BCM5762 &&
	    !BNX_IS_57765_FAMILY(sc))
		dma_rw_ctl |= BGE_PCIDMARWCTL_TAGGED_STATUS_WA;
	if (bootverbose) {
		if_printf(&sc->arpcom.ac_if, "DMA read/write %#x\n",
		    dma_rw_ctl);
	}
	pci_write_config(sc->bnx_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);

	/*
	 * Set up general mode register.
	 */
	mode_ctl = bnx_dma_swap_options(sc) | BGE_MODECTL_MAC_ATTN_INTR |
	    BGE_MODECTL_HOST_SEND_BDS | BGE_MODECTL_TX_NO_PHDR_CSUM;
	CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl);

	/*
	 * Disable memory write invalidate.  Apparently it is not supported
	 * properly by these devices.  Also ensure that INTx isn't disabled,
	 * as these chips need it even when using MSI.
	 */
	PCI_CLRBIT(sc->bnx_dev, BGE_PCI_CMD,
	    (PCIM_CMD_MWRICEN | PCIM_CMD_INTxDIS), 4);

	/* Set the timer prescaler (always 66MHz) */
	CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/);

	return (0);
}
static int
bnx_blockinit(struct bnx_softc *sc)
{
	struct bnx_intr_data *intr;
	struct bge_rcb *rcb;
	bus_size_t vrcb;
	bge_hostaddr taddr;
	uint32_t val;
	int i, limit;

	/*
	 * Initialize the memory window pointer register so that
	 * we can access the first 32K of internal NIC RAM.  This will
	 * allow us to set up the TX send ring RCBs and the RX return
	 * ring RCBs, plus other things which live in NIC memory.
	 */
	CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);

	/* Configure mbuf pool watermarks */
	if (BNX_IS_57765_PLUS(sc)) {
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
		if (sc->arpcom.ac_if.if_mtu > ETHERMTU) {
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x7e);
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xea);
		} else {
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x2a);
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xa0);
		}
	} else {
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
	}

	/* Configure DMA resource watermarks */
	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);

	/* Enable buffer manager */
	val = BGE_BMANMODE_ENABLE | BGE_BMANMODE_LOMBUF_ATTN;
	/*
	 * Change the arbitration algorithm of TXMBUF read request to
	 * round-robin instead of priority based for BCM5719.  When
	 * TXFIFO is almost empty, RDMA will hold its request until
	 * TXFIFO is not almost empty.
	 */
	if (sc->bnx_asicrev == BGE_ASICREV_BCM5719)
		val |= BGE_BMANMODE_NO_TX_UNDERRUN;
	if (sc->bnx_asicrev == BGE_ASICREV_BCM5717 ||
	    sc->bnx_chipid == BGE_CHIPID_BCM5719_A0 ||
	    sc->bnx_chipid == BGE_CHIPID_BCM5720_A0)
		val |= BGE_BMANMODE_LOMBUF_ATTN;
	CSR_WRITE_4(sc, BGE_BMAN_MODE, val);

	/* Poll for buffer manager start indication */
	for (i = 0; i < BNX_TIMEOUT; i++) {
		if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
			break;
		DELAY(10);
	}
	if (i == BNX_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if,
			  "buffer manager failed to start\n");
		return (ENXIO);
	}

	/* Enable flow-through queues */
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);

	/* Wait until queue initialization is complete */
	for (i = 0; i < BNX_TIMEOUT; i++) {
		if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
			break;
		DELAY(10);
	}
	if (i == BNX_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if,
			  "flow-through queue init failed\n");
		return (ENXIO);
	}

	/*
	 * Summary of rings supported by the controller:
	 *
	 * Standard Receive Producer Ring
	 * - This ring is used to feed receive buffers for "standard"
	 *   sized frames (typically 1536 bytes) to the controller.
	 *
	 * Jumbo Receive Producer Ring
	 * - This ring is used to feed receive buffers for jumbo sized
	 *   frames (i.e. anything bigger than the "standard" frames)
	 *   to the controller.
	 *
	 * Mini Receive Producer Ring
	 * - This ring is used to feed receive buffers for "mini"
	 *   sized frames to the controller.
	 * - This feature required external memory for the controller
	 *   but was never used in a production system.  Should always
	 *   be disabled.
	 *
	 * Receive Return Ring
	 * - After the controller has placed an incoming frame into a
	 *   receive buffer that buffer is moved into a receive return
	 *   ring.  The driver is then responsible for passing the
	 *   buffer up to the stack.  BCM5718/BCM57785 families support
	 *   multiple receive return rings.
	 *
	 * Send Ring
	 * - This ring is used for outgoing frames.  BCM5719/BCM5720
	 *   support multiple send rings.
	 */
	/* Initialize the standard receive producer ring control block. */
	rcb = &sc->bnx_ldata.bnx_info.bnx_std_rx_rcb;
	rcb->bge_hostaddr.bge_addr_lo =
	    BGE_ADDR_LO(sc->bnx_rx_std_ring.bnx_rx_std_ring_paddr);
	rcb->bge_hostaddr.bge_addr_hi =
	    BGE_ADDR_HI(sc->bnx_rx_std_ring.bnx_rx_std_ring_paddr);
	if (BNX_IS_57765_PLUS(sc)) {
		/*
		 * Bits 31-16: Programmable ring size (2048, 1024, 512, .., 32)
		 * Bits 15-2 : Maximum RX frame size
		 * Bit 1     : 1 = Ring Disabled, 0 = Ring Enabled
		 * Bit 0     : Reserved
		 */
		rcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(512, BNX_MAX_FRAMELEN << 2);
	} else {
		/*
		 * Bits 31-16: Programmable ring size (512, 256, 128, 64, 32)
		 * Bits 15-2 : Reserved (should be 0)
		 * Bit 1     : 1 = Ring Disabled, 0 = Ring Enabled
		 * Bit 0     : Reserved
		 */
		rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
	}
	if (BNX_IS_5717_PLUS(sc))
		rcb->bge_nicaddr = BGE_STD_RX_RINGS_5717;
	else
		rcb->bge_nicaddr = BGE_STD_RX_RINGS;
	/* Write the standard receive producer ring control block. */
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
	if (!BNX_IS_5717_PLUS(sc))
		CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
	/* Reset the standard receive producer ring producer index. */
	bnx_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0);

	/*
	 * Initialize the jumbo RX producer ring control
	 * block.  We set the 'ring disabled' bit in the
	 * flags field until we're actually ready to start
	 * using this ring (i.e. once we set the MTU
	 * high enough to require it).
	 */
	if (BNX_IS_JUMBO_CAPABLE(sc)) {
		rcb = &sc->bnx_ldata.bnx_info.bnx_jumbo_rx_rcb;
		/* Get the jumbo receive producer ring RCB parameters. */
		rcb->bge_hostaddr.bge_addr_lo =
		    BGE_ADDR_LO(sc->bnx_ldata.bnx_rx_jumbo_ring_paddr);
		rcb->bge_hostaddr.bge_addr_hi =
		    BGE_ADDR_HI(sc->bnx_ldata.bnx_rx_jumbo_ring_paddr);
		rcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(BNX_MAX_FRAMELEN,
			BGE_RCB_FLAG_RING_DISABLED);
		if (BNX_IS_5717_PLUS(sc))
			rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS_5717;
		else
			rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
		    rcb->bge_hostaddr.bge_addr_hi);
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
		    rcb->bge_hostaddr.bge_addr_lo);
		/* Program the jumbo receive producer ring RCB parameters. */
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
		    rcb->bge_maxlen_flags);
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
		/* Reset the jumbo receive producer ring producer index. */
		bnx_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
	}

	/*
	 * The BD ring replenish thresholds control how often the
	 * hardware fetches new BD's from the producer rings in host
	 * memory.  Setting the value too low on a busy system can
	 * starve the hardware and reduce the throughput.
	 *
	 * Set the BD ring replenish thresholds.  The recommended
	 * values are 1/8th the number of descriptors allocated to
	 * the ring.
	 */
	val = BGE_STD_RX_RING_CNT / 8;
	CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, val);
	if (BNX_IS_JUMBO_CAPABLE(sc)) {
		CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH,
		    BGE_JUMBO_RX_RING_CNT/8);
	}
	if (BNX_IS_57765_PLUS(sc)) {
		CSR_WRITE_4(sc, BGE_STD_REPLENISH_LWM, 32);
		CSR_WRITE_4(sc, BGE_JMB_REPLENISH_LWM, 16);
	}
	/*
	 * Disable all send rings by setting the 'ring disabled' bit
	 * in the flags field of all the TX send ring control blocks,
	 * located in NIC memory.
	 */
	if (BNX_IS_5717_PLUS(sc))
		limit = 4;
	else if (BNX_IS_57765_FAMILY(sc) ||
	    sc->bnx_asicrev == BGE_ASICREV_BCM5762)
		limit = 2;
	else
		limit = 1;
	vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
	for (i = 0; i < limit; i++) {
		RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
		vrcb += sizeof(struct bge_rcb);
	}

	/*
	 * Configure send ring RCBs
	 */
	vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
	for (i = 0; i < sc->bnx_tx_ringcnt; ++i) {
		struct bnx_tx_ring *txr = &sc->bnx_tx_ring[i];

		BGE_HOSTADDR(taddr, txr->bnx_tx_ring_paddr);
		RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi,
		    taddr.bge_addr_hi);
		RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo,
		    taddr.bge_addr_lo);
		RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
		    BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
		vrcb += sizeof(struct bge_rcb);
	}

	/*
	 * Disable all receive return rings by setting the
	 * 'ring disabled' bit in the flags field of all the receive
	 * return ring control blocks, located in NIC memory.
	 */
	if (BNX_IS_5717_PLUS(sc)) {
		/* Should be 17, use 16 until we get an SRAM map. */
		limit = 16;
	} else if (BNX_IS_57765_FAMILY(sc) ||
	    sc->bnx_asicrev == BGE_ASICREV_BCM5762) {
		limit = 4;
	} else {
		limit = 1;
	}
	/* Disable all receive return rings. */
	vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
	for (i = 0; i < limit; i++) {
		RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0);
		RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0);
		RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
		    BGE_RCB_FLAG_RING_DISABLED);
		bnx_writembx(sc, BGE_MBX_RX_CONS0_LO +
		    (i * (sizeof(uint64_t))), 0);
		vrcb += sizeof(struct bge_rcb);
	}

	/*
	 * Set up receive return rings.
	 */
	vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
	for (i = 0; i < sc->bnx_rx_retcnt; ++i) {
		struct bnx_rx_ret_ring *ret = &sc->bnx_rx_ret_ring[i];

		BGE_HOSTADDR(taddr, ret->bnx_rx_ret_ring_paddr);
		RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi,
		    taddr.bge_addr_hi);
		RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo,
		    taddr.bge_addr_lo);
		RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
		    BGE_RCB_MAXLEN_FLAGS(BNX_RETURN_RING_CNT, 0));
		vrcb += sizeof(struct bge_rcb);
	}
	/* Set random backoff seed for TX */
	CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
	    sc->arpcom.ac_enaddr[0] + sc->arpcom.ac_enaddr[1] +
	    sc->arpcom.ac_enaddr[2] + sc->arpcom.ac_enaddr[3] +
	    sc->arpcom.ac_enaddr[4] + sc->arpcom.ac_enaddr[5] +
	    BGE_TX_BACKOFF_SEED_MASK);

	/* Set inter-packet gap */
	val = 0x2620;
	if (sc->bnx_asicrev == BGE_ASICREV_BCM5720 ||
	    sc->bnx_asicrev == BGE_ASICREV_BCM5762) {
		val |= CSR_READ_4(sc, BGE_TX_LENGTHS) &
		    (BGE_TXLEN_JMB_FRM_LEN_MSK | BGE_TXLEN_CNT_DN_VAL_MSK);
	}
	CSR_WRITE_4(sc, BGE_TX_LENGTHS, val);

	/*
	 * Specify which ring to use for packets that don't match
	 * any RX rules.
	 */
	CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);

	/*
	 * Configure number of RX lists.  One interrupt distribution
	 * list, sixteen active lists, one bad frames class.
	 */
	CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);

	/* Initialize RX list placement stats mask. */
	CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
	CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);

	/* Disable host coalescing until we get it set up */
	CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);

	/* Poll to make sure it's shut down. */
	for (i = 0; i < BNX_TIMEOUT; i++) {
		if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
			break;
		DELAY(10);
	}
	if (i == BNX_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if,
			  "host coalescing engine failed to idle\n");
		return (ENXIO);
	}

	/* Set up host coalescing defaults */
	sc->bnx_coal_chg = BNX_RX_COAL_TICKS_CHG |
	    BNX_TX_COAL_TICKS_CHG |
	    BNX_RX_COAL_BDS_CHG |
	    BNX_TX_COAL_BDS_CHG |
	    BNX_RX_COAL_BDS_INT_CHG |
	    BNX_TX_COAL_BDS_INT_CHG;
	bnx_coal_change(sc);

	/*
	 * Set up addresses of status blocks
	 */
	intr = &sc->bnx_intr_data[0];
	bzero(intr->bnx_status_block, BGE_STATUS_BLK_SZ);
	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI,
	    BGE_ADDR_HI(intr->bnx_status_block_paddr));
	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
	    BGE_ADDR_LO(intr->bnx_status_block_paddr));
	for (i = 1; i < sc->bnx_intr_cnt; ++i) {
		intr = &sc->bnx_intr_data[i];
		bzero(intr->bnx_status_block, BGE_STATUS_BLK_SZ);
		CSR_WRITE_4(sc, BGE_VEC1_STATUSBLK_ADDR_HI + ((i - 1) * 8),
		    BGE_ADDR_HI(intr->bnx_status_block_paddr));
		CSR_WRITE_4(sc, BGE_VEC1_STATUSBLK_ADDR_LO + ((i - 1) * 8),
		    BGE_ADDR_LO(intr->bnx_status_block_paddr));
	}

	/* Set up status block partial update size. */
	val = BGE_STATBLKSZ_32BYTE;
#if 0
	/*
	 * Does not seem to have visible effect in both
	 * bulk data (1472B UDP datagram) and tiny data
	 * (18B UDP datagram) TX tests.
	 */
	val |= BGE_HCCMODE_CLRTICK_TX;
#endif
	/* Turn on host coalescing state machine */
	CSR_WRITE_4(sc, BGE_HCC_MODE, val | BGE_HCCMODE_ENABLE);

	/* Turn on RX BD completion state machine and enable attentions */
	CSR_WRITE_4(sc, BGE_RBDC_MODE,
	    BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);

	/* Turn on RX list placement state machine */
	CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);

	val = BGE_MACMODE_TXDMA_ENB | BGE_MACMODE_RXDMA_ENB |
	    BGE_MACMODE_RX_STATS_CLEAR | BGE_MACMODE_TX_STATS_CLEAR |
	    BGE_MACMODE_RX_STATS_ENB | BGE_MACMODE_TX_STATS_ENB |
	    BGE_MACMODE_FRMHDR_DMA_ENB;

	if (sc->bnx_flags & BNX_FLAG_TBI)
		val |= BGE_PORTMODE_TBI;
	else if (sc->bnx_flags & BNX_FLAG_MII_SERDES)
		val |= BGE_PORTMODE_GMII;
	else
		val |= BGE_PORTMODE_MII;

	/* Turn on DMA, clear stats */
	CSR_WRITE_4(sc, BGE_MAC_MODE, val);

	/* Set misc. local control, enable interrupts on attentions */
	CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);

#ifdef notdef
	/* Assert GPIO pins for PHY reset */
	BNX_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
	    BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
	BNX_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
	    BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
#endif

	if (sc->bnx_intr_type == PCI_INTR_TYPE_MSIX)
		bnx_enable_msi(sc, TRUE);

	/* Turn on write DMA state machine */
	val = BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS;
	/* Enable host coalescing bug fix. */
	val |= BGE_WDMAMODE_STATUS_TAG_FIX;
	if (sc->bnx_asicrev == BGE_ASICREV_BCM5785) {
		/* Request larger DMA burst size to get better performance. */
		val |= BGE_WDMAMODE_BURST_ALL_DATA;
	}
	CSR_WRITE_4(sc, BGE_WDMA_MODE, val);
	DELAY(40);
	if (BNX_IS_57765_PLUS(sc)) {
		uint32_t dmactl, dmactl_reg;

		if (sc->bnx_asicrev == BGE_ASICREV_BCM5762)
			dmactl_reg = BGE_RDMA_RSRVCTRL2;
		else
			dmactl_reg = BGE_RDMA_RSRVCTRL;

		dmactl = CSR_READ_4(sc, dmactl_reg);
		/*
		 * Adjust tx margin to prevent TX data corruption and
		 * fix internal FIFO overflow.
		 */
		if (sc->bnx_asicrev == BGE_ASICREV_BCM5719 ||
		    sc->bnx_asicrev == BGE_ASICREV_BCM5720 ||
		    sc->bnx_asicrev == BGE_ASICREV_BCM5762) {
			dmactl &= ~(BGE_RDMA_RSRVCTRL_FIFO_LWM_MASK |
			    BGE_RDMA_RSRVCTRL_FIFO_HWM_MASK |
			    BGE_RDMA_RSRVCTRL_TXMRGN_MASK);
			dmactl |= BGE_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
			    BGE_RDMA_RSRVCTRL_FIFO_HWM_1_5K |
			    BGE_RDMA_RSRVCTRL_TXMRGN_320B;
		}
		/*
		 * Enable fix for read DMA FIFO overruns.
		 * The fix is to limit the number of RX BDs
		 * the hardware would fetch at a time.
		 */
		CSR_WRITE_4(sc, dmactl_reg,
		    dmactl | BGE_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
	}

	if (sc->bnx_asicrev == BGE_ASICREV_BCM5719) {
		CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL,
		    CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) |
		    BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_4K |
		    BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K);
	} else if (sc->bnx_asicrev == BGE_ASICREV_BCM5720 ||
	    sc->bnx_asicrev == BGE_ASICREV_BCM5762) {
		uint32_t ctrl_reg;

		if (sc->bnx_asicrev == BGE_ASICREV_BCM5762)
			ctrl_reg = BGE_RDMA_LSO_CRPTEN_CTRL2;
		else
			ctrl_reg = BGE_RDMA_LSO_CRPTEN_CTRL;

		/*
		 * Allow 4KB burst length reads for non-LSO frames.
		 * Enable 512B burst length reads for buffer descriptors.
		 */
		CSR_WRITE_4(sc, ctrl_reg,
		    CSR_READ_4(sc, ctrl_reg) |
		    BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_512 |
		    BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K);
	}

	/* Turn on read DMA state machine */
	val = BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS;
	if (sc->bnx_asicrev == BGE_ASICREV_BCM5717)
		val |= BGE_RDMAMODE_MULT_DMA_RD_DIS;
	if (sc->bnx_asicrev == BGE_ASICREV_BCM5784 ||
	    sc->bnx_asicrev == BGE_ASICREV_BCM5785 ||
	    sc->bnx_asicrev == BGE_ASICREV_BCM57780) {
		val |= BGE_RDMAMODE_BD_SBD_CRPT_ATTN |
		    BGE_RDMAMODE_MBUF_RBD_CRPT_ATTN |
		    BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN;
	}
	if (sc->bnx_asicrev == BGE_ASICREV_BCM5720 ||
	    sc->bnx_asicrev == BGE_ASICREV_BCM5762) {
		val |= CSR_READ_4(sc, BGE_RDMA_MODE) &
		    BGE_RDMAMODE_H2BNC_VLAN_DET;
		/*
		 * Allow multiple outstanding read requests from
		 * non-LSO read DMA engine.
		 */
		val &= ~BGE_RDMAMODE_MULT_DMA_RD_DIS;
	}
	if (sc->bnx_asicrev == BGE_ASICREV_BCM57766)
		val |= BGE_RDMAMODE_JMB_2K_MMRR;
	if (sc->bnx_flags & BNX_FLAG_TSO)
		val |= BGE_RDMAMODE_TSO4_ENABLE;
	val |= BGE_RDMAMODE_FIFO_LONG_BURST;
	CSR_WRITE_4(sc, BGE_RDMA_MODE, val);
	DELAY(40);
	/* Turn on RX data completion state machine */
	CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);

	/* Turn on RX BD initiator state machine */
	CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);

	/* Turn on RX data and RX BD initiator state machine */
	CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);

	/* Turn on send BD completion state machine */
	CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);

	/* Turn on send data completion state machine */
	val = BGE_SDCMODE_ENABLE;
	if (sc->bnx_asicrev == BGE_ASICREV_BCM5761)
		val |= BGE_SDCMODE_CDELAY;
	CSR_WRITE_4(sc, BGE_SDC_MODE, val);

	/* Turn on send data initiator state machine */
	if (sc->bnx_flags & BNX_FLAG_TSO) {
		CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE |
		    BGE_SDIMODE_HW_LSO_PRE_DMA);
	} else {
		CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
	}

	/* Turn on send BD initiator state machine */
	val = BGE_SBDIMODE_ENABLE;
	if (sc->bnx_tx_ringcnt > 1)
		val |= BGE_SBDIMODE_MULTI_TXR;
	CSR_WRITE_4(sc, BGE_SBDI_MODE, val);

	/* Turn on send BD selector state machine */
	CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);

	CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
	CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
	    BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);

	/* ack/clear link change events */
	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
	    BGE_MACSTAT_LINK_CHANGED);
	CSR_WRITE_4(sc, BGE_MI_STS, 0);

	/*
	 * Enable attention when the link has changed state for
	 * devices that use auto polling.
	 */
	if (sc->bnx_flags & BNX_FLAG_TBI) {
		CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
	} else {
		if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) {
			CSR_WRITE_4(sc, BGE_MI_MODE, sc->bnx_mi_mode);
			DELAY(80);
		}
	}

	/*
	 * Clear any pending link state attention.
	 * Otherwise some link state change events may be lost until attention
	 * is cleared by bnx_intr() -> bnx_softc.bnx_link_upd() sequence.
	 * It's not necessary on newer BCM chips - perhaps enabling link
	 * state change attentions implies clearing pending attention.
	 */
	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
	    BGE_MACSTAT_LINK_CHANGED);

	/* Enable link state change attentions. */
	BNX_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);

	return (0);
}
/*
 * Probe for a Broadcom chip.  Check the PCI vendor and device IDs
 * against our list and return its name if we find a match.  Note
 * that since the Broadcom controller contains VPD support, we
 * can get the device name string from the controller itself instead
 * of the compiled-in string.  This is a little slow, but it guarantees
 * we'll always announce the right product name.
 */
static int
bnx_probe(device_t dev)
{
	const struct bnx_type *t;
	uint16_t product, vendor;

	if (!pci_is_pcie(dev))
		return ENXIO;

	product = pci_get_device(dev);
	vendor = pci_get_vendor(dev);

	for (t = bnx_devs; t->bnx_name != NULL; t++) {
		if (vendor == t->bnx_vid && product == t->bnx_did)
			break;
	}
	if (t->bnx_name == NULL)
		return ENXIO;

	device_set_desc(dev, t->bnx_name);
	return 0;
}
static int
bnx_attach(device_t dev)
{
	struct bnx_softc *sc;
	struct bnx_rx_std_ring *std;
	struct ifnet *ifp;
	int error = 0, rid, capmask, i, std_cpuid, std_cpuid_def;
	uint8_t ether_addr[ETHER_ADDR_LEN];
	uint16_t product;
	uintptr_t mii_priv = 0;
#if defined(BNX_TSO_DEBUG) || defined(BNX_RSS_DEBUG) || defined(BNX_TSS_DEBUG)
	char desc[32];
#endif
#ifdef IFPOLL_ENABLE
	int offset, offset_def;
#endif

	sc = device_get_softc(dev);
	sc->bnx_dev = dev;
	callout_init_mp(&sc->bnx_tick_timer);
	lwkt_serialize_init(&sc->bnx_jslot_serializer);
	lwkt_serialize_init(&sc->bnx_main_serialize);

	/* Always setup interrupt mailboxes */
	for (i = 0; i < BNX_INTR_MAX; ++i) {
		callout_init_mp(&sc->bnx_intr_data[i].bnx_intr_timer);
		sc->bnx_intr_data[i].bnx_sc = sc;
		sc->bnx_intr_data[i].bnx_intr_mbx = BGE_MBX_IRQ0_LO + (i * 8);
		sc->bnx_intr_data[i].bnx_intr_rid = -1;
		sc->bnx_intr_data[i].bnx_intr_cpuid = -1;
	}

	product = pci_get_device(dev);

#ifndef BURN_BRIDGES
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		uint32_t irq, mem;

		irq = pci_read_config(dev, PCIR_INTLINE, 4);
		mem = pci_read_config(dev, BGE_PCI_BAR0, 4);

		device_printf(dev, "chip is in D%d power mode "
		    "-- setting to D0\n", pci_get_powerstate(dev));

		pci_set_powerstate(dev, PCI_POWERSTATE_D0);

		pci_write_config(dev, PCIR_INTLINE, irq, 4);
		pci_write_config(dev, BGE_PCI_BAR0, mem, 4);
	}
#endif	/* !BURN_BRIDGES */
	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

	rid = BGE_PCI_BAR0;
	sc->bnx_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->bnx_res == NULL) {
		device_printf(dev, "couldn't map memory\n");
		return ENXIO;
	}

	sc->bnx_btag = rman_get_bustag(sc->bnx_res);
	sc->bnx_bhandle = rman_get_bushandle(sc->bnx_res);

	/* Save various chip information */
	sc->bnx_chipid =
	    pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >>
	    BGE_PCIMISCCTL_ASICREV_SHIFT;
	if (BGE_ASICREV(sc->bnx_chipid) == BGE_ASICREV_USE_PRODID_REG) {
		/* All chips having dedicated ASICREV register have CPMU */
		sc->bnx_flags |= BNX_FLAG_CPMU;

		switch (product) {
		case PCI_PRODUCT_BROADCOM_BCM5717:
		case PCI_PRODUCT_BROADCOM_BCM5717C:
		case PCI_PRODUCT_BROADCOM_BCM5718:
		case PCI_PRODUCT_BROADCOM_BCM5719:
		case PCI_PRODUCT_BROADCOM_BCM5720_ALT:
		case PCI_PRODUCT_BROADCOM_BCM5725:
		case PCI_PRODUCT_BROADCOM_BCM5727:
		case PCI_PRODUCT_BROADCOM_BCM5762:
			sc->bnx_chipid = pci_read_config(dev,
			    BGE_PCI_GEN2_PRODID_ASICREV, 4);
			break;

		case PCI_PRODUCT_BROADCOM_BCM57761:
		case PCI_PRODUCT_BROADCOM_BCM57762:
		case PCI_PRODUCT_BROADCOM_BCM57765:
		case PCI_PRODUCT_BROADCOM_BCM57766:
		case PCI_PRODUCT_BROADCOM_BCM57781:
		case PCI_PRODUCT_BROADCOM_BCM57782:
		case PCI_PRODUCT_BROADCOM_BCM57785:
		case PCI_PRODUCT_BROADCOM_BCM57786:
		case PCI_PRODUCT_BROADCOM_BCM57791:
		case PCI_PRODUCT_BROADCOM_BCM57795:
			sc->bnx_chipid = pci_read_config(dev,
			    BGE_PCI_GEN15_PRODID_ASICREV, 4);
			break;

		default:
			sc->bnx_chipid = pci_read_config(dev,
			    BGE_PCI_PRODID_ASICREV, 4);
			break;
		}
	}
	if (sc->bnx_chipid == BGE_CHIPID_BCM5717_C0)
		sc->bnx_chipid = BGE_CHIPID_BCM5720_A0;

	sc->bnx_asicrev = BGE_ASICREV(sc->bnx_chipid);
	sc->bnx_chiprev = BGE_CHIPREV(sc->bnx_chipid);
1904 switch (sc->bnx_asicrev) {
1905 case BGE_ASICREV_BCM5717:
1906 case BGE_ASICREV_BCM5719:
1907 case BGE_ASICREV_BCM5720:
1908 sc->bnx_flags |= BNX_FLAG_5717_PLUS | BNX_FLAG_57765_PLUS;
1911 case BGE_ASICREV_BCM5762:
1912 sc->bnx_flags |= BNX_FLAG_57765_PLUS;
1915 case BGE_ASICREV_BCM57765:
1916 case BGE_ASICREV_BCM57766:
1917 sc->bnx_flags |= BNX_FLAG_57765_FAMILY | BNX_FLAG_57765_PLUS;
1921 sc->bnx_flags |= BNX_FLAG_TSO;
1922 if (sc->bnx_asicrev == BGE_ASICREV_BCM5719 &&
1923 sc->bnx_chipid == BGE_CHIPID_BCM5719_A0)
1924 sc->bnx_flags &= ~BNX_FLAG_TSO;
1926 if (sc->bnx_asicrev == BGE_ASICREV_BCM5717 ||
1927 BNX_IS_57765_FAMILY(sc)) {
1929 * All BCM57785 and BCM5718 families chips have a bug that
1930 * under certain situation interrupt will not be enabled
1931 * even if status tag is written to interrupt mailbox.
1933 * While BCM5719 and BCM5720 have a hardware workaround
1934 * which could fix the above bug.
1935 * See the comment near BGE_PCIDMARWCTL_TAGGED_STATUS_WA in
1938 * For the rest of the chips in these two families, we have to
1939 * poll the status block at a high rate (currently every 10ms)
1940 * to check whether the interrupt is hosed.
1941 * See bnx_check_intr_*() for details.
1943 sc->bnx_flags |= BNX_FLAG_STATUSTAG_BUG;
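/*
 * Illustrative sketch only, not the actual bnx_check_intr_*()
 * code: the periodic check remembers the consumer indices seen
 * at the previous check and, if they are unchanged while work
 * is still pending, assumes the interrupt was lost and runs the
 * interrupt handler by hand, roughly:
 *
 *	if (considx unchanged since the last check &&
 *	    work is still pending)
 *		run the interrupt handler manually;
 *
 * The details (which indices are compared, how pending work is
 * detected) are in bnx_check_intr_*().
 */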
1946 sc->bnx_pciecap = pci_get_pciecap_ptr(sc->bnx_dev);
1947 if (sc->bnx_asicrev == BGE_ASICREV_BCM5719 ||
1948 sc->bnx_asicrev == BGE_ASICREV_BCM5720)
1949 pcie_set_max_readrq(dev, PCIEM_DEVCTL_MAX_READRQ_2048);
1951 pcie_set_max_readrq(dev, PCIEM_DEVCTL_MAX_READRQ_4096);
1952 device_printf(dev, "CHIP ID 0x%08x; "
1953 "ASIC REV 0x%02x; CHIP REV 0x%02x\n",
1954 sc->bnx_chipid, sc->bnx_asicrev, sc->bnx_chiprev);
1957 * Set various PHY quirk flags.
1960 capmask = MII_CAPMASK_DEFAULT;
1961 if (product == PCI_PRODUCT_BROADCOM_BCM57791 ||
1962 product == PCI_PRODUCT_BROADCOM_BCM57795) {
1964 capmask &= ~BMSR_EXTSTAT;
1967 mii_priv |= BRGPHY_FLAG_WIRESPEED;
1968 if (sc->bnx_chipid == BGE_CHIPID_BCM5762_A0)
1969 mii_priv |= BRGPHY_FLAG_5762_A0;
1971 /* Initialize if_name early so that if_printf can be used */
1972 ifp = &sc->arpcom.ac_if;
1973 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1975 /* Try to reset the chip. */
1978 if (bnx_chipinit(sc)) {
1979 device_printf(dev, "chip initialization failed\n");
1985 * Get station address
1987 error = bnx_get_eaddr(sc, ether_addr);
1989 device_printf(dev, "failed to read station address\n");
1993 /* Set up RX/TX ring counts and the interrupt count */
1994 bnx_setup_ring_cnt(sc);
1996 if ((sc->bnx_rx_retcnt == 1 && sc->bnx_tx_ringcnt == 1) ||
1997 (sc->bnx_rx_retcnt > 1 && sc->bnx_tx_ringcnt > 1)) {
1999 * The RX ring and the corresponding TX ring processing
2000 * should be on the same CPU, since they share the same
2003 sc->bnx_flags |= BNX_FLAG_RXTX_BUNDLE;
2005 device_printf(dev, "RX/TX bundle\n");
2006 if (sc->bnx_tx_ringcnt > 1) {
2008 * Multiple TX rings do not share a status block
2009 * with the link status, so the link status path
2010 * has to save its own status_tag.
2012 sc->bnx_flags |= BNX_FLAG_STATUS_HASTAG;
2014 device_printf(dev, "status needs tag\n");
2017 KKASSERT(sc->bnx_rx_retcnt > 1 && sc->bnx_tx_ringcnt == 1);
2019 device_printf(dev, "RX/TX not bundled\n");
2022 error = bnx_dma_alloc(dev);
2026 #ifdef IFPOLL_ENABLE
2027 if (sc->bnx_flags & BNX_FLAG_RXTX_BUNDLE) {
2029 * NPOLLING RX/TX CPU offset
2031 if (sc->bnx_rx_retcnt == ncpus2) {
2035 (sc->bnx_rx_retcnt * device_get_unit(dev)) % ncpus2;
2036 offset = device_getenv_int(dev, "npoll.offset",
2038 if (offset >= ncpus2 ||
2039 offset % sc->bnx_rx_retcnt != 0) {
2040 device_printf(dev, "invalid npoll.offset %d, "
2041 "use %d\n", offset, offset_def);
2042 offset = offset_def;
2045 sc->bnx_npoll_rxoff = offset;
2046 sc->bnx_npoll_txoff = offset;
2049 * NPOLLING RX CPU offset
2051 if (sc->bnx_rx_retcnt == ncpus2) {
2055 (sc->bnx_rx_retcnt * device_get_unit(dev)) % ncpus2;
2056 offset = device_getenv_int(dev, "npoll.rxoff",
2058 if (offset >= ncpus2 ||
2059 offset % sc->bnx_rx_retcnt != 0) {
2060 device_printf(dev, "invalid npoll.rxoff %d, "
2061 "use %d\n", offset, offset_def);
2062 offset = offset_def;
2065 sc->bnx_npoll_rxoff = offset;
2068 * NPOLLING TX CPU offset
2070 offset_def = device_get_unit(dev) % ncpus2;
2071 offset = device_getenv_int(dev, "npoll.txoff", offset_def);
2072 if (offset >= ncpus2) {
2073 device_printf(dev, "invalid npoll.txoff %d, use %d\n",
2074 offset, offset_def);
2075 offset = offset_def;
2077 sc->bnx_npoll_txoff = offset;
2079 #endif /* IFPOLL_ENABLE */
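/*
 * Usage sketch (assuming the device_getenv_int() lookups above
 * resolve to hw.<nameunit>.* tunables; "bnx0" is a hypothetical
 * unit): the polling CPU offsets could be pinned from
 * /boot/loader.conf with e.g.
 *
 *	hw.bnx0.npoll.offset="2"
 *	hw.bnx0.npoll.rxoff="0"
 *	hw.bnx0.npoll.txoff="1"
 *
 * Invalid values are rejected by the checks above and fall back
 * to the computed defaults.
 */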
2082 * Allocate interrupt
2084 error = bnx_alloc_intr(sc);
2088 /* Set up serializers */
2089 bnx_setup_serialize(sc);
2091 /* Set default tunable values. */
2092 sc->bnx_rx_coal_ticks = BNX_RX_COAL_TICKS_DEF;
2093 sc->bnx_tx_coal_ticks = BNX_TX_COAL_TICKS_DEF;
2094 sc->bnx_rx_coal_bds = BNX_RX_COAL_BDS_DEF;
2095 sc->bnx_rx_coal_bds_poll = sc->bnx_rx_ret_ring[0].bnx_rx_cntmax;
2096 sc->bnx_tx_coal_bds = BNX_TX_COAL_BDS_DEF;
2097 sc->bnx_tx_coal_bds_poll = BNX_TX_COAL_BDS_POLL_DEF;
2098 sc->bnx_rx_coal_bds_int = BNX_RX_COAL_BDS_INT_DEF;
2099 sc->bnx_tx_coal_bds_int = BNX_TX_COAL_BDS_INT_DEF;
2101 /* Set up ifnet structure */
2103 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2104 ifp->if_ioctl = bnx_ioctl;
2105 ifp->if_start = bnx_start;
2106 #ifdef IFPOLL_ENABLE
2107 ifp->if_npoll = bnx_npoll;
2109 ifp->if_init = bnx_init;
2110 ifp->if_serialize = bnx_serialize;
2111 ifp->if_deserialize = bnx_deserialize;
2112 ifp->if_tryserialize = bnx_tryserialize;
2114 ifp->if_serialize_assert = bnx_serialize_assert;
2116 ifp->if_mtu = ETHERMTU;
2117 ifp->if_capabilities = IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
2119 ifp->if_capabilities |= IFCAP_HWCSUM;
2120 ifp->if_hwassist = BNX_CSUM_FEATURES;
2121 if (sc->bnx_flags & BNX_FLAG_TSO) {
2122 ifp->if_capabilities |= IFCAP_TSO;
2123 ifp->if_hwassist |= CSUM_TSO;
2125 if (BNX_RSS_ENABLED(sc))
2126 ifp->if_capabilities |= IFCAP_RSS;
2127 ifp->if_capenable = ifp->if_capabilities;
2129 ifq_set_maxlen(&ifp->if_snd, BGE_TX_RING_CNT - 1);
2130 ifq_set_ready(&ifp->if_snd);
2131 ifq_set_subq_cnt(&ifp->if_snd, sc->bnx_tx_ringcnt);
2133 if (sc->bnx_tx_ringcnt > 1) {
2134 ifp->if_mapsubq = ifq_mapsubq_mask;
2135 ifq_set_subq_mask(&ifp->if_snd, sc->bnx_tx_ringcnt - 1);
2139 * Figure out what sort of media we have by checking the
2140 * hardware config word in the first 32k of NIC internal memory,
2141 * or fall back to examining the EEPROM if necessary.
2142 * Note: on some BCM5700 cards, this value appears to be unset.
2143 * If that's the case, we have to rely on identifying the NIC
2144 * by its PCI subsystem ID, as we do below for the SysKonnect
2147 if (bnx_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER) {
2148 hwcfg = bnx_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
2150 if (bnx_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
2152 device_printf(dev, "failed to read EEPROM\n");
2156 hwcfg = ntohl(hwcfg);
2159 /* The SysKonnect SK-9D41 is a 1000baseSX card. */
2160 if (pci_get_subvendor(dev) == PCI_PRODUCT_SCHNEIDERKOCH_SK_9D41 ||
2161 (hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER)
2162 sc->bnx_flags |= BNX_FLAG_TBI;
2165 if (sc->bnx_flags & BNX_FLAG_CPMU)
2166 sc->bnx_mi_mode = BGE_MIMODE_500KHZ_CONST;
2168 sc->bnx_mi_mode = BGE_MIMODE_BASE;
2170 /* Set up link status update handling */
2171 if (sc->bnx_flags & BNX_FLAG_TBI) {
2172 sc->bnx_link_upd = bnx_tbi_link_upd;
2173 sc->bnx_link_chg = BGE_MACSTAT_LINK_CHANGED;
2174 } else if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) {
2175 sc->bnx_link_upd = bnx_autopoll_link_upd;
2176 sc->bnx_link_chg = BGE_MACSTAT_LINK_CHANGED;
2178 sc->bnx_link_upd = bnx_copper_link_upd;
2179 sc->bnx_link_chg = BGE_MACSTAT_LINK_CHANGED;
2182 /* Set default PHY address */
2186 * PHY address mapping for various devices.
2188 * | F0 Cu | F0 Sr | F1 Cu | F1 Sr |
2189 * ---------+-------+-------+-------+-------+
2190 * BCM57XX | 1 | X | X | X |
2191 * BCM5704 | 1 | X | 1 | X |
2192 * BCM5717 | 1 | 8 | 2 | 9 |
2193 * BCM5719 | 1 | 8 | 2 | 9 |
2194 * BCM5720 | 1 | 8 | 2 | 9 |
2196 * Other addresses may respond but they are not
2197 * IEEE compliant PHYs and should be ignored.
2199 if (BNX_IS_5717_PLUS(sc)) {
2202 f = pci_get_function(dev);
2203 if (sc->bnx_chipid == BGE_CHIPID_BCM5717_A0) {
2204 if (CSR_READ_4(sc, BGE_SGDIG_STS) &
2205 BGE_SGDIGSTS_IS_SERDES)
2206 sc->bnx_phyno = f + 8;
2208 sc->bnx_phyno = f + 1;
2210 if (CSR_READ_4(sc, BGE_CPMU_PHY_STRAP) &
2211 BGE_CPMU_PHY_STRAP_IS_SERDES)
2212 sc->bnx_phyno = f + 8;
2214 sc->bnx_phyno = f + 1;
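/*
 * Worked example from the table above: on a BCM5719, PCI
 * function 1 strapped for SerDes yields PHY address 1 + 8 = 9,
 * while a copper function 0 yields 0 + 1 = 1.
 */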
2218 if (sc->bnx_flags & BNX_FLAG_TBI) {
2219 ifmedia_init(&sc->bnx_ifmedia, IFM_IMASK,
2220 bnx_ifmedia_upd, bnx_ifmedia_sts);
2221 ifmedia_add(&sc->bnx_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
2222 ifmedia_add(&sc->bnx_ifmedia,
2223 IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL);
2224 ifmedia_add(&sc->bnx_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
2225 ifmedia_set(&sc->bnx_ifmedia, IFM_ETHER|IFM_AUTO);
2226 sc->bnx_ifmedia.ifm_media = sc->bnx_ifmedia.ifm_cur->ifm_media;
2228 struct mii_probe_args mii_args;
2230 mii_probe_args_init(&mii_args, bnx_ifmedia_upd, bnx_ifmedia_sts);
2231 mii_args.mii_probemask = 1 << sc->bnx_phyno;
2232 mii_args.mii_capmask = capmask;
2233 mii_args.mii_privtag = MII_PRIVTAG_BRGPHY;
2234 mii_args.mii_priv = mii_priv;
2236 error = mii_probe(dev, &sc->bnx_miibus, &mii_args);
2238 device_printf(dev, "MII without any PHY!\n");
2244 * Create sysctl nodes.
2246 sysctl_ctx_init(&sc->bnx_sysctl_ctx);
2247 sc->bnx_sysctl_tree = SYSCTL_ADD_NODE(&sc->bnx_sysctl_ctx,
2248 SYSCTL_STATIC_CHILDREN(_hw),
2250 device_get_nameunit(dev),
2252 if (sc->bnx_sysctl_tree == NULL) {
2253 device_printf(dev, "can't add sysctl node\n");
2258 SYSCTL_ADD_INT(&sc->bnx_sysctl_ctx,
2259 SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
2260 "rx_rings", CTLFLAG_RD, &sc->bnx_rx_retcnt, 0, "# of RX rings");
2261 SYSCTL_ADD_INT(&sc->bnx_sysctl_ctx,
2262 SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
2263 "tx_rings", CTLFLAG_RD, &sc->bnx_tx_ringcnt, 0, "# of TX rings");
2265 SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
2266 SYSCTL_CHILDREN(sc->bnx_sysctl_tree),
2267 OID_AUTO, "rx_coal_ticks",
2268 CTLTYPE_INT | CTLFLAG_RW,
2269 sc, 0, bnx_sysctl_rx_coal_ticks, "I",
2270 "Receive coalescing ticks (usec).");
2271 SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
2272 SYSCTL_CHILDREN(sc->bnx_sysctl_tree),
2273 OID_AUTO, "tx_coal_ticks",
2274 CTLTYPE_INT | CTLFLAG_RW,
2275 sc, 0, bnx_sysctl_tx_coal_ticks, "I",
2276 "Transmit coalescing ticks (usec).");
2277 SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
2278 SYSCTL_CHILDREN(sc->bnx_sysctl_tree),
2279 OID_AUTO, "rx_coal_bds",
2280 CTLTYPE_INT | CTLFLAG_RW,
2281 sc, 0, bnx_sysctl_rx_coal_bds, "I",
2282 "Receive max coalesced BD count.");
2283 SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
2284 SYSCTL_CHILDREN(sc->bnx_sysctl_tree),
2285 OID_AUTO, "rx_coal_bds_poll",
2286 CTLTYPE_INT | CTLFLAG_RW,
2287 sc, 0, bnx_sysctl_rx_coal_bds_poll, "I",
2288 "Receive max coalesced BD count in polling.");
2289 SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
2290 SYSCTL_CHILDREN(sc->bnx_sysctl_tree),
2291 OID_AUTO, "tx_coal_bds",
2292 CTLTYPE_INT | CTLFLAG_RW,
2293 sc, 0, bnx_sysctl_tx_coal_bds, "I",
2294 "Transmit max coalesced BD count.");
2295 SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
2296 SYSCTL_CHILDREN(sc->bnx_sysctl_tree),
2297 OID_AUTO, "tx_coal_bds_poll",
2298 CTLTYPE_INT | CTLFLAG_RW,
2299 sc, 0, bnx_sysctl_tx_coal_bds_poll, "I",
2300 "Transmit max coalesced BD count in polling.");
2302 * A common design characteristic of many Broadcom
2303 * client controllers is that they only support a
2304 * single outstanding DMA read operation on the PCIe
2305 * bus. This means that it will take twice as long to
2306 * fetch a TX frame that is split into header and
2307 * payload buffers as it does to fetch a single,
2308 * contiguous TX frame (2 reads vs. 1 read). For these
2309 * controllers, coalescing buffers to reduce the number
2310 * of memory reads is an effective way to get maximum
2311 * performance (about 940Mbps). Without collapsing TX
2312 * buffers the maximum TCP bulk transfer performance
2313 * is about 850Mbps. However, forcibly coalescing mbufs
2314 * consumes a lot of CPU cycles, so leave it off by
2315 * default.
2317 SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
2318 SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
2319 "force_defrag", CTLTYPE_INT | CTLFLAG_RW,
2320 sc, 0, bnx_sysctl_force_defrag, "I",
2321 "Force defragment on TX path");
2323 SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
2324 SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
2325 "tx_wreg", CTLTYPE_INT | CTLFLAG_RW,
2326 sc, 0, bnx_sysctl_tx_wreg, "I",
2327 "# of segments before writing to hardware register");
2329 SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
2330 SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
2331 "std_refill", CTLTYPE_INT | CTLFLAG_RW,
2332 sc, 0, bnx_sysctl_std_refill, "I",
2333 "# of packets received before scheduling standard refilling");
2335 SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
2336 SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
2337 "rx_coal_bds_int", CTLTYPE_INT | CTLFLAG_RW,
2338 sc, 0, bnx_sysctl_rx_coal_bds_int, "I",
2339 "Receive max coalesced BD count during interrupt.");
2340 SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
2341 SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
2342 "tx_coal_bds_int", CTLTYPE_INT | CTLFLAG_RW,
2343 sc, 0, bnx_sysctl_tx_coal_bds_int, "I",
2344 "Transmit max coalesced BD count during interrupt.");
2346 #ifdef IFPOLL_ENABLE
2347 if (sc->bnx_flags & BNX_FLAG_RXTX_BUNDLE) {
2348 SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
2349 SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
2350 "npoll_offset", CTLTYPE_INT | CTLFLAG_RW,
2351 sc, 0, bnx_sysctl_npoll_offset, "I",
2352 "NPOLLING cpu offset");
2354 SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
2355 SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
2356 "npoll_rxoff", CTLTYPE_INT | CTLFLAG_RW,
2357 sc, 0, bnx_sysctl_npoll_rxoff, "I",
2358 "NPOLLING RX cpu offset");
2359 SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
2360 SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
2361 "npoll_txoff", CTLTYPE_INT | CTLFLAG_RW,
2362 sc, 0, bnx_sysctl_npoll_txoff, "I",
2363 "NPOLLING TX cpu offset");
2367 #ifdef BNX_RSS_DEBUG
2368 SYSCTL_ADD_INT(&sc->bnx_sysctl_ctx,
2369 SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
2370 "std_refill_mask", CTLFLAG_RD,
2371 &sc->bnx_rx_std_ring.bnx_rx_std_refill, 0, "");
2372 SYSCTL_ADD_INT(&sc->bnx_sysctl_ctx,
2373 SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
2374 "std_used", CTLFLAG_RD,
2375 &sc->bnx_rx_std_ring.bnx_rx_std_used, 0, "");
2376 SYSCTL_ADD_INT(&sc->bnx_sysctl_ctx,
2377 SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
2378 "rss_debug", CTLFLAG_RW, &sc->bnx_rss_debug, 0, "");
2379 for (i = 0; i < sc->bnx_rx_retcnt; ++i) {
2380 ksnprintf(desc, sizeof(desc), "rx_pkt%d", i);
2381 SYSCTL_ADD_ULONG(&sc->bnx_sysctl_ctx,
2382 SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
2383 desc, CTLFLAG_RW, &sc->bnx_rx_ret_ring[i].bnx_rx_pkt, "");
2385 ksnprintf(desc, sizeof(desc), "rx_force_sched%d", i);
2386 SYSCTL_ADD_ULONG(&sc->bnx_sysctl_ctx,
2387 SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
2389 &sc->bnx_rx_ret_ring[i].bnx_rx_force_sched, "");
2392 #ifdef BNX_TSS_DEBUG
2393 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) {
2394 ksnprintf(desc, sizeof(desc), "tx_pkt%d", i);
2395 SYSCTL_ADD_ULONG(&sc->bnx_sysctl_ctx,
2396 SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
2397 desc, CTLFLAG_RW, &sc->bnx_tx_ring[i].bnx_tx_pkt, "");
2401 SYSCTL_ADD_ULONG(&sc->bnx_sysctl_ctx,
2402 SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
2403 "norxbds", CTLFLAG_RW, &sc->bnx_norxbds, "");
2405 SYSCTL_ADD_ULONG(&sc->bnx_sysctl_ctx,
2406 SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
2407 "errors", CTLFLAG_RW, &sc->bnx_errors, "");
2409 #ifdef BNX_TSO_DEBUG
2410 for (i = 0; i < BNX_TSO_NSTATS; ++i) {
2411 ksnprintf(desc, sizeof(desc), "tso%d", i + 1);
2412 SYSCTL_ADD_ULONG(&sc->bnx_sysctl_ctx,
2413 SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
2414 desc, CTLFLAG_RW, &sc->bnx_tsosegs[i], "");
2419 * Call MI attach routine.
2421 ether_ifattach(ifp, ether_addr, NULL);
2423 /* Set up TX rings and subqueues */
2424 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) {
2425 struct ifaltq_subque *ifsq = ifq_get_subq(&ifp->if_snd, i);
2426 struct bnx_tx_ring *txr = &sc->bnx_tx_ring[i];
2428 ifsq_set_cpuid(ifsq, txr->bnx_tx_cpuid);
2429 ifsq_set_hw_serialize(ifsq, &txr->bnx_tx_serialize);
2430 ifsq_set_priv(ifsq, txr);
2431 txr->bnx_ifsq = ifsq;
2433 ifsq_watchdog_init(&txr->bnx_tx_watchdog, ifsq, bnx_watchdog);
2436 device_printf(dev, "txr %d -> cpu%d\n", i,
2441 error = bnx_setup_intr(sc);
2443 ether_ifdetach(ifp);
2446 bnx_set_tick_cpuid(sc, FALSE);
2449 * Create the standard RX ring refill thread
2451 std_cpuid_def = device_get_unit(dev) % ncpus;
2452 std_cpuid = device_getenv_int(dev, "std.cpuid", std_cpuid_def);
2453 if (std_cpuid < 0 || std_cpuid >= ncpus) {
2454 device_printf(dev, "invalid std.cpuid %d, use %d\n",
2455 std_cpuid, std_cpuid_def);
2456 std_cpuid = std_cpuid_def;
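/*
 * Usage sketch (assuming the hw.<nameunit>.* tunable convention;
 * "bnx0" is hypothetical): the refill thread can be pinned with
 * e.g. hw.bnx0.std.cpuid="3" in /boot/loader.conf; out-of-range
 * values fall back to the default computed above.
 */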
2459 std = &sc->bnx_rx_std_ring;
2460 lwkt_create(bnx_rx_std_refill_ithread, std, NULL,
2461 &std->bnx_rx_std_ithread, TDF_NOSTART | TDF_INTTHREAD, std_cpuid,
2462 "%s std", device_get_nameunit(dev));
2463 lwkt_setpri(&std->bnx_rx_std_ithread, TDPRI_INT_MED);
2464 std->bnx_rx_std_ithread.td_preemptable = lwkt_preempt;
2465 sc->bnx_flags |= BNX_FLAG_STD_THREAD;
2474 bnx_detach(device_t dev)
2476 struct bnx_softc *sc = device_get_softc(dev);
2478 if (device_is_attached(dev)) {
2479 struct ifnet *ifp = &sc->arpcom.ac_if;
2481 ifnet_serialize_all(ifp);
2484 bnx_teardown_intr(sc, sc->bnx_intr_cnt);
2485 ifnet_deserialize_all(ifp);
2487 ether_ifdetach(ifp);
2490 if (sc->bnx_flags & BNX_FLAG_STD_THREAD) {
2491 struct bnx_rx_std_ring *std = &sc->bnx_rx_std_ring;
2493 tsleep_interlock(std, 0);
2495 if (std->bnx_rx_std_ithread.td_gd == mycpu) {
2496 bnx_rx_std_refill_stop(std);
2498 lwkt_send_ipiq(std->bnx_rx_std_ithread.td_gd,
2499 bnx_rx_std_refill_stop, std);
2502 tsleep(std, PINTERLOCKED, "bnx_detach", 0);
2504 device_printf(dev, "RX std ithread exited\n");
2506 lwkt_synchronize_ipiqs("bnx_detach_ipiq");
2509 if (sc->bnx_flags & BNX_FLAG_TBI)
2510 ifmedia_removeall(&sc->bnx_ifmedia);
2512 device_delete_child(dev, sc->bnx_miibus);
2513 bus_generic_detach(dev);
2517 if (sc->bnx_msix_mem_res != NULL) {
2518 bus_release_resource(dev, SYS_RES_MEMORY, sc->bnx_msix_mem_rid,
2519 sc->bnx_msix_mem_res);
2521 if (sc->bnx_res != NULL) {
2522 bus_release_resource(dev, SYS_RES_MEMORY,
2523 BGE_PCI_BAR0, sc->bnx_res);
2526 if (sc->bnx_sysctl_tree != NULL)
2527 sysctl_ctx_free(&sc->bnx_sysctl_ctx);
2531 if (sc->bnx_serialize != NULL)
2532 kfree(sc->bnx_serialize, M_DEVBUF);
2538 bnx_reset(struct bnx_softc *sc)
2541 uint32_t cachesize, command, pcistate, reset;
2542 void (*write_op)(struct bnx_softc *, uint32_t, uint32_t);
2548 write_op = bnx_writemem_direct;
2550 /* Save some important PCI state. */
2551 cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
2552 command = pci_read_config(dev, BGE_PCI_CMD, 4);
2553 pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);
2555 pci_write_config(dev, BGE_PCI_MISC_CTL,
2556 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2557 BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW|
2558 BGE_PCIMISCCTL_TAGGED_STATUS, 4);
2560 /* Disable fastboot on controllers that support it. */
2562 if_printf(&sc->arpcom.ac_if, "Disabling fastboot\n");
2563 CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0x0);
2566 * Write the magic number to SRAM at offset 0xB50.
2567 * When firmware finishes its initialization it will
2568 * write ~BGE_MAGIC_NUMBER to the same location.
2570 bnx_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
2572 reset = BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1);
2574 /* XXX: Broadcom Linux driver. */
2575 /* Force PCI-E 1.0a mode */
2576 if (!BNX_IS_57765_PLUS(sc) &&
2577 CSR_READ_4(sc, BGE_PCIE_PHY_TSTCTL) ==
2578 (BGE_PCIE_PHY_TSTCTL_PSCRAM |
2579 BGE_PCIE_PHY_TSTCTL_PCIE10)) {
2580 CSR_WRITE_4(sc, BGE_PCIE_PHY_TSTCTL,
2581 BGE_PCIE_PHY_TSTCTL_PSCRAM);
2583 if (sc->bnx_chipid != BGE_CHIPID_BCM5750_A0) {
2584 /* Prevent PCIE link training during global reset */
2585 CSR_WRITE_4(sc, BGE_MISC_CFG, (1<<29));
2590 * Set GPHY Power Down Override to leave GPHY
2591 * powered up in D0 uninitialized.
2593 if ((sc->bnx_flags & BNX_FLAG_CPMU) == 0)
2594 reset |= BGE_MISCCFG_GPHY_PD_OVERRIDE;
2596 /* Issue global reset */
2597 write_op(sc, BGE_MISC_CFG, reset);
2601 /* XXX: Broadcom Linux driver. */
2602 if (sc->bnx_chipid == BGE_CHIPID_BCM5750_A0) {
2605 DELAY(500000); /* wait for link training to complete */
2606 v = pci_read_config(dev, 0xc4, 4);
2607 pci_write_config(dev, 0xc4, v | (1<<15), 4);
2610 devctl = pci_read_config(dev, sc->bnx_pciecap + PCIER_DEVCTRL, 2);
2612 /* Disable no snoop and disable relaxed ordering. */
2613 devctl &= ~(PCIEM_DEVCTL_RELAX_ORDER | PCIEM_DEVCTL_NOSNOOP);
2615 /* Old PCI-E chips only support a 128-byte Max Payload Size. */
2616 if ((sc->bnx_flags & BNX_FLAG_CPMU) == 0) {
2617 devctl &= ~PCIEM_DEVCTL_MAX_PAYLOAD_MASK;
2618 devctl |= PCIEM_DEVCTL_MAX_PAYLOAD_128;
2621 pci_write_config(dev, sc->bnx_pciecap + PCIER_DEVCTRL,
2624 /* Clear error status. */
2625 pci_write_config(dev, sc->bnx_pciecap + PCIER_DEVSTS,
2626 PCIEM_DEVSTS_CORR_ERR |
2627 PCIEM_DEVSTS_NFATAL_ERR |
2628 PCIEM_DEVSTS_FATAL_ERR |
2629 PCIEM_DEVSTS_UNSUPP_REQ, 2);
2631 /* Reset some of the PCI state that got zapped by reset */
2632 pci_write_config(dev, BGE_PCI_MISC_CTL,
2633 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2634 BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW|
2635 BGE_PCIMISCCTL_TAGGED_STATUS, 4);
2636 pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
2637 pci_write_config(dev, BGE_PCI_CMD, command, 4);
2638 write_op(sc, BGE_MISC_CFG, (65 << 1));
2640 /* Enable memory arbiter */
2641 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
2644 * Poll until we see the 1's complement of the magic number.
2645 * This indicates that the firmware initialization is complete.
2647 for (i = 0; i < BNX_FIRMWARE_TIMEOUT; i++) {
2648 val = bnx_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
2649 if (val == ~BGE_MAGIC_NUMBER)
2653 if (i == BNX_FIRMWARE_TIMEOUT) {
2654 if_printf(&sc->arpcom.ac_if, "firmware handshake "
2655 "timed out, found 0x%08x\n", val);
2658 /* BCM57765 A0 needs additional time before accessing. */
2659 if (sc->bnx_chipid == BGE_CHIPID_BCM57765_A0)
2663 * XXX Wait for the value of the PCISTATE register to
2664 * return to its original pre-reset state. This is a
2665 * fairly good indicator of reset completion. If we don't
2666 * wait for the reset to fully complete, trying to read
2667 * from the device's non-PCI registers may yield garbage
2670 for (i = 0; i < BNX_TIMEOUT; i++) {
2671 if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate)
2676 /* Fix up byte swapping */
2677 CSR_WRITE_4(sc, BGE_MODE_CTL, bnx_dma_swap_options(sc));
2679 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
2682 * The 5704 in TBI mode apparently needs some special
2683 * adjustment to ensure the SERDES drive level is set
2686 if (sc->bnx_asicrev == BGE_ASICREV_BCM5704 &&
2687 (sc->bnx_flags & BNX_FLAG_TBI)) {
2690 serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG);
2691 serdescfg = (serdescfg & ~0xFFF) | 0x880;
2692 CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg);
2695 CSR_WRITE_4(sc, BGE_MI_MODE,
2696 sc->bnx_mi_mode & ~BGE_MIMODE_AUTOPOLL);
2699 /* XXX: Broadcom Linux driver. */
2700 if (!BNX_IS_57765_PLUS(sc)) {
2703 /* Enable Data FIFO protection. */
2704 v = CSR_READ_4(sc, BGE_PCIE_TLDLPL_PORT);
2705 CSR_WRITE_4(sc, BGE_PCIE_TLDLPL_PORT, v | (1 << 25));
2710 if (sc->bnx_asicrev == BGE_ASICREV_BCM5720) {
2711 BNX_CLRBIT(sc, BGE_CPMU_CLCK_ORIDE,
2712 CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
2717 * Frame reception handling. This is called if there's a frame
2718 * on the receive return list.
2720 * Note: we have to be able to handle two possibilities here:
2721 * 1) the frame is from the jumbo receive ring
2722 * 2) the frame is from the standard receive ring
2726 bnx_rxeof(struct bnx_rx_ret_ring *ret, uint16_t rx_prod, int count)
2728 struct bnx_softc *sc = ret->bnx_sc;
2729 struct bnx_rx_std_ring *std = ret->bnx_std;
2730 struct ifnet *ifp = &sc->arpcom.ac_if;
2733 while (ret->bnx_rx_saved_considx != rx_prod && count != 0) {
2734 struct pktinfo pi0, *pi = NULL;
2735 struct bge_rx_bd *cur_rx;
2736 struct bnx_rx_buf *rb;
2738 struct mbuf *m = NULL;
2739 uint16_t vlan_tag = 0;
2744 cur_rx = &ret->bnx_rx_ret_ring[ret->bnx_rx_saved_considx];
2746 rxidx = cur_rx->bge_idx;
2747 KKASSERT(rxidx < BGE_STD_RX_RING_CNT);
2749 BNX_INC(ret->bnx_rx_saved_considx, BNX_RETURN_RING_CNT);
2750 #ifdef BNX_RSS_DEBUG
2754 if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
2756 vlan_tag = cur_rx->bge_vlan_tag;
2759 if (ret->bnx_rx_cnt >= ret->bnx_rx_cntmax) {
2760 atomic_add_int(&std->bnx_rx_std_used, std_used);
2763 bnx_rx_std_refill_sched(ret, std);
2768 rb = &std->bnx_rx_std_buf[rxidx];
2769 m = rb->bnx_rx_mbuf;
2770 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2771 IFNET_STAT_INC(ifp, ierrors, 1);
2773 rb->bnx_rx_refilled = 1;
2776 if (bnx_newbuf_std(ret, rxidx, 0)) {
2777 IFNET_STAT_INC(ifp, ierrors, 1);
2781 IFNET_STAT_INC(ifp, ipackets, 1);
2782 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
2783 m->m_pkthdr.rcvif = ifp;
2785 if ((ifp->if_capenable & IFCAP_RXCSUM) &&
2786 (cur_rx->bge_flags & BGE_RXBDFLAG_IPV6) == 0) {
2787 if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
2788 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2789 if ((cur_rx->bge_error_flag &
2790 BGE_RXERRFLAG_IP_CSUM_NOK) == 0)
2791 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2793 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) {
2794 m->m_pkthdr.csum_data =
2795 cur_rx->bge_tcp_udp_csum;
2796 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
2800 if (ifp->if_capenable & IFCAP_RSS) {
2801 pi = bnx_rss_info(&pi0, cur_rx);
2803 (cur_rx->bge_flags & BGE_RXBDFLAG_RSS_HASH)) {
2804 m->m_flags |= M_HASH;
2806 toeplitz_hash(cur_rx->bge_hash);
2811 * If we received a packet with a vlan tag, pass it
2812 * to vlan_input() instead of ether_input().
2815 m->m_flags |= M_VLANTAG;
2816 m->m_pkthdr.ether_vlantag = vlan_tag;
2818 ether_input_pkt(ifp, m, pi);
2820 bnx_writembx(sc, ret->bnx_rx_mbx, ret->bnx_rx_saved_considx);
2825 cur_std_used = atomic_fetchadd_int(&std->bnx_rx_std_used,
2827 if (cur_std_used + std_used >= (BGE_STD_RX_RING_CNT / 2)) {
2828 #ifdef BNX_RSS_DEBUG
2829 ret->bnx_rx_force_sched++;
2831 bnx_rx_std_refill_sched(ret, std);
2837 bnx_txeof(struct bnx_tx_ring *txr, uint16_t tx_cons)
2839 struct ifnet *ifp = &txr->bnx_sc->arpcom.ac_if;
2842 * Go through our tx ring and free mbufs for those
2843 * frames that have been sent.
2845 while (txr->bnx_tx_saved_considx != tx_cons) {
2846 struct bnx_tx_buf *buf;
2849 idx = txr->bnx_tx_saved_considx;
2850 buf = &txr->bnx_tx_buf[idx];
2851 if (buf->bnx_tx_mbuf != NULL) {
2852 IFNET_STAT_INC(ifp, opackets, 1);
2853 #ifdef BNX_TSS_DEBUG
2856 bus_dmamap_unload(txr->bnx_tx_mtag,
2857 buf->bnx_tx_dmamap);
2858 m_freem(buf->bnx_tx_mbuf);
2859 buf->bnx_tx_mbuf = NULL;
2862 BNX_INC(txr->bnx_tx_saved_considx, BGE_TX_RING_CNT);
2865 if ((BGE_TX_RING_CNT - txr->bnx_tx_cnt) >=
2866 (BNX_NSEG_RSVD + BNX_NSEG_SPARE))
2867 ifsq_clr_oactive(txr->bnx_ifsq);
2869 if (txr->bnx_tx_cnt == 0)
2870 txr->bnx_tx_watchdog.wd_timer = 0;
2872 if (!ifsq_is_empty(txr->bnx_ifsq))
2873 ifsq_devstart(txr->bnx_ifsq);
2877 bnx_handle_status(struct bnx_softc *sc)
2882 status = *sc->bnx_hw_status;
2884 if (status & BGE_STATFLAG_ERROR) {
2890 val = CSR_READ_4(sc, BGE_FLOW_ATTN);
2891 if (val & ~BGE_FLOWATTN_MB_LOWAT) {
2892 if_printf(&sc->arpcom.ac_if,
2893 "flow attn 0x%08x\n", val);
2897 val = CSR_READ_4(sc, BGE_MSI_STATUS);
2898 if (val & ~BGE_MSISTAT_MSI_PCI_REQ) {
2899 if_printf(&sc->arpcom.ac_if,
2900 "msi status 0x%08x\n", val);
2904 val = CSR_READ_4(sc, BGE_RDMA_STATUS);
2906 if_printf(&sc->arpcom.ac_if,
2907 "rmda status 0x%08x\n", val);
2911 val = CSR_READ_4(sc, BGE_WDMA_STATUS);
2913 if_printf(&sc->arpcom.ac_if,
2914 "wdma status 0x%08x\n", val);
2919 bnx_serialize_skipmain(sc);
2921 bnx_deserialize_skipmain(sc);
2926 if ((status & BGE_STATFLAG_LINKSTATE_CHANGED) || sc->bnx_link_evt) {
2928 if_printf(&sc->arpcom.ac_if, "link change, "
2929 "link_evt %d\n", sc->bnx_link_evt);
2938 #ifdef IFPOLL_ENABLE
2941 bnx_npoll_rx(struct ifnet *ifp __unused, void *xret, int cycle)
2943 struct bnx_rx_ret_ring *ret = xret;
2946 ASSERT_SERIALIZED(&ret->bnx_rx_ret_serialize);
2948 ret->bnx_saved_status_tag = *ret->bnx_hw_status_tag;
2951 rx_prod = *ret->bnx_rx_considx;
2952 if (ret->bnx_rx_saved_considx != rx_prod)
2953 bnx_rxeof(ret, rx_prod, cycle);
2957 bnx_npoll_tx_notag(struct ifnet *ifp __unused, void *xtxr, int cycle __unused)
2959 struct bnx_tx_ring *txr = xtxr;
2962 ASSERT_SERIALIZED(&txr->bnx_tx_serialize);
2964 tx_cons = *txr->bnx_tx_considx;
2965 if (txr->bnx_tx_saved_considx != tx_cons)
2966 bnx_txeof(txr, tx_cons);
2970 bnx_npoll_tx(struct ifnet *ifp, void *xtxr, int cycle)
2972 struct bnx_tx_ring *txr = xtxr;
2974 ASSERT_SERIALIZED(&txr->bnx_tx_serialize);
2976 txr->bnx_saved_status_tag = *txr->bnx_hw_status_tag;
2978 bnx_npoll_tx_notag(ifp, txr, cycle);
2982 bnx_npoll_status_notag(struct ifnet *ifp)
2984 struct bnx_softc *sc = ifp->if_softc;
2986 ASSERT_SERIALIZED(&sc->bnx_main_serialize);
2988 if (bnx_handle_status(sc)) {
2990 * Status changes have been handled; force the chip to
2991 * update the status block so that it reflects whether
2992 * there are more status changes, else stale status
2993 * changes would be seen indefinitely.
2995 BNX_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
3000 bnx_npoll_status(struct ifnet *ifp)
3002 struct bnx_softc *sc = ifp->if_softc;
3004 ASSERT_SERIALIZED(&sc->bnx_main_serialize);
3006 sc->bnx_saved_status_tag = *sc->bnx_hw_status_tag;
3008 bnx_npoll_status_notag(ifp);
3012 bnx_npoll(struct ifnet *ifp, struct ifpoll_info *info)
3014 struct bnx_softc *sc = ifp->if_softc;
3017 ASSERT_IFNET_SERIALIZED_ALL(ifp);
3020 if (sc->bnx_flags & BNX_FLAG_STATUS_HASTAG)
3021 info->ifpi_status.status_func = bnx_npoll_status;
3023 info->ifpi_status.status_func = bnx_npoll_status_notag;
3024 info->ifpi_status.serializer = &sc->bnx_main_serialize;
3026 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) {
3027 struct bnx_tx_ring *txr = &sc->bnx_tx_ring[i];
3028 int idx = i + sc->bnx_npoll_txoff;
3030 KKASSERT(idx < ncpus2);
3031 if (sc->bnx_flags & BNX_FLAG_RXTX_BUNDLE) {
3032 info->ifpi_tx[idx].poll_func =
3035 info->ifpi_tx[idx].poll_func = bnx_npoll_tx;
3037 info->ifpi_tx[idx].arg = txr;
3038 info->ifpi_tx[idx].serializer = &txr->bnx_tx_serialize;
3039 ifsq_set_cpuid(txr->bnx_ifsq, idx);
3042 for (i = 0; i < sc->bnx_rx_retcnt; ++i) {
3043 struct bnx_rx_ret_ring *ret = &sc->bnx_rx_ret_ring[i];
3044 int idx = i + sc->bnx_npoll_rxoff;
3046 KKASSERT(idx < ncpus2);
3047 info->ifpi_rx[idx].poll_func = bnx_npoll_rx;
3048 info->ifpi_rx[idx].arg = ret;
3049 info->ifpi_rx[idx].serializer =
3050 &ret->bnx_rx_ret_serialize;
3053 if (ifp->if_flags & IFF_RUNNING) {
3054 bnx_disable_intr(sc);
3055 bnx_set_tick_cpuid(sc, TRUE);
3057 sc->bnx_coal_chg = BNX_TX_COAL_BDS_CHG |
3058 BNX_RX_COAL_BDS_CHG;
3059 bnx_coal_change(sc);
3062 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) {
3063 ifsq_set_cpuid(sc->bnx_tx_ring[i].bnx_ifsq,
3064 sc->bnx_tx_ring[i].bnx_tx_cpuid);
3066 if (ifp->if_flags & IFF_RUNNING) {
3067 sc->bnx_coal_chg = BNX_TX_COAL_BDS_CHG |
3068 BNX_RX_COAL_BDS_CHG;
3069 bnx_coal_change(sc);
3071 bnx_enable_intr(sc);
3072 bnx_set_tick_cpuid(sc, FALSE);
3077 #endif /* IFPOLL_ENABLE */
3080 bnx_intr_legacy(void *xsc)
3082 struct bnx_softc *sc = xsc;
3083 struct bnx_rx_ret_ring *ret = &sc->bnx_rx_ret_ring[0];
3085 if (ret->bnx_saved_status_tag == *ret->bnx_hw_status_tag) {
3088 val = pci_read_config(sc->bnx_dev, BGE_PCI_PCISTATE, 4);
3089 if (val & BGE_PCISTAT_INTR_NOTACT)
3095 * The interrupt will have to be disabled if tagged status
3096 * is used, else the interrupt will always be asserted on
3097 * certain chips (at least on BCM5750 AX/BX).
3099 bnx_writembx(sc, BGE_MBX_IRQ0_LO, 1);
3111 bnx_intr(struct bnx_softc *sc)
3113 struct ifnet *ifp = &sc->arpcom.ac_if;
3114 struct bnx_rx_ret_ring *ret = &sc->bnx_rx_ret_ring[0];
3116 ASSERT_SERIALIZED(&sc->bnx_main_serialize);
3118 ret->bnx_saved_status_tag = *ret->bnx_hw_status_tag;
3120 * Use a load fence to ensure that status_tag is saved
3121 * before rx_prod, tx_cons and status.
3125 bnx_handle_status(sc);
3127 if (ifp->if_flags & IFF_RUNNING) {
3128 struct bnx_tx_ring *txr = &sc->bnx_tx_ring[0];
3129 uint16_t rx_prod, tx_cons;
3131 lwkt_serialize_enter(&ret->bnx_rx_ret_serialize);
3132 rx_prod = *ret->bnx_rx_considx;
3133 if (ret->bnx_rx_saved_considx != rx_prod)
3134 bnx_rxeof(ret, rx_prod, -1);
3135 lwkt_serialize_exit(&ret->bnx_rx_ret_serialize);
3137 lwkt_serialize_enter(&txr->bnx_tx_serialize);
3138 tx_cons = *txr->bnx_tx_considx;
3139 if (txr->bnx_tx_saved_considx != tx_cons)
3140 bnx_txeof(txr, tx_cons);
3141 lwkt_serialize_exit(&txr->bnx_tx_serialize);
3144 bnx_writembx(sc, BGE_MBX_IRQ0_LO, ret->bnx_saved_status_tag << 24);
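/*
 * Hedged reading of the tagged-status protocol (see the chip
 * documentation for the authoritative description): writing
 * (tag << 24) to the IRQ mailbox re-enables the interrupt and
 * acknowledges the status block up to `tag'; if the chip has
 * already produced a newer tag, it raises another interrupt
 * immediately, so no status update is lost.
 */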
3148 bnx_msix_tx_status(void *xtxr)
3150 struct bnx_tx_ring *txr = xtxr;
3151 struct bnx_softc *sc = txr->bnx_sc;
3152 struct ifnet *ifp = &sc->arpcom.ac_if;
3154 ASSERT_SERIALIZED(&sc->bnx_main_serialize);
3156 txr->bnx_saved_status_tag = *txr->bnx_hw_status_tag;
3158 * Use a load fence to ensure that status_tag is saved
3159 * before tx_cons and status.
3163 bnx_handle_status(sc);
3165 if (ifp->if_flags & IFF_RUNNING) {
3168 lwkt_serialize_enter(&txr->bnx_tx_serialize);
3169 tx_cons = *txr->bnx_tx_considx;
3170 if (txr->bnx_tx_saved_considx != tx_cons)
3171 bnx_txeof(txr, tx_cons);
3172 lwkt_serialize_exit(&txr->bnx_tx_serialize);
3175 bnx_writembx(sc, BGE_MBX_IRQ0_LO, txr->bnx_saved_status_tag << 24);
3179 bnx_msix_rx(void *xret)
3181 struct bnx_rx_ret_ring *ret = xret;
3184 ASSERT_SERIALIZED(&ret->bnx_rx_ret_serialize);
3186 ret->bnx_saved_status_tag = *ret->bnx_hw_status_tag;
3188 * Use a load fence to ensure that status_tag is saved
3193 rx_prod = *ret->bnx_rx_considx;
3194 if (ret->bnx_rx_saved_considx != rx_prod)
3195 bnx_rxeof(ret, rx_prod, -1);
3197 bnx_writembx(ret->bnx_sc, ret->bnx_msix_mbx,
3198 ret->bnx_saved_status_tag << 24);
3202 bnx_msix_rxtx(void *xret)
3204 struct bnx_rx_ret_ring *ret = xret;
3205 struct bnx_tx_ring *txr = ret->bnx_txr;
3206 uint16_t rx_prod, tx_cons;
3208 ASSERT_SERIALIZED(&ret->bnx_rx_ret_serialize);
3210 ret->bnx_saved_status_tag = *ret->bnx_hw_status_tag;
3212 * Use a load fence to ensure that status_tag is saved
3213 * before rx_prod and tx_cons.
3217 rx_prod = *ret->bnx_rx_considx;
3218 if (ret->bnx_rx_saved_considx != rx_prod)
3219 bnx_rxeof(ret, rx_prod, -1);
3221 lwkt_serialize_enter(&txr->bnx_tx_serialize);
3222 tx_cons = *txr->bnx_tx_considx;
3223 if (txr->bnx_tx_saved_considx != tx_cons)
3224 bnx_txeof(txr, tx_cons);
3225 lwkt_serialize_exit(&txr->bnx_tx_serialize);
3227 bnx_writembx(ret->bnx_sc, ret->bnx_msix_mbx,
3228 ret->bnx_saved_status_tag << 24);
3232 bnx_msix_status(void *xsc)
3234 struct bnx_softc *sc = xsc;
3236 ASSERT_SERIALIZED(&sc->bnx_main_serialize);
3238 sc->bnx_saved_status_tag = *sc->bnx_hw_status_tag;
3240 * Use a load fence to ensure that status_tag is saved
3245 bnx_handle_status(sc);
3247 bnx_writembx(sc, BGE_MBX_IRQ0_LO, sc->bnx_saved_status_tag << 24);
3253 struct bnx_softc *sc = xsc;
3255 lwkt_serialize_enter(&sc->bnx_main_serialize);
3257 bnx_stats_update_regs(sc);
3259 if (sc->bnx_flags & BNX_FLAG_TBI) {
3261 * Auto-polling can't be used in TBI mode, so poll the link
3262 * status manually. Register a pending link event here and
3263 * trigger an interrupt.
3266 BNX_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
3267 } else if (!sc->bnx_link) {
3268 mii_tick(device_get_softc(sc->bnx_miibus));
3271 callout_reset_bycpu(&sc->bnx_tick_timer, hz, bnx_tick, sc,
3272 sc->bnx_tick_cpuid);
3274 lwkt_serialize_exit(&sc->bnx_main_serialize);
3278 bnx_stats_update_regs(struct bnx_softc *sc)
3280 struct ifnet *ifp = &sc->arpcom.ac_if;
3281 struct bge_mac_stats_regs stats;
3285 s = (uint32_t *)&stats;
3286 for (i = 0; i < sizeof(struct bge_mac_stats_regs); i += 4) {
3287 *s = CSR_READ_4(sc, BGE_RX_STATS + i);
3291 IFNET_STAT_SET(ifp, collisions,
3292 (stats.dot3StatsSingleCollisionFrames +
3293 stats.dot3StatsMultipleCollisionFrames +
3294 stats.dot3StatsExcessiveCollisions +
3295 stats.dot3StatsLateCollisions));
3297 val = CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS);
3298 sc->bnx_norxbds += val;
3302 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
3303 * pointers to descriptors.
3306 bnx_encap(struct bnx_tx_ring *txr, struct mbuf **m_head0, uint32_t *txidx,
3309 struct bge_tx_bd *d = NULL;
3310 uint16_t csum_flags = 0, vlan_tag = 0, mss = 0;
3311 bus_dma_segment_t segs[BNX_NSEG_NEW];
3313 int error, maxsegs, nsegs, idx, i;
3314 struct mbuf *m_head = *m_head0, *m_new;
3316 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
3317 #ifdef BNX_TSO_DEBUG
3321 error = bnx_setup_tso(txr, m_head0, &mss, &csum_flags);
3326 #ifdef BNX_TSO_DEBUG
3327 tso_nsegs = (m_head->m_pkthdr.len /
3328 m_head->m_pkthdr.tso_segsz) - 1;
3329 if (tso_nsegs > (BNX_TSO_NSTATS - 1))
3330 tso_nsegs = BNX_TSO_NSTATS - 1;
3331 else if (tso_nsegs < 0)
3333 txr->bnx_sc->bnx_tsosegs[tso_nsegs]++;
3335 } else if (m_head->m_pkthdr.csum_flags & BNX_CSUM_FEATURES) {
3336 if (m_head->m_pkthdr.csum_flags & CSUM_IP)
3337 csum_flags |= BGE_TXBDFLAG_IP_CSUM;
3338 if (m_head->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
3339 csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
3340 if (m_head->m_flags & M_LASTFRAG)
3341 csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
3342 else if (m_head->m_flags & M_FRAG)
3343 csum_flags |= BGE_TXBDFLAG_IP_FRAG;
3345 if (m_head->m_flags & M_VLANTAG) {
3346 csum_flags |= BGE_TXBDFLAG_VLAN_TAG;
3347 vlan_tag = m_head->m_pkthdr.ether_vlantag;
3351 map = txr->bnx_tx_buf[idx].bnx_tx_dmamap;
3353 maxsegs = (BGE_TX_RING_CNT - txr->bnx_tx_cnt) - BNX_NSEG_RSVD;
3354 KASSERT(maxsegs >= BNX_NSEG_SPARE,
3355 ("not enough segments %d", maxsegs));
3357 if (maxsegs > BNX_NSEG_NEW)
3358 maxsegs = BNX_NSEG_NEW;
3361 * Pad the outbound frame to BNX_MIN_FRAMELEN for an unusual reason.
3362 * The bge hardware will pad out Tx runts to BGE_MIN_FRAMELEN,
3363 * but when such padded frames employ the bge IP/TCP checksum
3364 * offload, the hardware checksum assist gives incorrect results
3365 * (possibly from incorporating its own padding into the UDP/TCP
3366 * checksum; who knows). If we pad such runts with zeros, the
3367 * onboard checksum comes out correct.
3369 if ((csum_flags & BGE_TXBDFLAG_TCP_UDP_CSUM) &&
3370 m_head->m_pkthdr.len < BNX_MIN_FRAMELEN) {
3371 error = m_devpad(m_head, BNX_MIN_FRAMELEN);
3376 if ((txr->bnx_tx_flags & BNX_TX_FLAG_SHORTDMA) &&
3377 m_head->m_next != NULL) {
3378 m_new = bnx_defrag_shortdma(m_head);
3379 if (m_new == NULL) {
3383 *m_head0 = m_head = m_new;
3385 if ((m_head->m_pkthdr.csum_flags & CSUM_TSO) == 0 &&
3386 (txr->bnx_tx_flags & BNX_TX_FLAG_FORCE_DEFRAG) &&
3387 m_head->m_next != NULL) {
3389 * Forcefully defragment the mbuf chain to overcome the
3390 * hardware limitation of a single outstanding DMA read
3391 * operation. If defragmentation fails, keep going with
3392 * the original mbuf chain.
3394 m_new = m_defrag(m_head, MB_DONTWAIT);
3396 *m_head0 = m_head = m_new;
3399 error = bus_dmamap_load_mbuf_defrag(txr->bnx_tx_mtag, map,
3400 m_head0, segs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
3403 *segs_used += nsegs;
3406 bus_dmamap_sync(txr->bnx_tx_mtag, map, BUS_DMASYNC_PREWRITE);
3408 for (i = 0; ; i++) {
3409 d = &txr->bnx_tx_ring[idx];
3411 d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr);
3412 d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr);
3413 d->bge_len = segs[i].ds_len;
3414 d->bge_flags = csum_flags;
3415 d->bge_vlan_tag = vlan_tag;
3420 BNX_INC(idx, BGE_TX_RING_CNT);
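/*
 * BNX_INC() is presumably the usual ring-advance helper,
 * roughly (x) = ((x) + 1) % (cnt), so `idx' wraps around
 * the BGE_TX_RING_CNT-entry TX ring here.
 */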
3422 /* Mark the last segment as end of packet... */
3423 d->bge_flags |= BGE_TXBDFLAG_END;
3426 * Ensure that the map for this transmission is placed at
3427 * the array index of the last descriptor in this chain.
3429 txr->bnx_tx_buf[*txidx].bnx_tx_dmamap = txr->bnx_tx_buf[idx].bnx_tx_dmamap;
3430 txr->bnx_tx_buf[idx].bnx_tx_dmamap = map;
3431 txr->bnx_tx_buf[idx].bnx_tx_mbuf = m_head;
3432 txr->bnx_tx_cnt += nsegs;
3434 BNX_INC(idx, BGE_TX_RING_CNT);
3445 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
3446 * to the mbuf data regions directly in the transmit descriptors.
3449 bnx_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
3451 struct bnx_tx_ring *txr = ifsq_get_priv(ifsq);
3452 struct mbuf *m_head = NULL;
3456 KKASSERT(txr->bnx_ifsq == ifsq);
3457 ASSERT_SERIALIZED(&txr->bnx_tx_serialize);
3459 if ((ifp->if_flags & IFF_RUNNING) == 0 || ifsq_is_oactive(ifsq))
3462 prodidx = txr->bnx_tx_prodidx;
3464 while (txr->bnx_tx_buf[prodidx].bnx_tx_mbuf == NULL) {
3466 * Sanity check: avoid coming within BNX_NSEG_RSVD
3467 * descriptors of the end of the ring. Also make
3468 * sure there are BNX_NSEG_SPARE descriptors for
3469 * jumbo buffers' or TSO segments' defragmentation.
3471 if ((BGE_TX_RING_CNT - txr->bnx_tx_cnt) <
3472 (BNX_NSEG_RSVD + BNX_NSEG_SPARE)) {
3473 ifsq_set_oactive(ifsq);
3477 m_head = ifsq_dequeue(ifsq);
3482 * Pack the data into the transmit ring. If we
3483 * don't have room, set the OACTIVE flag and wait
3484 * for the NIC to drain the ring.
3486 if (bnx_encap(txr, &m_head, &prodidx, &nsegs)) {
3487 ifsq_set_oactive(ifsq);
3488 IFNET_STAT_INC(ifp, oerrors, 1);
3492 if (nsegs >= txr->bnx_tx_wreg) {
3494 bnx_writembx(txr->bnx_sc, txr->bnx_tx_mbx, prodidx);
3498 ETHER_BPF_MTAP(ifp, m_head);
3501 * Set a timeout in case the chip goes out to lunch.
3503 txr->bnx_tx_watchdog.wd_timer = 5;
3508 bnx_writembx(txr->bnx_sc, txr->bnx_tx_mbx, prodidx);
3510 txr->bnx_tx_prodidx = prodidx;
3516 struct bnx_softc *sc = xsc;
3517 struct ifnet *ifp = &sc->arpcom.ac_if;
3523 ASSERT_IFNET_SERIALIZED_ALL(ifp);
3525 /* Cancel pending I/O and flush buffers. */
3531 * Init the various state machines, ring
3532 * control blocks and firmware.
3534 if (bnx_blockinit(sc)) {
3535 if_printf(ifp, "initialization failure\n");
3541 CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
3542 ETHER_HDR_LEN + ETHER_CRC_LEN + EVL_ENCAPLEN);
3544 /* Load our MAC address. */
3545 m = (uint16_t *)&sc->arpcom.ac_enaddr[0];
3546 CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
3547 CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
3549 /* Enable or disable promiscuous mode as needed. */
3552 /* Program multicast filter. */
3556 if (bnx_init_rx_ring_std(&sc->bnx_rx_std_ring)) {
3557 if_printf(ifp, "RX ring initialization failed\n");
3562 /* Init jumbo RX ring. */
3563 if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN)) {
3564 if (bnx_init_rx_ring_jumbo(sc)) {
3565 if_printf(ifp, "Jumbo RX ring initialization failed\n");
3571 /* Init our RX return ring indices */
3572 for (i = 0; i < sc->bnx_rx_retcnt; ++i) {
3573 struct bnx_rx_ret_ring *ret = &sc->bnx_rx_ret_ring[i];
3575 ret->bnx_rx_saved_considx = 0;
3576 ret->bnx_rx_cnt = 0;
3580 for (i = 0; i < sc->bnx_tx_ringcnt; ++i)
3581 bnx_init_tx_ring(&sc->bnx_tx_ring[i]);
3583 /* Enable TX MAC state machine lockup fix. */
3584 mode = CSR_READ_4(sc, BGE_TX_MODE);
3585 mode |= BGE_TXMODE_MBUF_LOCKUP_FIX;
3586 if (sc->bnx_asicrev == BGE_ASICREV_BCM5720 ||
3587 sc->bnx_asicrev == BGE_ASICREV_BCM5762) {
3588 mode &= ~(BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE);
3589 mode |= CSR_READ_4(sc, BGE_TX_MODE) &
3590 (BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE);
3592 /* Turn on transmitter */
3593 CSR_WRITE_4(sc, BGE_TX_MODE, mode | BGE_TXMODE_ENABLE);
3595 /* Initialize RSS */
3596 mode = BGE_RXMODE_ENABLE;
3597 if (BNX_RSS_ENABLED(sc)) {
3599 mode |= BGE_RXMODE_RSS_ENABLE |
3600 BGE_RXMODE_RSS_HASH_MASK_BITS |
3601 BGE_RXMODE_RSS_IPV4_HASH |
3602 BGE_RXMODE_RSS_TCP_IPV4_HASH;
3604 /* Turn on receiver */
3605 BNX_SETBIT(sc, BGE_RX_MODE, mode);
3608 * Set the number of good frames to receive after RX MBUF
3609 * Low Watermark has been reached. After the RX MAC receives
3610 * this number of frames, it will drop subsequent incoming
3611 * frames until the MBUF High Watermark is reached.
3613 if (BNX_IS_57765_FAMILY(sc))
3614 CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 1);
3616 CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 2);
3618 if (sc->bnx_intr_type == PCI_INTR_TYPE_MSI ||
3619 sc->bnx_intr_type == PCI_INTR_TYPE_MSIX) {
3621 if_printf(ifp, "MSI_MODE: %#x\n",
3622 CSR_READ_4(sc, BGE_MSI_MODE));
3626 /* Tell firmware we're alive. */
3627 BNX_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3629 /* Enable host interrupts if polling(4) is not enabled. */
3630 PCI_SETBIT(sc->bnx_dev, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA, 4);
3633 #ifdef IFPOLL_ENABLE
3634 if (ifp->if_flags & IFF_NPOLLING)
3638 bnx_disable_intr(sc);
3640 bnx_enable_intr(sc);
3641 bnx_set_tick_cpuid(sc, polling);
3643 bnx_ifmedia_upd(ifp);
3645 ifp->if_flags |= IFF_RUNNING;
3646 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) {
3647 struct bnx_tx_ring *txr = &sc->bnx_tx_ring[i];
3649 ifsq_clr_oactive(txr->bnx_ifsq);
3650 ifsq_watchdog_start(&txr->bnx_tx_watchdog);
3653 callout_reset_bycpu(&sc->bnx_tick_timer, hz, bnx_tick, sc,
3654 sc->bnx_tick_cpuid);
3658 * Set media options.
3661 bnx_ifmedia_upd(struct ifnet *ifp)
3663 struct bnx_softc *sc = ifp->if_softc;
3665 /* If this is a 1000baseX NIC, enable the TBI port. */
3666 if (sc->bnx_flags & BNX_FLAG_TBI) {
3667 struct ifmedia *ifm = &sc->bnx_ifmedia;
3669 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
3672 switch(IFM_SUBTYPE(ifm->ifm_media)) {
3677 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
3678 BNX_CLRBIT(sc, BGE_MAC_MODE,
3679 BGE_MACMODE_HALF_DUPLEX);
3681 BNX_SETBIT(sc, BGE_MAC_MODE,
3682 BGE_MACMODE_HALF_DUPLEX);
3689 struct mii_data *mii = device_get_softc(sc->bnx_miibus);
3693 if (mii->mii_instance) {
3694 struct mii_softc *miisc;
3696 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
3697 mii_phy_reset(miisc);
3702 * Force an interrupt so that bnx_link_upd is called if
3703 * needed and any pending link state attention is cleared.
3704 * Without this we would not get any further interrupts
3705 * for link state changes and thus would never UP the link
3706 * or be able to send in bnx_start. Previously the only way
3707 * to get things working was to receive a packet and get an RX
3710 * bnx_tick should help for fiber cards, and we might not
3711 * need to do this here if BNX_FLAG_TBI is set, but since
3712 * we poll for fiber anyway it should not hurt.
3714 BNX_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
3720 * Report current media status.
3723 bnx_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
3725 struct bnx_softc *sc = ifp->if_softc;
3727 if (sc->bnx_flags & BNX_FLAG_TBI) {
3728 ifmr->ifm_status = IFM_AVALID;
3729 ifmr->ifm_active = IFM_ETHER;
3730 if (CSR_READ_4(sc, BGE_MAC_STS) &
3731 BGE_MACSTAT_TBI_PCS_SYNCHED) {
3732 ifmr->ifm_status |= IFM_ACTIVE;
3734 ifmr->ifm_active |= IFM_NONE;
3738 ifmr->ifm_active |= IFM_1000_SX;
3739 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
3740 ifmr->ifm_active |= IFM_HDX;
3742 ifmr->ifm_active |= IFM_FDX;
3744 struct mii_data *mii = device_get_softc(sc->bnx_miibus);
3747 ifmr->ifm_active = mii->mii_media_active;
3748 ifmr->ifm_status = mii->mii_media_status;
3753 bnx_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
3755 struct bnx_softc *sc = ifp->if_softc;
3756 struct ifreq *ifr = (struct ifreq *)data;
3757 int mask, error = 0;
3759 ASSERT_IFNET_SERIALIZED_ALL(ifp);
3763 if ((!BNX_IS_JUMBO_CAPABLE(sc) && ifr->ifr_mtu > ETHERMTU) ||
3764 (BNX_IS_JUMBO_CAPABLE(sc) &&
3765 ifr->ifr_mtu > BNX_JUMBO_MTU)) {
3767 } else if (ifp->if_mtu != ifr->ifr_mtu) {
3768 ifp->if_mtu = ifr->ifr_mtu;
3769 if (ifp->if_flags & IFF_RUNNING)
3774 if (ifp->if_flags & IFF_UP) {
3775 if (ifp->if_flags & IFF_RUNNING) {
3776 mask = ifp->if_flags ^ sc->bnx_if_flags;
3779 * If only the state of the PROMISC flag
3780 * changed, then just use the 'set promisc
3781 * mode' command instead of reinitializing
3782 * the entire NIC. Doing a full re-init
3783 * means reloading the firmware and waiting
3784 * for it to start up, which may take a
3785 * second or two. Similarly for ALLMULTI.
3787 if (mask & IFF_PROMISC)
3789 if (mask & IFF_ALLMULTI)
3794 } else if (ifp->if_flags & IFF_RUNNING) {
3797 sc->bnx_if_flags = ifp->if_flags;
3801 if (ifp->if_flags & IFF_RUNNING)
3806 if (sc->bnx_flags & BNX_FLAG_TBI) {
3807 error = ifmedia_ioctl(ifp, ifr,
3808 &sc->bnx_ifmedia, command);
3810 struct mii_data *mii;
3812 mii = device_get_softc(sc->bnx_miibus);
3813 error = ifmedia_ioctl(ifp, ifr,
3814 &mii->mii_media, command);
3818 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
3819 if (mask & IFCAP_HWCSUM) {
3820 ifp->if_capenable ^= (mask & IFCAP_HWCSUM);
3821 if (ifp->if_capenable & IFCAP_TXCSUM)
3822 ifp->if_hwassist |= BNX_CSUM_FEATURES;
3824 ifp->if_hwassist &= ~BNX_CSUM_FEATURES;
3826 if (mask & IFCAP_TSO) {
3827 ifp->if_capenable ^= (mask & IFCAP_TSO);
3828 if (ifp->if_capenable & IFCAP_TSO)
3829 ifp->if_hwassist |= CSUM_TSO;
3831 ifp->if_hwassist &= ~CSUM_TSO;
3833 if (mask & IFCAP_RSS)
3834 ifp->if_capenable ^= IFCAP_RSS;
3837 error = ether_ioctl(ifp, command, data);
3844 bnx_watchdog(struct ifaltq_subque *ifsq)
3846 struct ifnet *ifp = ifsq_get_ifp(ifsq);
3847 struct bnx_softc *sc = ifp->if_softc;
3850 ASSERT_IFNET_SERIALIZED_ALL(ifp);
3852 if_printf(ifp, "watchdog timeout -- resetting\n");
3856 IFNET_STAT_INC(ifp, oerrors, 1);
3858 for (i = 0; i < sc->bnx_tx_ringcnt; ++i)
3859 ifsq_devstart_sched(sc->bnx_tx_ring[i].bnx_ifsq);
3863 * Stop the adapter and free any mbufs allocated to the
3867 bnx_stop(struct bnx_softc *sc)
3869 struct ifnet *ifp = &sc->arpcom.ac_if;
3872 ASSERT_IFNET_SERIALIZED_ALL(ifp);
3874 callout_stop(&sc->bnx_tick_timer);
3877 * Disable all of the receiver blocks
3879 bnx_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
3880 bnx_stop_block(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
3881 bnx_stop_block(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
3882 bnx_stop_block(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
3883 bnx_stop_block(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
3884 bnx_stop_block(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
3887 * Disable all of the transmit blocks
3889 bnx_stop_block(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
3890 bnx_stop_block(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
3891 bnx_stop_block(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
3892 bnx_stop_block(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
3893 bnx_stop_block(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
3894 bnx_stop_block(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
3897 * Shut down all of the memory managers and related
3900 bnx_stop_block(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
3901 bnx_stop_block(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
3902 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
3903 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
3905 /* Disable host interrupts. */
3906 bnx_disable_intr(sc);
3909 * Tell firmware we're shutting down.
3911 BNX_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3913 /* Free the RX lists. */
3914 bnx_free_rx_ring_std(&sc->bnx_rx_std_ring);
3916 /* Free jumbo RX list. */
3917 if (BNX_IS_JUMBO_CAPABLE(sc))
3918 bnx_free_rx_ring_jumbo(sc);
3920 /* Free TX buffers. */
3921 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) {
3922 struct bnx_tx_ring *txr = &sc->bnx_tx_ring[i];
3924 txr->bnx_saved_status_tag = 0;
3925 bnx_free_tx_ring(txr);
3928 /* Clear the saved status tags */
3929 for (i = 0; i < sc->bnx_rx_retcnt; ++i)
3930 sc->bnx_rx_ret_ring[i].bnx_saved_status_tag = 0;
3933 sc->bnx_coal_chg = 0;
3935 ifp->if_flags &= ~IFF_RUNNING;
3936 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) {
3937 struct bnx_tx_ring *txr = &sc->bnx_tx_ring[i];
3939 ifsq_clr_oactive(txr->bnx_ifsq);
3940 ifsq_watchdog_stop(&txr->bnx_tx_watchdog);
3945 * Stop all chip I/O so that the kernel's probe routines don't
3946 * get confused by errant DMAs when rebooting.
3949 bnx_shutdown(device_t dev)
3951 struct bnx_softc *sc = device_get_softc(dev);
3952 struct ifnet *ifp = &sc->arpcom.ac_if;
3954 ifnet_serialize_all(ifp);
3957 ifnet_deserialize_all(ifp);
3961 bnx_suspend(device_t dev)
3963 struct bnx_softc *sc = device_get_softc(dev);
3964 struct ifnet *ifp = &sc->arpcom.ac_if;
3966 ifnet_serialize_all(ifp);
3968 ifnet_deserialize_all(ifp);
3974 bnx_resume(device_t dev)
3976 struct bnx_softc *sc = device_get_softc(dev);
3977 struct ifnet *ifp = &sc->arpcom.ac_if;
3979 ifnet_serialize_all(ifp);
3981 if (ifp->if_flags & IFF_UP) {
3985 for (i = 0; i < sc->bnx_tx_ringcnt; ++i)
3986 ifsq_devstart_sched(sc->bnx_tx_ring[i].bnx_ifsq);
3989 ifnet_deserialize_all(ifp);
3995 bnx_setpromisc(struct bnx_softc *sc)
3997 struct ifnet *ifp = &sc->arpcom.ac_if;
3999 if (ifp->if_flags & IFF_PROMISC)
4000 BNX_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
4002 BNX_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
4006 bnx_dma_free(struct bnx_softc *sc)
4008 struct bnx_rx_std_ring *std = &sc->bnx_rx_std_ring;
4011 /* Destroy RX return rings */
4012 if (sc->bnx_rx_ret_ring != NULL) {
4013 for (i = 0; i < sc->bnx_rx_retcnt; ++i)
4014 bnx_destroy_rx_ret_ring(&sc->bnx_rx_ret_ring[i]);
4015 kfree(sc->bnx_rx_ret_ring, M_DEVBUF);
4018 /* Destroy the RX mbuf DMA tag and maps. */
4019 if (std->bnx_rx_mtag != NULL) {
4020 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
4021 KKASSERT(std->bnx_rx_std_buf[i].bnx_rx_mbuf == NULL);
4022 bus_dmamap_destroy(std->bnx_rx_mtag,
4023 std->bnx_rx_std_buf[i].bnx_rx_dmamap);
4025 bus_dma_tag_destroy(std->bnx_rx_mtag);
4028 /* Destroy standard RX ring */
4029 bnx_dma_block_free(std->bnx_rx_std_ring_tag,
4030 std->bnx_rx_std_ring_map, std->bnx_rx_std_ring);
4032 /* Destroy TX rings */
4033 if (sc->bnx_tx_ring != NULL) {
4034 for (i = 0; i < sc->bnx_tx_ringcnt; ++i)
4035 bnx_destroy_tx_ring(&sc->bnx_tx_ring[i]);
4036 kfree(sc->bnx_tx_ring, M_DEVBUF);
4039 if (BNX_IS_JUMBO_CAPABLE(sc))
4040 bnx_free_jumbo_mem(sc);
4042 /* Destroy status blocks */
4043 for (i = 0; i < sc->bnx_intr_cnt; ++i) {
4044 struct bnx_intr_data *intr = &sc->bnx_intr_data[i];
4046 bnx_dma_block_free(intr->bnx_status_tag,
4047 intr->bnx_status_map, intr->bnx_status_block);
4050 /* Destroy the parent tag */
4051 if (sc->bnx_cdata.bnx_parent_tag != NULL)
4052 bus_dma_tag_destroy(sc->bnx_cdata.bnx_parent_tag);
4056 bnx_dma_alloc(device_t dev)
4058 struct bnx_softc *sc = device_get_softc(dev);
4059 struct bnx_rx_std_ring *std = &sc->bnx_rx_std_ring;
4063 * Allocate the parent bus DMA tag appropriate for PCI.
4065 * All of the NetExtreme/NetLink controllers have a 4GB boundary
4067 * DMA bug: whenever an address crosses a multiple of the 4GB
4068 * boundary (4GB, 8GB, 12GB, etc.) and makes the transition
4069 * from 0xX_FFFF_FFFF to 0x(X+1)_0000_0000, an internal DMA
4070 * state machine will lock up and cause the device to hang.
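 * For example (illustrative addresses): a single DMA segment
 * spanning 0x0_FFFF_F000 to 0x1_0000_0FFF would cross the first
 * 4GB boundary and could trigger the lockup; the 4GB bus_dma
 * boundary passed to bus_dma_tag_create() below forces such a
 * segment to be split or bounced instead.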
4072 error = bus_dma_tag_create(NULL, 1, BGE_DMA_BOUNDARY_4G,
4073 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
4074 BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT,
4075 0, &sc->bnx_cdata.bnx_parent_tag);
4077 device_printf(dev, "could not create parent DMA tag\n");
4082 * Create DMA resources for the status blocks.
4084 for (i = 0; i < sc->bnx_intr_cnt; ++i) {
4085 struct bnx_intr_data *intr = &sc->bnx_intr_data[i];
4087 error = bnx_dma_block_alloc(sc,
4088 __VM_CACHELINE_ALIGN(BGE_STATUS_BLK_SZ),
4089 &intr->bnx_status_tag, &intr->bnx_status_map,
4090 (void *)&intr->bnx_status_block,
4091 &intr->bnx_status_block_paddr);
4094 "could not create %dth status block\n", i);
4098 sc->bnx_hw_status = &sc->bnx_intr_data[0].bnx_status_block->bge_status;
4099 if (sc->bnx_flags & BNX_FLAG_STATUS_HASTAG) {
4100 sc->bnx_hw_status_tag =
4101 &sc->bnx_intr_data[0].bnx_status_block->bge_status_tag;
4105 * Create DMA tag and maps for RX mbufs.
4108 lwkt_serialize_init(&std->bnx_rx_std_serialize);
4109 error = bus_dma_tag_create(sc->bnx_cdata.bnx_parent_tag, 1, 0,
4110 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
4111 NULL, NULL, MCLBYTES, 1, MCLBYTES,
4112 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK, &std->bnx_rx_mtag);
4114 device_printf(dev, "could not create RX mbuf DMA tag\n");
4118 for (i = 0; i < BGE_STD_RX_RING_CNT; ++i) {
4119 error = bus_dmamap_create(std->bnx_rx_mtag, BUS_DMA_WAITOK,
4120 &std->bnx_rx_std_buf[i].bnx_rx_dmamap);
4124 for (j = 0; j < i; ++j) {
4125 bus_dmamap_destroy(std->bnx_rx_mtag,
4126 std->bnx_rx_std_buf[j].bnx_rx_dmamap);
4128 bus_dma_tag_destroy(std->bnx_rx_mtag);
4129 std->bnx_rx_mtag = NULL;
4132 "could not create %dth RX mbuf DMA map\n", i);
4138 * Create DMA resources for the standard RX ring.
4140 error = bnx_dma_block_alloc(sc, BGE_STD_RX_RING_SZ,
4141 &std->bnx_rx_std_ring_tag,
4142 &std->bnx_rx_std_ring_map,
4143 (void *)&std->bnx_rx_std_ring,
4144 &std->bnx_rx_std_ring_paddr);
4146 device_printf(dev, "could not create std RX ring\n");
4151 * Create RX return rings
4153 mbx = BGE_MBX_RX_CONS0_LO;
4154 sc->bnx_rx_ret_ring = kmalloc_cachealign(
4155 sizeof(struct bnx_rx_ret_ring) * sc->bnx_rx_retcnt, M_DEVBUF,
4157 for (i = 0; i < sc->bnx_rx_retcnt; ++i) {
4158 struct bnx_rx_ret_ring *ret = &sc->bnx_rx_ret_ring[i];
4159 struct bnx_intr_data *intr;
4163 ret->bnx_rx_mbx = mbx;
4164 ret->bnx_rx_cntmax = (BGE_STD_RX_RING_CNT / 4) /
4166 ret->bnx_rx_mask = 1 << i;
4168 if (!BNX_RSS_ENABLED(sc)) {
4169 intr = &sc->bnx_intr_data[0];
4171 KKASSERT(i + 1 < sc->bnx_intr_cnt);
4172 intr = &sc->bnx_intr_data[i + 1];
4176 ret->bnx_rx_considx =
4177 &intr->bnx_status_block->bge_idx[0].bge_rx_prod_idx;
4178 } else if (i == 1) {
4179 ret->bnx_rx_considx =
4180 &intr->bnx_status_block->bge_rx_jumbo_cons_idx;
4181 } else if (i == 2) {
4182 ret->bnx_rx_considx =
4183 &intr->bnx_status_block->bge_rsvd1;
4184 } else if (i == 3) {
4185 ret->bnx_rx_considx =
4186 &intr->bnx_status_block->bge_rx_mini_cons_idx;
4188 panic("unknown RX return ring %d\n", i);
4190 ret->bnx_hw_status_tag =
4191 &intr->bnx_status_block->bge_status_tag;
4193 error = bnx_create_rx_ret_ring(ret);
4196 "could not create %dth RX ret ring\n", i);
4205 sc->bnx_tx_ring = kmalloc_cachealign(
4206 sizeof(struct bnx_tx_ring) * sc->bnx_tx_ringcnt, M_DEVBUF,
4208 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) {
4209 struct bnx_tx_ring *txr = &sc->bnx_tx_ring[i];
4210 struct bnx_intr_data *intr;
4213 txr->bnx_tx_mbx = bnx_tx_mailbox[i];
4215 if (sc->bnx_tx_ringcnt == 1) {
4216 intr = &sc->bnx_intr_data[0];
		} else {
			KKASSERT(i + 1 < sc->bnx_intr_cnt);
4219 intr = &sc->bnx_intr_data[i + 1];
		}

		if ((sc->bnx_flags & BNX_FLAG_RXTX_BUNDLE) == 0) {
4223 txr->bnx_hw_status_tag =
4224 &intr->bnx_status_block->bge_status_tag;
		}
		txr->bnx_tx_considx =
4227 &intr->bnx_status_block->bge_idx[0].bge_tx_cons_idx;
4229 error = bnx_create_tx_ring(txr);
		if (error) {
			device_printf(dev,
			    "could not create %dth TX ring\n", i);
			return error;
		}
	}
	/*
	 * Create jumbo buffer pool.
	 */
4240 if (BNX_IS_JUMBO_CAPABLE(sc)) {
4241 error = bnx_alloc_jumbo_mem(sc);
		if (error) {
			device_printf(dev,
			    "could not create jumbo buffer pool\n");
			return error;
		}
	}

	return 0;
}

static int
bnx_dma_block_alloc(struct bnx_softc *sc, bus_size_t size, bus_dma_tag_t *tag,
4254 bus_dmamap_t *map, void **addr, bus_addr_t *paddr)
{
	bus_dmamem_t dmem;
	int error;

	error = bus_dmamem_coherent(sc->bnx_cdata.bnx_parent_tag, PAGE_SIZE, 0,
4260 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
4261 size, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
	if (error)
		return error;

	*tag = dmem.dmem_tag;
4266 *map = dmem.dmem_map;
4267 *addr = dmem.dmem_addr;
4268 *paddr = dmem.dmem_busaddr;

	return 0;
}

static void
bnx_dma_block_free(bus_dma_tag_t tag, bus_dmamap_t map, void *addr)
{
	bus_dmamap_unload(tag, map);
4278 bus_dmamem_free(tag, addr, map);
4279 bus_dma_tag_destroy(tag);
}

static void
bnx_tbi_link_upd(struct bnx_softc *sc, uint32_t status)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
4288 #define PCS_ENCODE_ERR (BGE_MACSTAT_PORT_DECODE_ERROR|BGE_MACSTAT_MI_COMPLETE)
	/*
	 * Sometimes PCS encoding errors are detected in
4292 * TBI mode (on fiber NICs), and for some reason
4293 * the chip will signal them as link changes.
4294 * If we get a link change event, but the 'PCS
4295 * encoding error' bit in the MAC status register
4296 * is set, don't bother doing a link check.
4297 * This avoids spurious "gigabit link up" messages
4298 * that sometimes appear on fiber NICs during
	 * periods of heavy traffic.
	 */
4301 if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
4302 if (!sc->bnx_link) {
			sc->bnx_link++;
			if (sc->bnx_asicrev == BGE_ASICREV_BCM5704) {
4305 BNX_CLRBIT(sc, BGE_MAC_MODE,
4306 BGE_MACMODE_TBI_SEND_CFGS);
			}
			CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);

			if (bootverbose)
				if_printf(ifp, "link UP\n");
4313 ifp->if_link_state = LINK_STATE_UP;
4314 if_link_state_change(ifp);
		}
	} else if ((status & PCS_ENCODE_ERR) != PCS_ENCODE_ERR) {
		if (sc->bnx_link) {
			sc->bnx_link = 0;
			if (bootverbose)
				if_printf(ifp, "link DOWN\n");
4323 ifp->if_link_state = LINK_STATE_DOWN;
4324 if_link_state_change(ifp);
		}
	}
#undef PCS_ENCODE_ERR
4330 /* Clear the attention. */
4331 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
4332 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
4333 BGE_MACSTAT_LINK_CHANGED);
}

static void
bnx_copper_link_upd(struct bnx_softc *sc, uint32_t status __unused)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
4340 struct mii_data *mii = device_get_softc(sc->bnx_miibus);
	mii_pollstat(mii);
	bnx_miibus_statchg(sc->bnx_dev);

	if (bootverbose) {
		if (sc->bnx_link)
			if_printf(ifp, "link UP\n");
		else
			if_printf(ifp, "link DOWN\n");
	}
4352 /* Clear the attention. */
4353 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
4354 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
4355 BGE_MACSTAT_LINK_CHANGED);
}

static void
bnx_autopoll_link_upd(struct bnx_softc *sc, uint32_t status __unused)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
4362 struct mii_data *mii = device_get_softc(sc->bnx_miibus);

	mii_pollstat(mii);

	if (!sc->bnx_link &&
4367 (mii->mii_media_status & IFM_ACTIVE) &&
4368 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
		sc->bnx_link++;
		if (bootverbose)
			if_printf(ifp, "link UP\n");
4372 } else if (sc->bnx_link &&
4373 (!(mii->mii_media_status & IFM_ACTIVE) ||
4374 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
		sc->bnx_link = 0;
		if (bootverbose)
			if_printf(ifp, "link DOWN\n");
	}
4380 /* Clear the attention. */
4381 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
4382 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
4383 BGE_MACSTAT_LINK_CHANGED);
}

static int
bnx_sysctl_rx_coal_ticks(SYSCTL_HANDLER_ARGS)
{
	struct bnx_softc *sc = arg1;

4391 return bnx_sysctl_coal_chg(oidp, arg1, arg2, req,
4392 &sc->bnx_rx_coal_ticks,
4393 BNX_RX_COAL_TICKS_MIN, BNX_RX_COAL_TICKS_MAX,
	    BNX_RX_COAL_TICKS_CHG);
}

static int
bnx_sysctl_tx_coal_ticks(SYSCTL_HANDLER_ARGS)
{
	struct bnx_softc *sc = arg1;

4402 return bnx_sysctl_coal_chg(oidp, arg1, arg2, req,
4403 &sc->bnx_tx_coal_ticks,
4404 BNX_TX_COAL_TICKS_MIN, BNX_TX_COAL_TICKS_MAX,
	    BNX_TX_COAL_TICKS_CHG);
}

static int
bnx_sysctl_rx_coal_bds(SYSCTL_HANDLER_ARGS)
{
	struct bnx_softc *sc = arg1;

4413 return bnx_sysctl_coal_chg(oidp, arg1, arg2, req,
4414 &sc->bnx_rx_coal_bds,
4415 BNX_RX_COAL_BDS_MIN, BNX_RX_COAL_BDS_MAX,
	    BNX_RX_COAL_BDS_CHG);
}

static int
bnx_sysctl_rx_coal_bds_poll(SYSCTL_HANDLER_ARGS)
{
	struct bnx_softc *sc = arg1;

4424 return bnx_sysctl_coal_chg(oidp, arg1, arg2, req,
4425 &sc->bnx_rx_coal_bds_poll,
4426 BNX_RX_COAL_BDS_MIN, BNX_RX_COAL_BDS_MAX,
	    BNX_RX_COAL_BDS_CHG);
}

static int
bnx_sysctl_tx_coal_bds(SYSCTL_HANDLER_ARGS)
{
	struct bnx_softc *sc = arg1;

4435 return bnx_sysctl_coal_chg(oidp, arg1, arg2, req,
4436 &sc->bnx_tx_coal_bds,
4437 BNX_TX_COAL_BDS_MIN, BNX_TX_COAL_BDS_MAX,
	    BNX_TX_COAL_BDS_CHG);
}

static int
bnx_sysctl_tx_coal_bds_poll(SYSCTL_HANDLER_ARGS)
{
	struct bnx_softc *sc = arg1;

4446 return bnx_sysctl_coal_chg(oidp, arg1, arg2, req,
4447 &sc->bnx_tx_coal_bds_poll,
4448 BNX_TX_COAL_BDS_MIN, BNX_TX_COAL_BDS_MAX,
	    BNX_TX_COAL_BDS_CHG);
}

static int
bnx_sysctl_rx_coal_bds_int(SYSCTL_HANDLER_ARGS)
{
	struct bnx_softc *sc = arg1;

4457 return bnx_sysctl_coal_chg(oidp, arg1, arg2, req,
4458 &sc->bnx_rx_coal_bds_int,
4459 BNX_RX_COAL_BDS_MIN, BNX_RX_COAL_BDS_MAX,
	    BNX_RX_COAL_BDS_INT_CHG);
}

static int
bnx_sysctl_tx_coal_bds_int(SYSCTL_HANDLER_ARGS)
{
	struct bnx_softc *sc = arg1;

4468 return bnx_sysctl_coal_chg(oidp, arg1, arg2, req,
4469 &sc->bnx_tx_coal_bds_int,
4470 BNX_TX_COAL_BDS_MIN, BNX_TX_COAL_BDS_MAX,
	    BNX_TX_COAL_BDS_INT_CHG);
}
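
/*
 * Common handler for all of the coalescing sysctls above: it
 * range-checks the new value under the ifnet serializer, stores it,
 * records which parameter changed, and pushes the change to the chip.
 */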
static int
bnx_sysctl_coal_chg(SYSCTL_HANDLER_ARGS, uint32_t *coal,
4476 int coal_min, int coal_max, uint32_t coal_chg_mask)
{
	struct bnx_softc *sc = arg1;
4479 struct ifnet *ifp = &sc->arpcom.ac_if;
	int error, v;

	ifnet_serialize_all(ifp);

	v = *coal;
	error = sysctl_handle_int(oidp, &v, 0, req);
4486 if (!error && req->newptr != NULL) {
4487 if (v < coal_min || v > coal_max) {
			error = EINVAL;
		} else {
			*coal = v;
			sc->bnx_coal_chg |= coal_chg_mask;
4493 /* Commit changes */
4494 bnx_coal_change(sc);
		}
	}

	ifnet_deserialize_all(ifp);
	return error;
}

static void
bnx_coal_change(struct bnx_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int i;
4508 ASSERT_IFNET_SERIALIZED_ALL(ifp);
4510 if (sc->bnx_coal_chg & BNX_RX_COAL_TICKS_CHG) {
4511 if (sc->bnx_rx_retcnt == 1) {
4512 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS,
4513 sc->bnx_rx_coal_ticks);
		} else {
			CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, 0);
4517 for (i = 0; i < sc->bnx_rx_retcnt; ++i) {
4518 CSR_WRITE_4(sc, BGE_VEC1_RX_COAL_TICKS +
4519 (i * BGE_VEC_COALSET_SIZE),
4520 sc->bnx_rx_coal_ticks);
			}
			for (; i < BNX_INTR_MAX - 1; ++i) {
4524 CSR_WRITE_4(sc, BGE_VEC1_RX_COAL_TICKS +
4525 (i * BGE_VEC_COALSET_SIZE), 0);
			}
		}
		if_printf(ifp, "rx_coal_ticks -> %u\n",
4529 sc->bnx_rx_coal_ticks);
	}

	if (sc->bnx_coal_chg & BNX_TX_COAL_TICKS_CHG) {
4534 if (sc->bnx_tx_ringcnt == 1) {
4535 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS,
4536 sc->bnx_tx_coal_ticks);
4539 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, 0);
4540 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) {
4541 CSR_WRITE_4(sc, BGE_VEC1_TX_COAL_TICKS +
4542 (i * BGE_VEC_COALSET_SIZE),
4543 sc->bnx_tx_coal_ticks);
4546 for (; i < BNX_INTR_MAX - 1; ++i) {
4547 CSR_WRITE_4(sc, BGE_VEC1_TX_COAL_TICKS +
4548 (i * BGE_VEC_COALSET_SIZE), 0);
4551 if_printf(ifp, "tx_coal_ticks -> %u\n",
4552 sc->bnx_tx_coal_ticks);
4556 if (sc->bnx_coal_chg & BNX_RX_COAL_BDS_CHG) {
4557 uint32_t rx_coal_bds;
4559 if (ifp->if_flags & IFF_NPOLLING)
4560 rx_coal_bds = sc->bnx_rx_coal_bds_poll;
4562 rx_coal_bds = sc->bnx_rx_coal_bds;
4564 if (sc->bnx_rx_retcnt == 1) {
4565 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, rx_coal_bds);
4568 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, 0);
4569 for (i = 0; i < sc->bnx_rx_retcnt; ++i) {
4570 CSR_WRITE_4(sc, BGE_VEC1_RX_MAX_COAL_BDS +
4571 (i * BGE_VEC_COALSET_SIZE), rx_coal_bds);
4574 for (; i < BNX_INTR_MAX - 1; ++i) {
4575 CSR_WRITE_4(sc, BGE_VEC1_RX_MAX_COAL_BDS +
4576 (i * BGE_VEC_COALSET_SIZE), 0);
4579 if_printf(ifp, "%srx_coal_bds -> %u\n",
4580 (ifp->if_flags & IFF_NPOLLING) ? "polling " : "",
4585 if (sc->bnx_coal_chg & BNX_TX_COAL_BDS_CHG) {
4586 uint32_t tx_coal_bds;
4588 if (ifp->if_flags & IFF_NPOLLING)
4589 tx_coal_bds = sc->bnx_tx_coal_bds_poll;
4591 tx_coal_bds = sc->bnx_tx_coal_bds;
4593 if (sc->bnx_tx_ringcnt == 1) {
4594 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, tx_coal_bds);
4597 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, 0);
4598 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) {
4599 CSR_WRITE_4(sc, BGE_VEC1_TX_MAX_COAL_BDS +
4600 (i * BGE_VEC_COALSET_SIZE), tx_coal_bds);
4603 for (; i < BNX_INTR_MAX - 1; ++i) {
4604 CSR_WRITE_4(sc, BGE_VEC1_TX_MAX_COAL_BDS +
4605 (i * BGE_VEC_COALSET_SIZE), 0);
4608 if_printf(ifp, "%stx_coal_bds -> %u\n",
4609 (ifp->if_flags & IFF_NPOLLING) ? "polling " : "",
4614 if (sc->bnx_coal_chg & BNX_RX_COAL_BDS_INT_CHG) {
4615 if (sc->bnx_rx_retcnt == 1) {
4616 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT,
4617 sc->bnx_rx_coal_bds_int);
4620 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0);
4621 for (i = 0; i < sc->bnx_rx_retcnt; ++i) {
4622 CSR_WRITE_4(sc, BGE_VEC1_RX_MAX_COAL_BDS_INT +
4623 (i * BGE_VEC_COALSET_SIZE),
4624 sc->bnx_rx_coal_bds_int);
4627 for (; i < BNX_INTR_MAX - 1; ++i) {
4628 CSR_WRITE_4(sc, BGE_VEC1_RX_MAX_COAL_BDS_INT +
4629 (i * BGE_VEC_COALSET_SIZE), 0);
4632 if_printf(ifp, "rx_coal_bds_int -> %u\n",
4633 sc->bnx_rx_coal_bds_int);
4637 if (sc->bnx_coal_chg & BNX_TX_COAL_BDS_INT_CHG) {
4638 if (sc->bnx_tx_ringcnt == 1) {
4639 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT,
4640 sc->bnx_tx_coal_bds_int);
4643 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0);
4644 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) {
4645 CSR_WRITE_4(sc, BGE_VEC1_TX_MAX_COAL_BDS_INT +
4646 (i * BGE_VEC_COALSET_SIZE),
4647 sc->bnx_tx_coal_bds_int);
4650 for (; i < BNX_INTR_MAX - 1; ++i) {
4651 CSR_WRITE_4(sc, BGE_VEC1_TX_MAX_COAL_BDS_INT +
4652 (i * BGE_VEC_COALSET_SIZE), 0);
4655 if_printf(ifp, "tx_coal_bds_int -> %u\n",
4656 sc->bnx_tx_coal_bds_int);
	}

	sc->bnx_coal_chg = 0;
}
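
/*
 * Lost-interrupt watchdog used with the status tag bug workaround:
 * if the hardware consumer indices differ from what the driver has
 * processed, yet they have not moved since the previous check, an
 * interrupt was probably lost and the handler is invoked manually.
 */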
static void
bnx_check_intr_rxtx(void *xintr)
4666 struct bnx_intr_data *intr = xintr;
4667 struct bnx_rx_ret_ring *ret;
4668 struct bnx_tx_ring *txr;
	struct ifnet *ifp;

	lwkt_serialize_enter(intr->bnx_intr_serialize);
4673 KKASSERT(mycpuid == intr->bnx_intr_cpuid);
4675 ifp = &intr->bnx_sc->arpcom.ac_if;
4676 if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) != IFF_RUNNING) {
4677 lwkt_serialize_exit(intr->bnx_intr_serialize);
		return;
	}

	txr = intr->bnx_txr;
4682 ret = intr->bnx_ret;
4684 if (*ret->bnx_rx_considx != ret->bnx_rx_saved_considx ||
4685 *txr->bnx_tx_considx != txr->bnx_tx_saved_considx) {
4686 if (intr->bnx_rx_check_considx == ret->bnx_rx_saved_considx &&
4687 intr->bnx_tx_check_considx == txr->bnx_tx_saved_considx) {
4688 if (!intr->bnx_intr_maylose) {
4689 intr->bnx_intr_maylose = TRUE;
				goto done;
			}
			if (bootverbose)
				if_printf(ifp, "lost interrupt\n");
4694 intr->bnx_intr_func(intr->bnx_intr_arg);
		}
	}

	intr->bnx_intr_maylose = FALSE;
4698 intr->bnx_rx_check_considx = ret->bnx_rx_saved_considx;
4699 intr->bnx_tx_check_considx = txr->bnx_tx_saved_considx;
done:
	callout_reset(&intr->bnx_intr_timer, BNX_INTR_CKINTVL,
4703 intr->bnx_intr_check, intr);
4704 lwkt_serialize_exit(intr->bnx_intr_serialize);
}

static void
bnx_check_intr_tx(void *xintr)
4710 struct bnx_intr_data *intr = xintr;
4711 struct bnx_tx_ring *txr;
4714 lwkt_serialize_enter(intr->bnx_intr_serialize);
4716 KKASSERT(mycpuid == intr->bnx_intr_cpuid);
4718 ifp = &intr->bnx_sc->arpcom.ac_if;
4719 if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) != IFF_RUNNING) {
4720 lwkt_serialize_exit(intr->bnx_intr_serialize);
4724 txr = intr->bnx_txr;
4726 if (*txr->bnx_tx_considx != txr->bnx_tx_saved_considx) {
4727 if (intr->bnx_tx_check_considx == txr->bnx_tx_saved_considx) {
4728 if (!intr->bnx_intr_maylose) {
4729 intr->bnx_intr_maylose = TRUE;
4733 if_printf(ifp, "lost interrupt\n");
4734 intr->bnx_intr_func(intr->bnx_intr_arg);
4737 intr->bnx_intr_maylose = FALSE;
4738 intr->bnx_tx_check_considx = txr->bnx_tx_saved_considx;
4741 callout_reset(&intr->bnx_intr_timer, BNX_INTR_CKINTVL,
4742 intr->bnx_intr_check, intr);
4743 lwkt_serialize_exit(intr->bnx_intr_serialize);
}

static void
bnx_check_intr_rx(void *xintr)
4749 struct bnx_intr_data *intr = xintr;
4750 struct bnx_rx_ret_ring *ret;
4753 lwkt_serialize_enter(intr->bnx_intr_serialize);
4755 KKASSERT(mycpuid == intr->bnx_intr_cpuid);
4757 ifp = &intr->bnx_sc->arpcom.ac_if;
4758 if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) != IFF_RUNNING) {
4759 lwkt_serialize_exit(intr->bnx_intr_serialize);
4763 ret = intr->bnx_ret;
4765 if (*ret->bnx_rx_considx != ret->bnx_rx_saved_considx) {
4766 if (intr->bnx_rx_check_considx == ret->bnx_rx_saved_considx) {
4767 if (!intr->bnx_intr_maylose) {
4768 intr->bnx_intr_maylose = TRUE;
4772 if_printf(ifp, "lost interrupt\n");
4773 intr->bnx_intr_func(intr->bnx_intr_arg);
4776 intr->bnx_intr_maylose = FALSE;
4777 intr->bnx_rx_check_considx = ret->bnx_rx_saved_considx;
4780 callout_reset(&intr->bnx_intr_timer, BNX_INTR_CKINTVL,
4781 intr->bnx_intr_check, intr);
4782 lwkt_serialize_exit(intr->bnx_intr_serialize);
}

static void
bnx_enable_intr(struct bnx_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int i;

	for (i = 0; i < sc->bnx_intr_cnt; ++i) {
4792 lwkt_serialize_handler_enable(
		    sc->bnx_intr_data[i].bnx_intr_serialize);
	}
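
	/*
	 * Writing the saved status tag (shifted into bits 31:24) to an
	 * interrupt mailbox re-enables that vector and acknowledges
	 * all status updates up to the tagged one.
	 */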
4799 for (i = 0; i < sc->bnx_intr_cnt; ++i) {
4800 struct bnx_intr_data *intr = &sc->bnx_intr_data[i];
4802 bnx_writembx(sc, intr->bnx_intr_mbx,
4803 (*intr->bnx_saved_status_tag) << 24);
4804 /* XXX Linux driver */
4805 bnx_writembx(sc, intr->bnx_intr_mbx,
4806 (*intr->bnx_saved_status_tag) << 24);
	}

	/*
	 * Unmask the interrupt when we stop polling.
	 */
4812 PCI_CLRBIT(sc->bnx_dev, BGE_PCI_MISC_CTL,
4813 BGE_PCIMISCCTL_MASK_PCI_INTR, 4);
	/*
	 * Trigger another interrupt, since the above writes to the
	 * interrupt mailbox0 may acknowledge pending interrupts.
	 */
	BNX_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
4822 if (sc->bnx_flags & BNX_FLAG_STATUSTAG_BUG) {
		if (bootverbose)
			if_printf(ifp, "status tag bug workaround\n");
4826 for (i = 0; i < sc->bnx_intr_cnt; ++i) {
4827 struct bnx_intr_data *intr = &sc->bnx_intr_data[i];
4829 if (intr->bnx_intr_check == NULL)
				continue;
			intr->bnx_intr_maylose = FALSE;
4832 intr->bnx_rx_check_considx = 0;
4833 intr->bnx_tx_check_considx = 0;
4834 callout_reset_bycpu(&intr->bnx_intr_timer,
4835 BNX_INTR_CKINTVL, intr->bnx_intr_check, intr,
4836 intr->bnx_intr_cpuid);
		}
	}
}

static void
bnx_disable_intr(struct bnx_softc *sc)
{
	int i;

	for (i = 0; i < sc->bnx_intr_cnt; ++i) {
4847 struct bnx_intr_data *intr = &sc->bnx_intr_data[i];
4849 callout_stop(&intr->bnx_intr_timer);
4850 intr->bnx_intr_maylose = FALSE;
4851 intr->bnx_rx_check_considx = 0;
4852 intr->bnx_tx_check_considx = 0;
	}

	/*
	 * Mask the interrupt when we start polling.
	 */
4858 PCI_SETBIT(sc->bnx_dev, BGE_PCI_MISC_CTL,
4859 BGE_PCIMISCCTL_MASK_PCI_INTR, 4);
	/*
	 * Acknowledge possible asserted interrupt.
	 */
4864 for (i = 0; i < BNX_INTR_MAX; ++i)
4865 bnx_writembx(sc, sc->bnx_intr_data[i].bnx_intr_mbx, 1);
4867 for (i = 0; i < sc->bnx_intr_cnt; ++i) {
4868 lwkt_serialize_handler_disable(
4869 sc->bnx_intr_data[i].bnx_intr_serialize);
	}
}

static int
bnx_get_eaddr_mem(struct bnx_softc *sc, uint8_t ether_addr[])
{
	uint32_t mac_addr;
	int ret = 1;

	mac_addr = bnx_readmem_ind(sc, 0x0c14);
4880 if ((mac_addr >> 16) == 0x484b) {
4881 ether_addr[0] = (uint8_t)(mac_addr >> 8);
4882 ether_addr[1] = (uint8_t)mac_addr;
4883 mac_addr = bnx_readmem_ind(sc, 0x0c18);
4884 ether_addr[2] = (uint8_t)(mac_addr >> 24);
4885 ether_addr[3] = (uint8_t)(mac_addr >> 16);
4886 ether_addr[4] = (uint8_t)(mac_addr >> 8);
4887 ether_addr[5] = (uint8_t)mac_addr;
		ret = 0;
	}
	return ret;
}

static int
bnx_get_eaddr_nvram(struct bnx_softc *sc, uint8_t ether_addr[])
{
	int mac_offset = BGE_EE_MAC_OFFSET;
4898 if (BNX_IS_5717_PLUS(sc)) {
		int f;

		f = pci_get_function(sc->bnx_dev);
4903 mac_offset = BGE_EE_MAC_OFFSET_5717;
		if (f & 1)
			mac_offset += BGE_EE_MAC_OFFSET_5717_OFF;
	}

4908 return bnx_read_nvram(sc, ether_addr, mac_offset + 2, ETHER_ADDR_LEN);
}

static int
bnx_get_eaddr_eeprom(struct bnx_softc *sc, uint8_t ether_addr[])
{
	if (sc->bnx_flags & BNX_FLAG_NO_EEPROM)
		return ENXIO;

	return bnx_read_eeprom(sc, ether_addr, BGE_EE_MAC_OFFSET + 2,
	    ETHER_ADDR_LEN);
}

static int
bnx_get_eaddr(struct bnx_softc *sc, uint8_t eaddr[])
{
	static const bnx_eaddr_fcn_t bnx_eaddr_funcs[] = {
4925 /* NOTE: Order is critical */
		bnx_get_eaddr_mem,
		bnx_get_eaddr_nvram,
4928 bnx_get_eaddr_eeprom,
		NULL
	};
	const bnx_eaddr_fcn_t *func;
4933 for (func = bnx_eaddr_funcs; *func != NULL; ++func) {
4934 if ((*func)(sc, eaddr) == 0)
			break;
	}
	return (*func == NULL ? ENXIO : 0);
}
/*
 * NOTE: 'm' is not freed upon failure
 */
static struct mbuf *
bnx_defrag_shortdma(struct mbuf *m)
{
	struct mbuf *n;
	int found;

	/*
	 * If the device receives two back-to-back send BDs with less
	 * than or equal to 8 total bytes, the device may hang.  The two
	 * back-to-back send BDs must be in the same frame for this
	 * failure to occur.  Scan mbuf chains and see whether two
	 * back-to-back send BDs are there.  If this is the case,
	 * allocate a new mbuf and copy the frame to work around the
	 * silicon bug.
	 */
	for (n = m, found = 0; n != NULL; n = n->m_next) {
		if (n->m_len < 8) {
			/* A short BD; two in a row trigger the bug. */
			found++;
			if (found > 1)
				break;
			continue;
		}
		found = 0;
	}

	if (found > 1)
		n = m_defrag(m, MB_DONTWAIT);
	else
		n = NULL;
	return n;
}
4975 bnx_stop_block(struct bnx_softc *sc, bus_size_t reg, uint32_t bit)
{
	int i;

	BNX_CLRBIT(sc, reg, bit);
4980 for (i = 0; i < BNX_TIMEOUT; i++) {
4981 if ((CSR_READ_4(sc, reg) & bit) == 0)
			return;
		DELAY(100);
	}
}

static void
bnx_link_poll(struct bnx_softc *sc)
{
	uint32_t status;

	status = CSR_READ_4(sc, BGE_MAC_STS);
4993 if ((status & sc->bnx_link_chg) || sc->bnx_link_evt) {
4994 sc->bnx_link_evt = 0;
4995 sc->bnx_link_upd(sc, status);
	}
}

static void
bnx_enable_msi(struct bnx_softc *sc, boolean_t is_msix)
{
	uint32_t msi_mode;

	msi_mode = CSR_READ_4(sc, BGE_MSI_MODE);
5005 msi_mode |= BGE_MSIMODE_ENABLE;
	/*
	 * 5718-PG105-R says that "one shot" mode does not work
	 * if MSI is used; in practice, however, it does work.
	 */
5011 msi_mode &= ~BGE_MSIMODE_ONESHOT_DISABLE;
	if (is_msix)
		msi_mode |= BGE_MSIMODE_MSIX_MULTIMODE;
	else
		msi_mode &= ~BGE_MSIMODE_MSIX_MULTIMODE;
5016 CSR_WRITE_4(sc, BGE_MSI_MODE, msi_mode);
}

static uint32_t
bnx_dma_swap_options(struct bnx_softc *sc)
{
	uint32_t dma_options;
5024 dma_options = BGE_MODECTL_WORDSWAP_NONFRAME |
5025 BGE_MODECTL_BYTESWAP_DATA | BGE_MODECTL_WORDSWAP_DATA;
5026 #if BYTE_ORDER == BIG_ENDIAN
	dma_options |= BGE_MODECTL_BYTESWAP_NONFRAME;
#endif
	if (sc->bnx_asicrev == BGE_ASICREV_BCM5720 ||
5030 sc->bnx_asicrev == BGE_ASICREV_BCM5762) {
5031 dma_options |= BGE_MODECTL_BYTESWAP_B2HRX_DATA |
5032 BGE_MODECTL_WORDSWAP_B2HRX_DATA | BGE_MODECTL_B2HRX_ENABLE |
5033 BGE_MODECTL_HTX2B_ENABLE;
	}
	return dma_options;
}

static int
bnx_setup_tso(struct bnx_tx_ring *txr, struct mbuf **mp,
5040 uint16_t *mss0, uint16_t *flags0)
{
	struct mbuf *m;
	struct ip *ip;
	struct tcphdr *th;
	int thoff, iphlen, hoff, hlen;
5046 uint16_t flags, mss;
	m = *mp;
	KASSERT(M_WRITABLE(m), ("TSO mbuf not writable"));
5051 hoff = m->m_pkthdr.csum_lhlen;
5052 iphlen = m->m_pkthdr.csum_iphlen;
5053 thoff = m->m_pkthdr.csum_thlen;
5055 KASSERT(hoff > 0, ("invalid ether header len"));
5056 KASSERT(iphlen > 0, ("invalid ip header len"));
5057 KASSERT(thoff > 0, ("invalid tcp header len"));
5059 if (__predict_false(m->m_len < hoff + iphlen + thoff)) {
5060 m = m_pullup(m, hoff + iphlen + thoff);
		if (m == NULL) {
			*mp = NULL;
			return ENOBUFS;
		}
		*mp = m;
	}

	ip = mtodoff(m, struct ip *, hoff);
5068 th = mtodoff(m, struct tcphdr *, hoff + iphlen);
5070 mss = m->m_pkthdr.tso_segsz;
5071 flags = BGE_TXBDFLAG_CPU_PRE_DMA | BGE_TXBDFLAG_CPU_POST_DMA;
5073 ip->ip_len = htons(mss + iphlen + thoff);
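
	/*
	 * The controller wants the total IP + TCP header length in
	 * 32-bit words, split across the send BD's mss and flags
	 * fields: hlen bits 1:0 land in mss bits 15:14, hlen bit 2 in
	 * flags bit 4, and hlen bits 7:3 in flags bits 14:10 (see the
	 * shifts below).
	 */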
5076 hlen = (iphlen + thoff) >> 2;
5077 mss |= ((hlen & 0x3) << 14);
5078 flags |= ((hlen & 0xf8) << 7) | ((hlen & 0x4) << 2);

	*mss0 = mss;
	*flags0 = flags;

	return 0;
}

static int
bnx_create_tx_ring(struct bnx_tx_ring *txr)
{
	bus_size_t txmaxsz, txmaxsegsz;
	int i, error;
5092 lwkt_serialize_init(&txr->bnx_tx_serialize);
	/*
	 * Create DMA tag and maps for TX mbufs.
	 */
5097 if (txr->bnx_sc->bnx_flags & BNX_FLAG_TSO)
5098 txmaxsz = IP_MAXPACKET + sizeof(struct ether_vlan_header);
5100 txmaxsz = BNX_JUMBO_FRAMELEN;
5101 if (txr->bnx_sc->bnx_asicrev == BGE_ASICREV_BCM57766)
5102 txmaxsegsz = MCLBYTES;
5104 txmaxsegsz = PAGE_SIZE;
5105 error = bus_dma_tag_create(txr->bnx_sc->bnx_cdata.bnx_parent_tag,
5106 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
5107 txmaxsz, BNX_NSEG_NEW, txmaxsegsz,
5108 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
	    &txr->bnx_tx_mtag);
	if (error) {
		device_printf(txr->bnx_sc->bnx_dev,
5112 "could not create TX mbuf DMA tag\n");
		return error;
	}

	for (i = 0; i < BGE_TX_RING_CNT; i++) {
5117 error = bus_dmamap_create(txr->bnx_tx_mtag,
5118 BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
5119 &txr->bnx_tx_buf[i].bnx_tx_dmamap);
		if (error) {
			int j;

			for (j = 0; j < i; ++j) {
5124 bus_dmamap_destroy(txr->bnx_tx_mtag,
5125 txr->bnx_tx_buf[j].bnx_tx_dmamap);
			}
			bus_dma_tag_destroy(txr->bnx_tx_mtag);
5128 txr->bnx_tx_mtag = NULL;
5130 device_printf(txr->bnx_sc->bnx_dev,
5131 "could not create TX mbuf DMA map\n");
			return error;
		}
	}

	/*
	 * Create DMA resources for the TX ring.
	 */
5139 error = bnx_dma_block_alloc(txr->bnx_sc, BGE_TX_RING_SZ,
5140 &txr->bnx_tx_ring_tag,
5141 &txr->bnx_tx_ring_map,
5142 (void *)&txr->bnx_tx_ring,
5143 &txr->bnx_tx_ring_paddr);
	if (error) {
		device_printf(txr->bnx_sc->bnx_dev,
5146 "could not create TX ring\n");
		return error;
	}

	txr->bnx_tx_flags |= BNX_TX_FLAG_SHORTDMA;
5151 txr->bnx_tx_wreg = BNX_TX_WREG_NSEGS;

	return 0;
}

static void
bnx_destroy_tx_ring(struct bnx_tx_ring *txr)
{
	/* Destroy TX mbuf DMA maps and tag. */
5160 if (txr->bnx_tx_mtag != NULL) {
		int i;

		for (i = 0; i < BGE_TX_RING_CNT; i++) {
5164 KKASSERT(txr->bnx_tx_buf[i].bnx_tx_mbuf == NULL);
5165 bus_dmamap_destroy(txr->bnx_tx_mtag,
5166 txr->bnx_tx_buf[i].bnx_tx_dmamap);
		}
		bus_dma_tag_destroy(txr->bnx_tx_mtag);
	}

	/* Destroy TX ring */
5172 bnx_dma_block_free(txr->bnx_tx_ring_tag,
5173 txr->bnx_tx_ring_map, txr->bnx_tx_ring);
}

static int
bnx_sysctl_force_defrag(SYSCTL_HANDLER_ARGS)
5179 struct bnx_softc *sc = (void *)arg1;
5180 struct ifnet *ifp = &sc->arpcom.ac_if;
5181 struct bnx_tx_ring *txr = &sc->bnx_tx_ring[0];
5182 int error, defrag, i;
5184 if (txr->bnx_tx_flags & BNX_TX_FLAG_FORCE_DEFRAG)
5189 error = sysctl_handle_int(oidp, &defrag, 0, req);
5190 if (error || req->newptr == NULL)
5193 ifnet_serialize_all(ifp);
5194 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) {
5195 txr = &sc->bnx_tx_ring[i];
5197 txr->bnx_tx_flags |= BNX_TX_FLAG_FORCE_DEFRAG;
5199 txr->bnx_tx_flags &= ~BNX_TX_FLAG_FORCE_DEFRAG;
5201 ifnet_deserialize_all(ifp);

	return 0;
}

static int
bnx_sysctl_tx_wreg(SYSCTL_HANDLER_ARGS)
5209 struct bnx_softc *sc = (void *)arg1;
5210 struct ifnet *ifp = &sc->arpcom.ac_if;
5211 struct bnx_tx_ring *txr = &sc->bnx_tx_ring[0];
5212 int error, tx_wreg, i;
5214 tx_wreg = txr->bnx_tx_wreg;
5215 error = sysctl_handle_int(oidp, &tx_wreg, 0, req);
5216 if (error || req->newptr == NULL)
5219 ifnet_serialize_all(ifp);
5220 for (i = 0; i < sc->bnx_tx_ringcnt; ++i)
5221 sc->bnx_tx_ring[i].bnx_tx_wreg = tx_wreg;
5222 ifnet_deserialize_all(ifp);

	return 0;
}

static int
bnx_create_rx_ret_ring(struct bnx_rx_ret_ring *ret)
{
	int error;

	lwkt_serialize_init(&ret->bnx_rx_ret_serialize);
	/*
	 * Create DMA resources for the RX return ring.
	 */
5237 error = bnx_dma_block_alloc(ret->bnx_sc,
5238 BGE_RX_RTN_RING_SZ(BNX_RETURN_RING_CNT),
5239 &ret->bnx_rx_ret_ring_tag,
5240 &ret->bnx_rx_ret_ring_map,
5241 (void *)&ret->bnx_rx_ret_ring,
5242 &ret->bnx_rx_ret_ring_paddr);
	if (error) {
		device_printf(ret->bnx_sc->bnx_dev,
5245 "could not create RX ret ring\n");
		return error;
	}

	/* Shadow the standard ring's RX mbuf DMA tag */
5250 ret->bnx_rx_mtag = ret->bnx_std->bnx_rx_mtag;
	/*
	 * Create tmp DMA map for RX mbufs.
	 */
5255 error = bus_dmamap_create(ret->bnx_rx_mtag, BUS_DMA_WAITOK,
5256 &ret->bnx_rx_tmpmap);
	if (error) {
		device_printf(ret->bnx_sc->bnx_dev,
5259 "could not create tmp RX mbuf DMA map\n");
5260 ret->bnx_rx_mtag = NULL;
		return error;
	}
	return 0;
}

static void
bnx_destroy_rx_ret_ring(struct bnx_rx_ret_ring *ret)
{
	/* Destroy tmp RX mbuf DMA map */
5270 if (ret->bnx_rx_mtag != NULL)
5271 bus_dmamap_destroy(ret->bnx_rx_mtag, ret->bnx_rx_tmpmap);
5273 /* Destroy RX return ring */
5274 bnx_dma_block_free(ret->bnx_rx_ret_ring_tag,
5275 ret->bnx_rx_ret_ring_map, ret->bnx_rx_ret_ring);
}

static int
bnx_alloc_intr(struct bnx_softc *sc)
5281 struct bnx_intr_data *intr;
5285 if (sc->bnx_intr_cnt > 1) {
5286 error = bnx_alloc_msix(sc);
5289 KKASSERT(sc->bnx_intr_type == PCI_INTR_TYPE_MSIX);
5293 KKASSERT(sc->bnx_intr_cnt == 1);
5295 intr = &sc->bnx_intr_data[0];
5296 intr->bnx_ret = &sc->bnx_rx_ret_ring[0];
5297 intr->bnx_txr = &sc->bnx_tx_ring[0];
5298 intr->bnx_intr_serialize = &sc->bnx_main_serialize;
5299 intr->bnx_intr_check = bnx_check_intr_rxtx;
5300 intr->bnx_saved_status_tag = &intr->bnx_ret->bnx_saved_status_tag;
5302 sc->bnx_intr_type = pci_alloc_1intr(sc->bnx_dev, bnx_msi_enable,
5303 &intr->bnx_intr_rid, &intr_flags);
5305 intr->bnx_intr_res = bus_alloc_resource_any(sc->bnx_dev, SYS_RES_IRQ,
5306 &intr->bnx_intr_rid, intr_flags);
5307 if (intr->bnx_intr_res == NULL) {
5308 device_printf(sc->bnx_dev, "could not alloc interrupt\n");
5312 if (sc->bnx_intr_type == PCI_INTR_TYPE_MSI) {
5313 bnx_enable_msi(sc, FALSE);
5314 intr->bnx_intr_func = bnx_msi;
5316 device_printf(sc->bnx_dev, "oneshot MSI\n");
5318 intr->bnx_intr_func = bnx_intr_legacy;
5320 intr->bnx_intr_arg = sc;
5321 intr->bnx_intr_cpuid = rman_get_cpuid(intr->bnx_intr_res);
5323 intr->bnx_txr->bnx_tx_cpuid = intr->bnx_intr_cpuid;
5329 bnx_setup_intr(struct bnx_softc *sc)
5333 for (i = 0; i < sc->bnx_intr_cnt; ++i) {
5334 struct bnx_intr_data *intr = &sc->bnx_intr_data[i];
5336 error = bus_setup_intr_descr(sc->bnx_dev, intr->bnx_intr_res,
5337 INTR_MPSAFE, intr->bnx_intr_func, intr->bnx_intr_arg,
5338 &intr->bnx_intr_hand, intr->bnx_intr_serialize,
5339 intr->bnx_intr_desc);
5341 device_printf(sc->bnx_dev,
5342 "could not set up %dth intr\n", i);
5343 bnx_teardown_intr(sc, i);
5351 bnx_teardown_intr(struct bnx_softc *sc, int cnt)
5355 for (i = 0; i < cnt; ++i) {
5356 struct bnx_intr_data *intr = &sc->bnx_intr_data[i];
5358 bus_teardown_intr(sc->bnx_dev, intr->bnx_intr_res,
5359 intr->bnx_intr_hand);
5364 bnx_free_intr(struct bnx_softc *sc)
5366 if (sc->bnx_intr_type != PCI_INTR_TYPE_MSIX) {
5367 struct bnx_intr_data *intr;
5369 KKASSERT(sc->bnx_intr_cnt <= 1);
5370 intr = &sc->bnx_intr_data[0];
5372 if (intr->bnx_intr_res != NULL) {
5373 bus_release_resource(sc->bnx_dev, SYS_RES_IRQ,
5374 intr->bnx_intr_rid, intr->bnx_intr_res);
		}
		if (sc->bnx_intr_type == PCI_INTR_TYPE_MSI)
5377 pci_release_msi(sc->bnx_dev);
	} else {
		bnx_free_msix(sc, TRUE);
	}
}

static void
bnx_setup_serialize(struct bnx_softc *sc)
{
	int i = 0, j;

	/*
	 * Allocate serializer array
	 */
5392 /* Main + RX STD + TX + RX RET */
5393 sc->bnx_serialize_cnt = 1 + 1 + sc->bnx_tx_ringcnt + sc->bnx_rx_retcnt;

	sc->bnx_serialize =
	    kmalloc(sc->bnx_serialize_cnt * sizeof(struct lwkt_serialize *),
5397 M_DEVBUF, M_WAITOK | M_ZERO);
	/*
	 * NOTE: Order is critical
	 */
5407 KKASSERT(i < sc->bnx_serialize_cnt);
5408 sc->bnx_serialize[i++] = &sc->bnx_main_serialize;
5410 KKASSERT(i < sc->bnx_serialize_cnt);
5411 sc->bnx_serialize[i++] = &sc->bnx_rx_std_ring.bnx_rx_std_serialize;
5413 for (j = 0; j < sc->bnx_rx_retcnt; ++j) {
5414 KKASSERT(i < sc->bnx_serialize_cnt);
5415 sc->bnx_serialize[i++] =
5416 &sc->bnx_rx_ret_ring[j].bnx_rx_ret_serialize;
5419 for (j = 0; j < sc->bnx_tx_ringcnt; ++j) {
5420 KKASSERT(i < sc->bnx_serialize_cnt);
5421 sc->bnx_serialize[i++] =
5422 &sc->bnx_tx_ring[j].bnx_tx_serialize;
5425 KKASSERT(i == sc->bnx_serialize_cnt);
}

static void
bnx_serialize(struct ifnet *ifp, enum ifnet_serialize slz)
{
	struct bnx_softc *sc = ifp->if_softc;

5433 ifnet_serialize_array_enter(sc->bnx_serialize,
5434 sc->bnx_serialize_cnt, slz);
}

static void
bnx_deserialize(struct ifnet *ifp, enum ifnet_serialize slz)
{
	struct bnx_softc *sc = ifp->if_softc;

5442 ifnet_serialize_array_exit(sc->bnx_serialize,
5443 sc->bnx_serialize_cnt, slz);
}

static int
bnx_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz)
{
	struct bnx_softc *sc = ifp->if_softc;

5451 return ifnet_serialize_array_try(sc->bnx_serialize,
5452 sc->bnx_serialize_cnt, slz);
}

#ifdef INVARIANTS

static void
bnx_serialize_assert(struct ifnet *ifp, enum ifnet_serialize slz,
5459 boolean_t serialized)
{
	struct bnx_softc *sc = ifp->if_softc;

5463 ifnet_serialize_array_assert(sc->bnx_serialize, sc->bnx_serialize_cnt,
	    slz, serialized);
}

#endif	/* INVARIANTS */
5469 #ifdef IFPOLL_ENABLE
5472 bnx_sysctl_npoll_offset(SYSCTL_HANDLER_ARGS)
5474 struct bnx_softc *sc = (void *)arg1;
5475 struct ifnet *ifp = &sc->arpcom.ac_if;
5478 off = sc->bnx_npoll_rxoff;
5479 error = sysctl_handle_int(oidp, &off, 0, req);
5480 if (error || req->newptr == NULL)
5485 ifnet_serialize_all(ifp);
5486 if (off >= ncpus2 || off % sc->bnx_rx_retcnt != 0) {
5490 sc->bnx_npoll_txoff = off;
5491 sc->bnx_npoll_rxoff = off;
5493 ifnet_deserialize_all(ifp);
5499 bnx_sysctl_npoll_rxoff(SYSCTL_HANDLER_ARGS)
5501 struct bnx_softc *sc = (void *)arg1;
5502 struct ifnet *ifp = &sc->arpcom.ac_if;
5505 off = sc->bnx_npoll_rxoff;
5506 error = sysctl_handle_int(oidp, &off, 0, req);
5507 if (error || req->newptr == NULL)
5512 ifnet_serialize_all(ifp);
5513 if (off >= ncpus2 || off % sc->bnx_rx_retcnt != 0) {
5517 sc->bnx_npoll_rxoff = off;
5519 ifnet_deserialize_all(ifp);
5525 bnx_sysctl_npoll_txoff(SYSCTL_HANDLER_ARGS)
5527 struct bnx_softc *sc = (void *)arg1;
5528 struct ifnet *ifp = &sc->arpcom.ac_if;
5531 off = sc->bnx_npoll_txoff;
5532 error = sysctl_handle_int(oidp, &off, 0, req);
5533 if (error || req->newptr == NULL)
5538 ifnet_serialize_all(ifp);
5539 if (off >= ncpus2) {
5543 sc->bnx_npoll_txoff = off;
5545 ifnet_deserialize_all(ifp);
5550 #endif /* IFPOLL_ENABLE */
5553 bnx_set_tick_cpuid(struct bnx_softc *sc, boolean_t polling)
5556 sc->bnx_tick_cpuid = 0; /* XXX */
5558 sc->bnx_tick_cpuid = sc->bnx_intr_data[0].bnx_intr_cpuid;
5562 bnx_rx_std_refill_ithread(void *xstd)
5564 struct bnx_rx_std_ring *std = xstd;
5565 struct globaldata *gd = mycpu;
5569 while (!std->bnx_rx_std_stop) {
5570 if (std->bnx_rx_std_refill) {
5571 lwkt_serialize_handler_call(
5572 &std->bnx_rx_std_serialize,
5573 bnx_rx_std_refill, std, NULL);
5579 atomic_poll_release_int(&std->bnx_rx_std_running);
5582 if (!std->bnx_rx_std_refill && !std->bnx_rx_std_stop) {
5583 lwkt_deschedule_self(gd->gd_curthread);
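
/*
 * Refill handler, run via lwkt_serialize_handler_call() under the
 * standard ring serializer: it consumes the per-return-ring refill
 * bits, walks forward over buffers already marked refilled, and
 * advances the standard ring producer index through the mailbox
 * register.
 */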
static void
bnx_rx_std_refill(void *xstd, void *frame __unused)
{
	struct bnx_rx_std_ring *std = xstd;
5599 int cnt, refill_mask;
5605 refill_mask = std->bnx_rx_std_refill;
5606 atomic_clear_int(&std->bnx_rx_std_refill, refill_mask);
5608 while (refill_mask) {
5609 uint16_t check_idx = std->bnx_rx_std;
5612 ret_idx = bsfl(refill_mask);
5614 struct bnx_rx_buf *rb;
5617 BNX_INC(check_idx, BGE_STD_RX_RING_CNT);
5618 rb = &std->bnx_rx_std_buf[check_idx];
5619 refilled = rb->bnx_rx_refilled;
5622 bnx_setup_rxdesc_std(std, check_idx);
5623 std->bnx_rx_std = check_idx;
5626 atomic_subtract_int(
5627 &std->bnx_rx_std_used, cnt);
5628 bnx_writembx(std->bnx_sc,
5629 BGE_MBX_RX_STD_PROD_LO,
5637 refill_mask &= ~(1 << ret_idx);
5641 atomic_subtract_int(&std->bnx_rx_std_used, cnt);
5642 bnx_writembx(std->bnx_sc, BGE_MBX_RX_STD_PROD_LO,
5646 if (std->bnx_rx_std_refill)
5649 atomic_poll_release_int(&std->bnx_rx_std_running);
5652 if (std->bnx_rx_std_refill)
5657 bnx_sysctl_std_refill(SYSCTL_HANDLER_ARGS)
5659 struct bnx_softc *sc = (void *)arg1;
5660 struct ifnet *ifp = &sc->arpcom.ac_if;
5661 struct bnx_rx_ret_ring *ret = &sc->bnx_rx_ret_ring[0];
5662 int error, cntmax, i;
5664 cntmax = ret->bnx_rx_cntmax;
5665 error = sysctl_handle_int(oidp, &cntmax, 0, req);
5666 if (error || req->newptr == NULL)
		return error;

	ifnet_serialize_all(ifp);
5671 if ((cntmax * sc->bnx_rx_retcnt) >= BGE_STD_RX_RING_CNT / 2) {
		error = EINVAL;
		goto back;
	}

	for (i = 0; i < sc->bnx_rx_retcnt; ++i)
		sc->bnx_rx_ret_ring[i].bnx_rx_cntmax = cntmax;
	error = 0;
back:
	ifnet_deserialize_all(ifp);
	return error;
}

static void
bnx_init_rss(struct bnx_softc *sc)
{
	uint8_t key[BGE_RSS_KEYREG_CNT * BGE_RSS_KEYREG_SIZE];
	int i, j, r;

	KKASSERT(BNX_RSS_ENABLED(sc));
	/*
	 * Configure the RSS redirect table in the following fashion:
	 *	(hash & ring_cnt_mask) == rdr_table[(hash & rdr_table_mask)]
	 */
	r = 0;
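
	/*
	 * For example (a sketch, assuming 4 RX return rings): each
	 * 32-bit BGE_RSS_INDIR_TBL register packs 8 entries, and entry
	 * number (j * 8 + i) holds (j * 8 + i) % 4, so hash values are
	 * spread round-robin 0,1,2,3,0,1,2,3,... across the rings.
	 */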
5699 for (j = 0; j < BGE_RSS_INDIR_TBL_CNT; ++j) {
		uint32_t tbl = 0;
		int q;

		for (i = 0; i < BGE_RSS_INDIR_TBLENT_CNT; ++i) {
5705 q = r % sc->bnx_rx_retcnt;
5706 tbl |= q << (BGE_RSS_INDIR_TBLENT_SHIFT *
5707 (BGE_RSS_INDIR_TBLENT_CNT - i - 1));
			++r;
		}

		BNX_RSS_DPRINTF(sc, 1, "tbl%d %08x\n", j, tbl);
5712 CSR_WRITE_4(sc, BGE_RSS_INDIR_TBL(j), tbl);
	}

	toeplitz_get_key(key, sizeof(key));
5716 for (i = 0; i < BGE_RSS_KEYREG_CNT; ++i) {
		uint32_t keyreg;

		keyreg = BGE_RSS_KEYREG_VAL(key, i);
5721 BNX_RSS_DPRINTF(sc, 1, "key%d %08x\n", i, keyreg);
5722 CSR_WRITE_4(sc, BGE_RSS_KEYREG(i), keyreg);
	}
}

static void
bnx_setup_ring_cnt(struct bnx_softc *sc)
{
	int msix_enable, i, msix_cnt, msix_cnt2, ring_max;
5731 sc->bnx_tx_ringcnt = 1;
5732 sc->bnx_rx_retcnt = 1;
5733 sc->bnx_intr_cnt = 1;
5735 msix_enable = device_getenv_int(sc->bnx_dev, "msix.enable",
	    bnx_msix_enable);
	if (!msix_enable)
		return;

	if (ncpus2 == 1)
		return;

	msix_cnt = pci_msix_count(sc->bnx_dev);
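
	/*
	 * Round the usable MSI-X vector count down to a power of 2:
	 * the loop below finds the largest i with (1 << (i + 1)) <=
	 * msix_cnt, so msix_cnt2 = 1 << i is the largest power of 2
	 * that is <= msix_cnt.
	 */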
	i = 0;
	while ((1 << (i + 1)) <= msix_cnt)
		++i;
	msix_cnt2 = 1 << i;
	/*
	 * One MSI-X vector is dedicated to the status block or the
	 * single TX queue, so make sure that there are enough MSI-X
	 * vectors.
	 */
5756 if (msix_cnt == msix_cnt2) {
		/*
		 * This probably will not happen; the 57785/5718
		 * families come with at least 5 MSI-X vectors.
		 */
		msix_cnt2 >>= 1;
5763 if (msix_cnt2 <= 1) {
5764 device_printf(sc->bnx_dev,
5765 "MSI-X count %d could not be used\n", msix_cnt);
			return;
		}
		device_printf(sc->bnx_dev, "MSI-X count %d is power of 2\n",
		    msix_cnt2);
	}

	/*
	 * Setup RX ring count
	 */
5775 ring_max = BNX_RX_RING_MAX;
5776 if (ring_max > msix_cnt2)
5777 ring_max = msix_cnt2;
5778 sc->bnx_rx_retcnt = device_getenv_int(sc->bnx_dev, "rx_rings",
	    bnx_rx_rings);
	sc->bnx_rx_retcnt = if_ring_count2(sc->bnx_rx_retcnt, ring_max);
5782 if (sc->bnx_rx_retcnt == 1)
		return;

	/*
	 * We need one extra MSI-X vector for link status or
	 * the TX ring (if only one TX ring is enabled).
	 */
5789 sc->bnx_intr_cnt = sc->bnx_rx_retcnt + 1;
	/*
	 * Setup TX ring count
	 *
	 * Currently only BCM5719 and BCM5720 support multiple TX rings
	 * and the TX ring count must be less than the RX ring count.
	 */
5797 if (sc->bnx_asicrev == BGE_ASICREV_BCM5719 ||
5798 sc->bnx_asicrev == BGE_ASICREV_BCM5720) {
5799 ring_max = BNX_TX_RING_MAX;
5800 if (ring_max > msix_cnt2)
5801 ring_max = msix_cnt2;
5802 if (ring_max > sc->bnx_rx_retcnt)
5803 ring_max = sc->bnx_rx_retcnt;
5804 sc->bnx_tx_ringcnt = device_getenv_int(sc->bnx_dev, "tx_rings",
		    bnx_tx_rings);
		sc->bnx_tx_ringcnt = if_ring_count2(sc->bnx_tx_ringcnt,
		    ring_max);
	}
}

static int
bnx_alloc_msix(struct bnx_softc *sc)
5814 struct bnx_intr_data *intr;
5815 boolean_t setup = FALSE;
5816 int error, i, offset, offset_def;
5818 KKASSERT(sc->bnx_intr_cnt > 1);
5819 KKASSERT(sc->bnx_intr_cnt == sc->bnx_rx_retcnt + 1);
5821 if (sc->bnx_flags & BNX_FLAG_RXTX_BUNDLE) {
5825 intr = &sc->bnx_intr_data[0];
5827 intr->bnx_intr_serialize = &sc->bnx_main_serialize;
5828 intr->bnx_saved_status_tag = &sc->bnx_saved_status_tag;
5830 intr->bnx_intr_func = bnx_msix_status;
5831 intr->bnx_intr_arg = sc;
5832 intr->bnx_intr_cpuid = 0; /* XXX */
5834 ksnprintf(intr->bnx_intr_desc0, sizeof(intr->bnx_intr_desc0),
5835 "%s sts", device_get_nameunit(sc->bnx_dev));
5836 intr->bnx_intr_desc = intr->bnx_intr_desc0;
5841 if (sc->bnx_rx_retcnt == ncpus2) {
5844 offset_def = (sc->bnx_rx_retcnt *
5845 device_get_unit(sc->bnx_dev)) % ncpus2;
5847 offset = device_getenv_int(sc->bnx_dev,
5848 "msix.offset", offset_def);
5849 if (offset >= ncpus2 ||
5850 offset % sc->bnx_rx_retcnt != 0) {
5851 device_printf(sc->bnx_dev,
5852 "invalid msix.offset %d, use %d\n",
5853 offset, offset_def);
5854 offset = offset_def;
5858 for (i = 1; i < sc->bnx_intr_cnt; ++i) {
5861 intr = &sc->bnx_intr_data[i];
5863 KKASSERT(idx < sc->bnx_rx_retcnt);
5864 intr->bnx_ret = &sc->bnx_rx_ret_ring[idx];
5865 if (idx < sc->bnx_tx_ringcnt) {
5866 intr->bnx_txr = &sc->bnx_tx_ring[idx];
5867 intr->bnx_ret->bnx_txr = intr->bnx_txr;
5870 intr->bnx_intr_serialize =
5871 &intr->bnx_ret->bnx_rx_ret_serialize;
5872 intr->bnx_saved_status_tag =
5873 &intr->bnx_ret->bnx_saved_status_tag;
5875 intr->bnx_intr_arg = intr->bnx_ret;
5876 KKASSERT(idx + offset < ncpus2);
5877 intr->bnx_intr_cpuid = idx + offset;
5879 if (intr->bnx_txr == NULL) {
5880 intr->bnx_intr_check = bnx_check_intr_rx;
5881 intr->bnx_intr_func = bnx_msix_rx;
5882 ksnprintf(intr->bnx_intr_desc0,
5883 sizeof(intr->bnx_intr_desc0), "%s rx%d",
5884 device_get_nameunit(sc->bnx_dev), idx);
5886 intr->bnx_intr_check = bnx_check_intr_rxtx;
5887 intr->bnx_intr_func = bnx_msix_rxtx;
5888 ksnprintf(intr->bnx_intr_desc0,
5889 sizeof(intr->bnx_intr_desc0), "%s rxtx%d",
5890 device_get_nameunit(sc->bnx_dev), idx);
5892 intr->bnx_txr->bnx_tx_cpuid =
5893 intr->bnx_intr_cpuid;
5895 intr->bnx_intr_desc = intr->bnx_intr_desc0;
5897 intr->bnx_ret->bnx_msix_mbx = intr->bnx_intr_mbx;
5901 * TX ring and link status
5903 offset_def = device_get_unit(sc->bnx_dev) % ncpus2;
5904 offset = device_getenv_int(sc->bnx_dev, "msix.txoff",
5906 if (offset >= ncpus2) {
5907 device_printf(sc->bnx_dev,
5908 "invalid msix.txoff %d, use %d\n",
5909 offset, offset_def);
5910 offset = offset_def;
5913 intr = &sc->bnx_intr_data[0];
5915 intr->bnx_txr = &sc->bnx_tx_ring[0];
5916 intr->bnx_intr_serialize = &sc->bnx_main_serialize;
5917 intr->bnx_intr_check = bnx_check_intr_tx;
5918 intr->bnx_saved_status_tag =
5919 &intr->bnx_txr->bnx_saved_status_tag;
5921 intr->bnx_intr_func = bnx_msix_tx_status;
5922 intr->bnx_intr_arg = intr->bnx_txr;
5923 intr->bnx_intr_cpuid = offset;
5925 ksnprintf(intr->bnx_intr_desc0, sizeof(intr->bnx_intr_desc0),
5926 "%s ststx", device_get_nameunit(sc->bnx_dev));
5927 intr->bnx_intr_desc = intr->bnx_intr_desc0;
5929 intr->bnx_txr->bnx_tx_cpuid = intr->bnx_intr_cpuid;
5934 if (sc->bnx_rx_retcnt == ncpus2) {
5937 offset_def = (sc->bnx_rx_retcnt *
5938 device_get_unit(sc->bnx_dev)) % ncpus2;
5940 offset = device_getenv_int(sc->bnx_dev,
5941 "msix.rxoff", offset_def);
5942 if (offset >= ncpus2 ||
5943 offset % sc->bnx_rx_retcnt != 0) {
5944 device_printf(sc->bnx_dev,
5945 "invalid msix.rxoff %d, use %d\n",
5946 offset, offset_def);
5947 offset = offset_def;
5951 for (i = 1; i < sc->bnx_intr_cnt; ++i) {
5954 intr = &sc->bnx_intr_data[i];
5956 KKASSERT(idx < sc->bnx_rx_retcnt);
5957 intr->bnx_ret = &sc->bnx_rx_ret_ring[idx];
5958 intr->bnx_intr_serialize =
5959 &intr->bnx_ret->bnx_rx_ret_serialize;
5960 intr->bnx_intr_check = bnx_check_intr_rx;
5961 intr->bnx_saved_status_tag =
5962 &intr->bnx_ret->bnx_saved_status_tag;
5964 intr->bnx_intr_func = bnx_msix_rx;
5965 intr->bnx_intr_arg = intr->bnx_ret;
5966 KKASSERT(idx + offset < ncpus2);
5967 intr->bnx_intr_cpuid = idx + offset;
5969 ksnprintf(intr->bnx_intr_desc0,
5970 sizeof(intr->bnx_intr_desc0), "%s rx%d",
5971 device_get_nameunit(sc->bnx_dev), idx);
5972 intr->bnx_intr_desc = intr->bnx_intr_desc0;
5974 intr->bnx_ret->bnx_msix_mbx = intr->bnx_intr_mbx;
5978 sc->bnx_msix_mem_rid = PCIR_BAR(4);
5979 sc->bnx_msix_mem_res = bus_alloc_resource_any(sc->bnx_dev,
5980 SYS_RES_MEMORY, &sc->bnx_msix_mem_rid, RF_ACTIVE);
5981 if (sc->bnx_msix_mem_res == NULL) {
5982 device_printf(sc->bnx_dev, "could not alloc MSI-X table\n");
5986 bnx_enable_msi(sc, TRUE);
5988 error = pci_setup_msix(sc->bnx_dev);
5990 device_printf(sc->bnx_dev, "could not setup MSI-X\n");
5995 for (i = 0; i < sc->bnx_intr_cnt; ++i) {
5996 intr = &sc->bnx_intr_data[i];
5998 error = pci_alloc_msix_vector(sc->bnx_dev, i,
5999 &intr->bnx_intr_rid, intr->bnx_intr_cpuid);
6001 device_printf(sc->bnx_dev,
6002 "could not alloc MSI-X %d on cpu%d\n",
6003 i, intr->bnx_intr_cpuid);
6007 intr->bnx_intr_res = bus_alloc_resource_any(sc->bnx_dev,
6008 SYS_RES_IRQ, &intr->bnx_intr_rid, RF_ACTIVE);
6009 if (intr->bnx_intr_res == NULL) {
6010 device_printf(sc->bnx_dev,
6011 "could not alloc MSI-X %d resource\n", i);
6017 pci_enable_msix(sc->bnx_dev);
6018 sc->bnx_intr_type = PCI_INTR_TYPE_MSIX;
6021 bnx_free_msix(sc, setup);
6026 bnx_free_msix(struct bnx_softc *sc, boolean_t setup)
6030 KKASSERT(sc->bnx_intr_cnt > 1);
6032 for (i = 0; i < sc->bnx_intr_cnt; ++i) {
6033 struct bnx_intr_data *intr = &sc->bnx_intr_data[i];
6035 if (intr->bnx_intr_res != NULL) {
6036 bus_release_resource(sc->bnx_dev, SYS_RES_IRQ,
6037 intr->bnx_intr_rid, intr->bnx_intr_res);
6039 if (intr->bnx_intr_rid >= 0) {
6040 pci_release_msix_vector(sc->bnx_dev,
6041 intr->bnx_intr_rid);
		}
	}
	if (setup)
		pci_teardown_msix(sc->bnx_dev);
}
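
/*
 * IPI target: runs on the refill ithread's CPU, marks this return
 * ring's bit in the refill mask and wakes the ithread.
 */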
static void
bnx_rx_std_refill_sched_ipi(void *xret)
6051 struct bnx_rx_ret_ring *ret = xret;
6052 struct bnx_rx_std_ring *std = ret->bnx_std;
6053 struct globaldata *gd = mycpu;
6057 atomic_set_int(&std->bnx_rx_std_refill, ret->bnx_rx_mask);
6060 KKASSERT(std->bnx_rx_std_ithread.td_gd == gd);
6061 lwkt_schedule(&std->bnx_rx_std_ithread);
}

static void
bnx_rx_std_refill_stop(void *xstd)
6069 struct bnx_rx_std_ring *std = xstd;
6070 struct globaldata *gd = mycpu;
6074 std->bnx_rx_std_stop = 1;
6077 KKASSERT(std->bnx_rx_std_ithread.td_gd == gd);
6078 lwkt_schedule(&std->bnx_rx_std_ithread);
}

static void
bnx_serialize_skipmain(struct bnx_softc *sc)
{
	lwkt_serialize_array_enter(sc->bnx_serialize,
6087 sc->bnx_serialize_cnt, 1);
}

static void
bnx_deserialize_skipmain(struct bnx_softc *sc)
{
	lwkt_serialize_array_exit(sc->bnx_serialize,
6094 sc->bnx_serialize_cnt, 1);
}

static void
bnx_rx_std_refill_sched(struct bnx_rx_ret_ring *ret,
6099 struct bnx_rx_std_ring *std)
{
	struct globaldata *gd = mycpu;

6103 ret->bnx_rx_cnt = 0;
6108 atomic_set_int(&std->bnx_rx_std_refill, ret->bnx_rx_mask);
6110 if (atomic_poll_acquire_int(&std->bnx_rx_std_running)) {
6111 if (std->bnx_rx_std_ithread.td_gd == gd) {
6112 lwkt_schedule(&std->bnx_rx_std_ithread);
		} else {
			lwkt_send_ipiq(
			    std->bnx_rx_std_ithread.td_gd,
			    bnx_rx_std_refill_sched_ipi, ret);
		}
	}
}
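
/*
 * Classify a received frame for RSS dispatch: only IPv4 TCP or UDP
 * frames whose IP and TCP/UDP checksums were verified by the chip
 * are accepted; everything else makes this return NULL so the caller
 * falls back to non-RSS delivery.
 */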
6123 static struct pktinfo *
6124 bnx_rss_info(struct pktinfo *pi, const struct bge_rx_bd *cur_rx)
{
	/* Don't pick up IPv6 packet */
6127 if (cur_rx->bge_flags & BGE_RXBDFLAG_IPV6)
		return NULL;

	/* Don't pick up IP packet w/o IP checksum */
6131 if ((cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) == 0 ||
6132 (cur_rx->bge_error_flag & BGE_RXERRFLAG_IP_CSUM_NOK))
		return NULL;

	/* Don't pick up IP packet w/o TCP/UDP checksum */
6136 if ((cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) == 0)
		return NULL;

	/* May be IP fragment */
6140 if (cur_rx->bge_tcp_udp_csum != 0xffff)
		return NULL;

	if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_IS_TCP)
6144 pi->pi_l3proto = IPPROTO_TCP;
6146 pi->pi_l3proto = IPPROTO_UDP;
	pi->pi_netisr = NETISR_IP;

	return pi;
}