/*
 * Copyright (c) 2001 Wind River Systems
 * Copyright (c) 1997, 1998, 1999, 2001
 *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/bge/if_bge.c,v 1.3.2.39 2005/07/03 03:41:18 silby Exp $
 */
#include "opt_ifpoll.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/interrupt.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/rman.h>
#include <sys/serialize.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <netinet/ip.h>
#include <netinet/tcp.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_poll.h>
#include <net/if_types.h>
#include <net/ifq_var.h>
#include <net/toeplitz.h>
#include <net/toeplitz2.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>

#include <dev/netif/mii_layer/mii.h>
#include <dev/netif/mii_layer/miivar.h>
#include <dev/netif/mii_layer/brgphyreg.h>

#include <bus/pci/pcidevs.h>
#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>

#include <dev/netif/bge/if_bgereg.h>
#include <dev/netif/bnx/if_bnxvar.h>

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"
#define BNX_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)

#define BNX_RESET_SHUTDOWN	0
#define BNX_RESET_START		1
#define BNX_RESET_SUSPEND	2

#define BNX_INTR_CKINTVL	((10 * hz) / 1000)	/* 10ms */

#ifdef BNX_RSS_DEBUG
#define BNX_RSS_DPRINTF(sc, lvl, fmt, ...) \
do { \
	if (sc->bnx_rss_debug >= lvl) \
		if_printf(&sc->arpcom.ac_if, fmt, __VA_ARGS__); \
} while (0)
#else	/* !BNX_RSS_DEBUG */
#define BNX_RSS_DPRINTF(sc, lvl, fmt, ...)	((void)0)
#endif	/* BNX_RSS_DEBUG */
static const struct bnx_type {
	uint16_t	bnx_vid;
	uint16_t	bnx_did;
	char		*bnx_name;
} bnx_devs[] = {
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5717,
		"Broadcom BCM5717 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5717C,
		"Broadcom BCM5717C Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5718,
		"Broadcom BCM5718 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5719,
		"Broadcom BCM5719 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5720_ALT,
		"Broadcom BCM5720 Gigabit Ethernet" },

	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5725,
		"Broadcom BCM5725 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5727,
		"Broadcom BCM5727 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5762,
		"Broadcom BCM5762 Gigabit Ethernet" },

	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57761,
		"Broadcom BCM57761 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57762,
		"Broadcom BCM57762 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57765,
		"Broadcom BCM57765 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57766,
		"Broadcom BCM57766 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57781,
		"Broadcom BCM57781 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57782,
		"Broadcom BCM57782 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57785,
		"Broadcom BCM57785 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57786,
		"Broadcom BCM57786 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57791,
		"Broadcom BCM57791 Fast Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57795,
		"Broadcom BCM57795 Fast Ethernet" },

	{ 0, 0, NULL }
};
static const int bnx_tx_mailbox[BNX_TX_RING_MAX] = {
	BGE_MBX_TX_HOST_PROD0_LO,
	BGE_MBX_TX_HOST_PROD0_HI,
	BGE_MBX_TX_HOST_PROD1_LO,
	BGE_MBX_TX_HOST_PROD1_HI
};

#define BNX_IS_JUMBO_CAPABLE(sc)	((sc)->bnx_flags & BNX_FLAG_JUMBO)
#define BNX_IS_5717_PLUS(sc)		((sc)->bnx_flags & BNX_FLAG_5717_PLUS)
#define BNX_IS_57765_PLUS(sc)		((sc)->bnx_flags & BNX_FLAG_57765_PLUS)
#define BNX_IS_57765_FAMILY(sc)	\
	((sc)->bnx_flags & BNX_FLAG_57765_FAMILY)
typedef int	(*bnx_eaddr_fcn_t)(struct bnx_softc *, uint8_t[]);

static int	bnx_probe(device_t);
static int	bnx_attach(device_t);
static int	bnx_detach(device_t);
static void	bnx_shutdown(device_t);
static int	bnx_suspend(device_t);
static int	bnx_resume(device_t);
static int	bnx_miibus_readreg(device_t, int, int);
static int	bnx_miibus_writereg(device_t, int, int, int);
static void	bnx_miibus_statchg(device_t);

static int	bnx_handle_status(struct bnx_softc *);
#ifdef IFPOLL_ENABLE
static void	bnx_npoll(struct ifnet *, struct ifpoll_info *);
static void	bnx_npoll_rx(struct ifnet *, void *, int);
static void	bnx_npoll_tx(struct ifnet *, void *, int);
static void	bnx_npoll_tx_notag(struct ifnet *, void *, int);
static void	bnx_npoll_status(struct ifnet *);
static void	bnx_npoll_status_notag(struct ifnet *);
#endif
static void	bnx_intr_legacy(void *);
static void	bnx_msi(void *);
static void	bnx_intr(struct bnx_softc *);
static void	bnx_msix_status(void *);
static void	bnx_msix_tx_status(void *);
static void	bnx_msix_rx(void *);
static void	bnx_msix_rxtx(void *);
static void	bnx_enable_intr(struct bnx_softc *);
static void	bnx_disable_intr(struct bnx_softc *);
static void	bnx_txeof(struct bnx_tx_ring *, uint16_t);
static void	bnx_rxeof(struct bnx_rx_ret_ring *, uint16_t, int);
static int	bnx_alloc_intr(struct bnx_softc *);
static int	bnx_setup_intr(struct bnx_softc *);
static void	bnx_free_intr(struct bnx_softc *);
static void	bnx_teardown_intr(struct bnx_softc *, int);
static int	bnx_alloc_msix(struct bnx_softc *);
static void	bnx_free_msix(struct bnx_softc *, boolean_t);
static void	bnx_check_intr_rxtx(void *);
static void	bnx_check_intr_rx(void *);
static void	bnx_check_intr_tx(void *);
static void	bnx_rx_std_refill_ithread(void *);
static void	bnx_rx_std_refill(void *, void *);
static void	bnx_rx_std_refill_sched_ipi(void *);
static void	bnx_rx_std_refill_stop(void *);
static void	bnx_rx_std_refill_sched(struct bnx_rx_ret_ring *,
		    struct bnx_rx_std_ring *);

static void	bnx_start(struct ifnet *, struct ifaltq_subque *);
static int	bnx_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	bnx_init(void *);
static void	bnx_stop(struct bnx_softc *);
static void	bnx_watchdog(struct ifaltq_subque *);
static int	bnx_ifmedia_upd(struct ifnet *);
static void	bnx_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static void	bnx_tick(void *);
static void	bnx_serialize(struct ifnet *, enum ifnet_serialize);
static void	bnx_deserialize(struct ifnet *, enum ifnet_serialize);
static int	bnx_tryserialize(struct ifnet *, enum ifnet_serialize);
#ifdef INVARIANTS
static void	bnx_serialize_assert(struct ifnet *, enum ifnet_serialize,
		    boolean_t);
#endif
static void	bnx_serialize_skipmain(struct bnx_softc *);
static void	bnx_deserialize_skipmain(struct bnx_softc *sc);

static int	bnx_alloc_jumbo_mem(struct bnx_softc *);
static void	bnx_free_jumbo_mem(struct bnx_softc *);
static struct bnx_jslot *
		bnx_jalloc(struct bnx_softc *);
static void	bnx_jfree(void *);
static void	bnx_jref(void *);
static int	bnx_newbuf_std(struct bnx_rx_ret_ring *, int, int);
static int	bnx_newbuf_jumbo(struct bnx_softc *, int, int);
static void	bnx_setup_rxdesc_std(struct bnx_rx_std_ring *, int);
static void	bnx_setup_rxdesc_jumbo(struct bnx_softc *, int);
static int	bnx_init_rx_ring_std(struct bnx_rx_std_ring *);
static void	bnx_free_rx_ring_std(struct bnx_rx_std_ring *);
static int	bnx_init_rx_ring_jumbo(struct bnx_softc *);
static void	bnx_free_rx_ring_jumbo(struct bnx_softc *);
static void	bnx_free_tx_ring(struct bnx_tx_ring *);
static int	bnx_init_tx_ring(struct bnx_tx_ring *);
static int	bnx_create_tx_ring(struct bnx_tx_ring *);
static void	bnx_destroy_tx_ring(struct bnx_tx_ring *);
static int	bnx_create_rx_ret_ring(struct bnx_rx_ret_ring *);
static void	bnx_destroy_rx_ret_ring(struct bnx_rx_ret_ring *);
static int	bnx_dma_alloc(device_t);
static void	bnx_dma_free(struct bnx_softc *);
static int	bnx_dma_block_alloc(struct bnx_softc *, bus_size_t,
		    bus_dma_tag_t *, bus_dmamap_t *, void **, bus_addr_t *);
static void	bnx_dma_block_free(bus_dma_tag_t, bus_dmamap_t, void *);
static struct mbuf *
		bnx_defrag_shortdma(struct mbuf *);
static int	bnx_encap(struct bnx_tx_ring *, struct mbuf **,
		    uint32_t *, int *);
static int	bnx_setup_tso(struct bnx_tx_ring *, struct mbuf **,
		    uint16_t *, uint16_t *);
static void	bnx_setup_serialize(struct bnx_softc *);
static void	bnx_set_tick_cpuid(struct bnx_softc *, boolean_t);
static void	bnx_setup_ring_cnt(struct bnx_softc *);

static struct pktinfo *bnx_rss_info(struct pktinfo *,
		    const struct bge_rx_bd *);
static void	bnx_init_rss(struct bnx_softc *);
static void	bnx_reset(struct bnx_softc *);
static int	bnx_chipinit(struct bnx_softc *);
static int	bnx_blockinit(struct bnx_softc *);
static void	bnx_stop_block(struct bnx_softc *, bus_size_t, uint32_t);
static void	bnx_enable_msi(struct bnx_softc *, boolean_t);
static void	bnx_setmulti(struct bnx_softc *);
static void	bnx_setpromisc(struct bnx_softc *);
static void	bnx_stats_update_regs(struct bnx_softc *);
static uint32_t	bnx_dma_swap_options(struct bnx_softc *);

static uint32_t	bnx_readmem_ind(struct bnx_softc *, uint32_t);
static void	bnx_writemem_ind(struct bnx_softc *, uint32_t, uint32_t);
#ifdef notdef
static uint32_t	bnx_readreg_ind(struct bnx_softc *, uint32_t);
#endif
static void	bnx_writemem_direct(struct bnx_softc *, uint32_t, uint32_t);
static void	bnx_writembx(struct bnx_softc *, int, int);
static int	bnx_read_nvram(struct bnx_softc *, caddr_t, int, int);
static uint8_t	bnx_eeprom_getbyte(struct bnx_softc *, uint32_t, uint8_t *);
static int	bnx_read_eeprom(struct bnx_softc *, caddr_t, uint32_t, size_t);

static void	bnx_tbi_link_upd(struct bnx_softc *, uint32_t);
static void	bnx_copper_link_upd(struct bnx_softc *, uint32_t);
static void	bnx_autopoll_link_upd(struct bnx_softc *, uint32_t);
static void	bnx_link_poll(struct bnx_softc *);

static int	bnx_get_eaddr_mem(struct bnx_softc *, uint8_t[]);
static int	bnx_get_eaddr_nvram(struct bnx_softc *, uint8_t[]);
static int	bnx_get_eaddr_eeprom(struct bnx_softc *, uint8_t[]);
static int	bnx_get_eaddr(struct bnx_softc *, uint8_t[]);

static void	bnx_coal_change(struct bnx_softc *);
static int	bnx_sysctl_force_defrag(SYSCTL_HANDLER_ARGS);
static int	bnx_sysctl_tx_wreg(SYSCTL_HANDLER_ARGS);
static int	bnx_sysctl_rx_coal_ticks(SYSCTL_HANDLER_ARGS);
static int	bnx_sysctl_tx_coal_ticks(SYSCTL_HANDLER_ARGS);
static int	bnx_sysctl_rx_coal_bds(SYSCTL_HANDLER_ARGS);
static int	bnx_sysctl_rx_coal_bds_poll(SYSCTL_HANDLER_ARGS);
static int	bnx_sysctl_tx_coal_bds(SYSCTL_HANDLER_ARGS);
static int	bnx_sysctl_tx_coal_bds_poll(SYSCTL_HANDLER_ARGS);
static int	bnx_sysctl_rx_coal_bds_int(SYSCTL_HANDLER_ARGS);
static int	bnx_sysctl_tx_coal_bds_int(SYSCTL_HANDLER_ARGS);
static int	bnx_sysctl_coal_chg(SYSCTL_HANDLER_ARGS, uint32_t *,
		    int, uint32_t);
#ifdef IFPOLL_ENABLE
static int	bnx_sysctl_npoll_offset(SYSCTL_HANDLER_ARGS);
static int	bnx_sysctl_npoll_rxoff(SYSCTL_HANDLER_ARGS);
static int	bnx_sysctl_npoll_txoff(SYSCTL_HANDLER_ARGS);
#endif
static int	bnx_sysctl_std_refill(SYSCTL_HANDLER_ARGS);

static void	bnx_sig_post_reset(struct bnx_softc *, int);
static void	bnx_sig_pre_reset(struct bnx_softc *, int);
static void	bnx_ape_lock_init(struct bnx_softc *);
static void	bnx_ape_read_fw_ver(struct bnx_softc *);
static int	bnx_ape_lock(struct bnx_softc *, int);
static void	bnx_ape_unlock(struct bnx_softc *, int);
static void	bnx_ape_send_event(struct bnx_softc *, uint32_t);
static void	bnx_ape_driver_state_change(struct bnx_softc *, int);
static int	bnx_msi_enable = 1;
static int	bnx_msix_enable = 1;

static int	bnx_rx_rings = 0;	/* auto */
static int	bnx_tx_rings = 0;	/* auto */

TUNABLE_INT("hw.bnx.msi.enable", &bnx_msi_enable);
TUNABLE_INT("hw.bnx.msix.enable", &bnx_msix_enable);
TUNABLE_INT("hw.bnx.rx_rings", &bnx_rx_rings);
TUNABLE_INT("hw.bnx.tx_rings", &bnx_tx_rings);
static device_method_t bnx_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		bnx_probe),
	DEVMETHOD(device_attach,	bnx_attach),
	DEVMETHOD(device_detach,	bnx_detach),
	DEVMETHOD(device_shutdown,	bnx_shutdown),
	DEVMETHOD(device_suspend,	bnx_suspend),
	DEVMETHOD(device_resume,	bnx_resume),

	/* Bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	bnx_miibus_readreg),
	DEVMETHOD(miibus_writereg,	bnx_miibus_writereg),
	DEVMETHOD(miibus_statchg,	bnx_miibus_statchg),

	DEVMETHOD_END
};

static DEFINE_CLASS_0(bnx, bnx_driver, bnx_methods, sizeof(struct bnx_softc));
static devclass_t bnx_devclass;

DECLARE_DUMMY_MODULE(if_bnx);
DRIVER_MODULE(if_bnx, pci, bnx_driver, bnx_devclass, NULL, NULL);
DRIVER_MODULE(miibus, bnx, miibus_driver, miibus_devclass, NULL, NULL);
static uint32_t
bnx_readmem_ind(struct bnx_softc *sc, uint32_t off)
{
	device_t dev = sc->bnx_dev;
	uint32_t val;

	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
	val = pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4);
	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);

	return (val);
}

static void
bnx_writemem_ind(struct bnx_softc *sc, uint32_t off, uint32_t val)
{
	device_t dev = sc->bnx_dev;

	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
	pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
}

static void
bnx_writemem_direct(struct bnx_softc *sc, uint32_t off, uint32_t val)
{
	CSR_WRITE_4(sc, off, val);
}

static void
bnx_writembx(struct bnx_softc *sc, int off, int val)
{
	CSR_WRITE_4(sc, off, val);
}
/*
 * Read a sequence of bytes from NVRAM.
 */
static int
bnx_read_nvram(struct bnx_softc *sc, caddr_t dest, int off, int cnt)
{
	return (1);
}
/*
 * Read a byte of data stored in the EEPROM at address 'addr.'  The
 * BCM570x supports both the traditional bitbang interface and an
 * auto access interface for reading the EEPROM.  We use the auto
 * access method.
 */
static uint8_t
bnx_eeprom_getbyte(struct bnx_softc *sc, uint32_t addr, uint8_t *dest)
{
	uint32_t byte = 0;
	int i;

	/*
	 * Enable use of auto EEPROM access so we can avoid
	 * having to use the bitbang method.
	 */
	BNX_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);

	/* Reset the EEPROM, load the clock period. */
	CSR_WRITE_4(sc, BGE_EE_ADDR,
	    BGE_EEADDR_RESET | BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
	DELAY(20);

	/* Issue the read EEPROM command. */
	CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);
	/* Wait for completion */
	for (i = 0; i < BNX_TIMEOUT * 10; i++) {
		DELAY(10);
		if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
			break;
	}
	if (i == BNX_TIMEOUT * 10) {
		if_printf(&sc->arpcom.ac_if, "eeprom read timed out\n");
		return 1;
	}
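	/*
	 * The auto access interface always returns a full 32-bit word;
	 * extract the byte selected by the low two bits of 'addr'.
	 */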
	byte = CSR_READ_4(sc, BGE_EE_DATA);

	*dest = (byte >> ((addr % 4) * 8)) & 0xFF;

	return 0;
}
/*
 * Read a sequence of bytes from the EEPROM.
 */
static int
bnx_read_eeprom(struct bnx_softc *sc, caddr_t dest, uint32_t off, size_t len)
{
	size_t i;
	int err;
	uint8_t byte;

	for (byte = 0, err = 0, i = 0; i < len; i++) {
		err = bnx_eeprom_getbyte(sc, off + i, &byte);
		if (err)
			break;
		*(dest + i) = byte;
	}

	return (err ? 1 : 0);
}
static int
bnx_miibus_readreg(device_t dev, int phy, int reg)
{
	struct bnx_softc *sc = device_get_softc(dev);
	uint32_t val;
	int i;

	KASSERT(phy == sc->bnx_phyno,
	    ("invalid phyno %d, should be %d", phy, sc->bnx_phyno));

	if (bnx_ape_lock(sc, sc->bnx_phy_ape_lock) != 0)
		return 0;

	/* Clear the autopoll bit if set, otherwise it may trigger PCI errors. */
	if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) {
		CSR_WRITE_4(sc, BGE_MI_MODE,
		    sc->bnx_mi_mode & ~BGE_MIMODE_AUTOPOLL);
		DELAY(80);
	}

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ | BGE_MICOMM_BUSY |
	    BGE_MIPHY(phy) | BGE_MIREG(reg));

	/* Poll for the PHY register access to complete. */
	for (i = 0; i < BNX_TIMEOUT; i++) {
		DELAY(10);
		val = CSR_READ_4(sc, BGE_MI_COMM);
		if ((val & BGE_MICOMM_BUSY) == 0) {
			DELAY(5);
			val = CSR_READ_4(sc, BGE_MI_COMM);
			break;
		}
	}
	if (i == BNX_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if, "PHY read timed out "
		    "(phy %d, reg %d, val 0x%08x)\n", phy, reg, val);
		val = 0;
	}

	/* Restore the autopoll bit if necessary. */
	if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) {
		CSR_WRITE_4(sc, BGE_MI_MODE, sc->bnx_mi_mode);
		DELAY(80);
	}

	bnx_ape_unlock(sc, sc->bnx_phy_ape_lock);

	if (val & BGE_MICOMM_READFAIL)
		return 0;

	return (val & 0xFFFF);
}
static int
bnx_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct bnx_softc *sc = device_get_softc(dev);
	int i;

	KASSERT(phy == sc->bnx_phyno,
	    ("invalid phyno %d, should be %d", phy, sc->bnx_phyno));

	if (bnx_ape_lock(sc, sc->bnx_phy_ape_lock) != 0)
		return 0;

	/* Clear the autopoll bit if set, otherwise it may trigger PCI errors. */
	if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) {
		CSR_WRITE_4(sc, BGE_MI_MODE,
		    sc->bnx_mi_mode & ~BGE_MIMODE_AUTOPOLL);
		DELAY(80);
	}

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE | BGE_MICOMM_BUSY |
	    BGE_MIPHY(phy) | BGE_MIREG(reg) | val);

	for (i = 0; i < BNX_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) {
			DELAY(5);
			CSR_READ_4(sc, BGE_MI_COMM);	/* dummy read */
			break;
		}
	}
	if (i == BNX_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if, "PHY write timed out "
		    "(phy %d, reg %d, val %d)\n", phy, reg, val);
	}

	/* Restore the autopoll bit if necessary. */
	if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) {
		CSR_WRITE_4(sc, BGE_MI_MODE, sc->bnx_mi_mode);
		DELAY(80);
	}

	bnx_ape_unlock(sc, sc->bnx_phy_ape_lock);

	return 0;
}
static void
bnx_miibus_statchg(device_t dev)
{
	struct bnx_softc *sc;
	struct mii_data *mii;
	uint32_t mac_mode;

	sc = device_get_softc(dev);
	if ((sc->arpcom.ac_if.if_flags & IFF_RUNNING) == 0)
		return;

	mii = device_get_softc(sc->bnx_miibus);

	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
		case IFM_1000_T:
		case IFM_1000_SX:
		case IFM_2500_SX:
			sc->bnx_link = 1;
			break;
		default:
			sc->bnx_link = 0;
			break;
		}
	} else {
		sc->bnx_link = 0;
	}
	if (sc->bnx_link == 0)
		return;

	/*
	 * APE firmware touches these registers to keep the MAC
	 * connected to the outside world.  Try to keep the
	 * accesses atomic.
	 */
	mac_mode = CSR_READ_4(sc, BGE_MAC_MODE) &
	    ~(BGE_MACMODE_PORTMODE | BGE_MACMODE_HALF_DUPLEX);

	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
	    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
		mac_mode |= BGE_PORTMODE_GMII;
	else
		mac_mode |= BGE_PORTMODE_MII;

	if ((mii->mii_media_active & IFM_GMASK) != IFM_FDX)
		mac_mode |= BGE_MACMODE_HALF_DUPLEX;

	CSR_WRITE_4(sc, BGE_MAC_MODE, mac_mode);
}
/*
 * Memory management for jumbo frames.
 */
static int
bnx_alloc_jumbo_mem(struct bnx_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct bnx_jslot *entry;
	uint8_t *ptr;
	bus_addr_t paddr;
	int i, error;

	/*
	 * Create tag for jumbo mbufs.
	 * This is really a bit of a kludge. We allocate a special
	 * jumbo buffer pool which (thanks to the way our DMA
	 * memory allocation works) will consist of contiguous
	 * pages. This means that even though a jumbo buffer might
	 * be larger than a page size, we don't really need to
	 * map it into more than one DMA segment. However, the
	 * default mbuf tag will result in multi-segment mappings,
	 * so we have to create a special jumbo mbuf tag that
	 * lets us get away with mapping the jumbo buffers as
	 * a single segment. I think eventually the driver should
	 * be changed so that it uses ordinary mbufs and cluster
	 * buffers, i.e. jumbo frames can span multiple DMA
	 * descriptors. But that's a project for another day.
	 */

	/*
	 * Create DMA resources for the jumbo RX ring.
	 */
	error = bnx_dma_block_alloc(sc, BGE_JUMBO_RX_RING_SZ,
	    &sc->bnx_cdata.bnx_rx_jumbo_ring_tag,
	    &sc->bnx_cdata.bnx_rx_jumbo_ring_map,
	    (void *)&sc->bnx_ldata.bnx_rx_jumbo_ring,
	    &sc->bnx_ldata.bnx_rx_jumbo_ring_paddr);
	if (error) {
		if_printf(ifp, "could not create jumbo RX ring\n");
		return error;
	}

	/*
	 * Create DMA resources for the jumbo buffer block.
	 */
	error = bnx_dma_block_alloc(sc, BNX_JMEM,
	    &sc->bnx_cdata.bnx_jumbo_tag,
	    &sc->bnx_cdata.bnx_jumbo_map,
	    (void **)&sc->bnx_ldata.bnx_jumbo_buf,
	    &paddr);
	if (error) {
		if_printf(ifp, "could not create jumbo buffer\n");
		return error;
	}

	SLIST_INIT(&sc->bnx_jfree_listhead);

	/*
	 * Now divide it up into 9K pieces and save the addresses
	 * in an array. Note that we play an evil trick here by using
	 * the first few bytes in the buffer to hold the address
	 * of the softc structure for this interface. This is because
	 * bnx_jfree() needs it, but it is called by the mbuf management
	 * code which will not pass it to us explicitly.
	 */
	for (i = 0, ptr = sc->bnx_ldata.bnx_jumbo_buf; i < BNX_JSLOTS; i++) {
		entry = &sc->bnx_cdata.bnx_jslots[i];
		entry->bnx_sc = sc;
		entry->bnx_buf = ptr;
		entry->bnx_paddr = paddr;
		entry->bnx_inuse = 0;
		entry->bnx_slot = i;
		SLIST_INSERT_HEAD(&sc->bnx_jfree_listhead, entry, jslot_link);
		ptr += BNX_JLEN;
		paddr += BNX_JLEN;
	}

	return 0;
}
static void
bnx_free_jumbo_mem(struct bnx_softc *sc)
{
	/* Destroy jumbo RX ring. */
	bnx_dma_block_free(sc->bnx_cdata.bnx_rx_jumbo_ring_tag,
	    sc->bnx_cdata.bnx_rx_jumbo_ring_map,
	    sc->bnx_ldata.bnx_rx_jumbo_ring);

	/* Destroy jumbo buffer block. */
	bnx_dma_block_free(sc->bnx_cdata.bnx_jumbo_tag,
	    sc->bnx_cdata.bnx_jumbo_map,
	    sc->bnx_ldata.bnx_jumbo_buf);
}
/*
 * Allocate a jumbo buffer.
 */
static struct bnx_jslot *
bnx_jalloc(struct bnx_softc *sc)
{
	struct bnx_jslot *entry;

	lwkt_serialize_enter(&sc->bnx_jslot_serializer);
	entry = SLIST_FIRST(&sc->bnx_jfree_listhead);
	if (entry != NULL) {
		SLIST_REMOVE_HEAD(&sc->bnx_jfree_listhead, jslot_link);
		entry->bnx_inuse = 1;
	} else {
		if_printf(&sc->arpcom.ac_if, "no free jumbo buffers\n");
	}
	lwkt_serialize_exit(&sc->bnx_jslot_serializer);
	return entry;
}
/*
 * Adjust usage count on a jumbo buffer.
 */
static void
bnx_jref(void *arg)
{
	struct bnx_jslot *entry = (struct bnx_jslot *)arg;
	struct bnx_softc *sc = entry->bnx_sc;

	if (sc == NULL)
		panic("bnx_jref: can't find softc pointer!");

	if (&sc->bnx_cdata.bnx_jslots[entry->bnx_slot] != entry) {
		panic("bnx_jref: asked to reference buffer "
		    "that we don't manage!");
	} else if (entry->bnx_inuse == 0) {
		panic("bnx_jref: buffer already free!");
	} else {
		atomic_add_int(&entry->bnx_inuse, 1);
	}
}
/*
 * Release a jumbo buffer.
 */
static void
bnx_jfree(void *arg)
{
	struct bnx_jslot *entry = (struct bnx_jslot *)arg;
	struct bnx_softc *sc = entry->bnx_sc;

	if (sc == NULL)
		panic("bnx_jfree: can't find softc pointer!");

	if (&sc->bnx_cdata.bnx_jslots[entry->bnx_slot] != entry) {
		panic("bnx_jfree: asked to free buffer that we don't manage!");
	} else if (entry->bnx_inuse == 0) {
		panic("bnx_jfree: buffer already free!");
	} else {
		/*
		 * Possible MP race to 0, use the serializer.  The atomic insn
		 * is still needed for races against bnx_jref().
		 */
		lwkt_serialize_enter(&sc->bnx_jslot_serializer);
		atomic_subtract_int(&entry->bnx_inuse, 1);
		if (entry->bnx_inuse == 0) {
			SLIST_INSERT_HEAD(&sc->bnx_jfree_listhead,
			    entry, jslot_link);
		}
		lwkt_serialize_exit(&sc->bnx_jslot_serializer);
	}
}
/*
 * Initialize a standard receive ring descriptor.
 */
static int
bnx_newbuf_std(struct bnx_rx_ret_ring *ret, int i, int init)
{
	struct mbuf *m_new = NULL;
	bus_dma_segment_t seg;
	bus_dmamap_t map;
	int error, nsegs;
	struct bnx_rx_buf *rb;

	rb = &ret->bnx_std->bnx_rx_std_buf[i];
	KASSERT(!rb->bnx_rx_refilled, ("RX buf %dth has been refilled", i));

	m_new = m_getcl(init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m_new == NULL)
		return ENOBUFS;
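	/*
	 * NOTE: m_adj() below strips ETHER_ALIGN (2) bytes from the front
	 * of the cluster so that the IP header following the 14 byte
	 * Ethernet header ends up 32-bit aligned.
	 */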
	m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
	m_adj(m_new, ETHER_ALIGN);

	error = bus_dmamap_load_mbuf_segment(ret->bnx_rx_mtag,
	    ret->bnx_rx_tmpmap, m_new, &seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m_new);
		return error;
	}

	if (!init) {
		bus_dmamap_sync(ret->bnx_rx_mtag, rb->bnx_rx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(ret->bnx_rx_mtag, rb->bnx_rx_dmamap);
	}
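	/*
	 * Swap the spare DMA map, which now holds the newly loaded mbuf,
	 * with the slot's old map; the old map becomes the spare for the
	 * next refill, so a failed load never disturbs the current buffer.
	 */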
	map = ret->bnx_rx_tmpmap;
	ret->bnx_rx_tmpmap = rb->bnx_rx_dmamap;

	rb->bnx_rx_dmamap = map;
	rb->bnx_rx_mbuf = m_new;
	rb->bnx_rx_paddr = seg.ds_addr;
	rb->bnx_rx_len = m_new->m_len;

	rb->bnx_rx_refilled = 1;
	return 0;
}
static void
bnx_setup_rxdesc_std(struct bnx_rx_std_ring *std, int i)
{
	struct bnx_rx_buf *rb;
	struct bge_rx_bd *r;
	bus_addr_t paddr;
	int len;

	rb = &std->bnx_rx_std_buf[i];
	KASSERT(rb->bnx_rx_refilled, ("RX buf %dth is not refilled", i));

	paddr = rb->bnx_rx_paddr;
	len = rb->bnx_rx_len;

	rb->bnx_rx_refilled = 0;

	r = &std->bnx_rx_std_ring[i];
	r->bge_addr.bge_addr_lo = BGE_ADDR_LO(paddr);
	r->bge_addr.bge_addr_hi = BGE_ADDR_HI(paddr);
	r->bge_len = len;
	r->bge_idx = i;
	r->bge_flags = BGE_RXBDFLAG_END;
}
/*
 * Initialize a jumbo receive ring descriptor. This allocates
 * a jumbo buffer from the pool managed internally by the driver.
 */
static int
bnx_newbuf_jumbo(struct bnx_softc *sc, int i, int init)
{
	struct mbuf *m_new = NULL;
	struct bnx_jslot *buf;
	bus_addr_t paddr;

	/* Allocate the mbuf. */
	MGETHDR(m_new, init ? MB_WAIT : MB_DONTWAIT, MT_DATA);
	if (m_new == NULL)
		return ENOBUFS;

	/* Allocate the jumbo buffer */
	buf = bnx_jalloc(sc);
	if (buf == NULL) {
		m_freem(m_new);
		return ENOBUFS;
	}

	/* Attach the buffer to the mbuf. */
	m_new->m_ext.ext_arg = buf;
	m_new->m_ext.ext_buf = buf->bnx_buf;
	m_new->m_ext.ext_free = bnx_jfree;
	m_new->m_ext.ext_ref = bnx_jref;
	m_new->m_ext.ext_size = BNX_JUMBO_FRAMELEN;

	m_new->m_flags |= M_EXT;

	m_new->m_data = m_new->m_ext.ext_buf;
	m_new->m_len = m_new->m_pkthdr.len = m_new->m_ext.ext_size;

	paddr = buf->bnx_paddr;
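	/*
	 * The physical address must track the m_adj() below: both the
	 * mbuf data pointer and the DMA address skip the same
	 * ETHER_ALIGN bytes.
	 */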
	m_adj(m_new, ETHER_ALIGN);
	paddr += ETHER_ALIGN;

	/* Save necessary information */
	sc->bnx_cdata.bnx_rx_jumbo_chain[i].bnx_rx_mbuf = m_new;
	sc->bnx_cdata.bnx_rx_jumbo_chain[i].bnx_rx_paddr = paddr;

	/* Set up the descriptor. */
	bnx_setup_rxdesc_jumbo(sc, i);
	return 0;
}
static void
bnx_setup_rxdesc_jumbo(struct bnx_softc *sc, int i)
{
	struct bge_rx_bd *r;
	struct bnx_rx_buf *rc;

	r = &sc->bnx_ldata.bnx_rx_jumbo_ring[i];
	rc = &sc->bnx_cdata.bnx_rx_jumbo_chain[i];

	r->bge_addr.bge_addr_lo = BGE_ADDR_LO(rc->bnx_rx_paddr);
	r->bge_addr.bge_addr_hi = BGE_ADDR_HI(rc->bnx_rx_paddr);
	r->bge_len = rc->bnx_rx_mbuf->m_len;
	r->bge_idx = i;
	r->bge_flags = BGE_RXBDFLAG_END | BGE_RXBDFLAG_JUMBO_RING;
}
static int
bnx_init_rx_ring_std(struct bnx_rx_std_ring *std)
{
	int i, error;

	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
		/* Use the first RX return ring's tmp RX mbuf DMA map */
		error = bnx_newbuf_std(&std->bnx_sc->bnx_rx_ret_ring[0], i, 1);
		if (error)
			return error;
		bnx_setup_rxdesc_std(std, i);
	}

	std->bnx_rx_std_used = 0;
	std->bnx_rx_std_refill = 0;
	std->bnx_rx_std_running = 0;

	lwkt_serialize_handler_enable(&std->bnx_rx_std_serialize);
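	/*
	 * The producer index is the index of the last valid BD; writing
	 * it to the mailbox below hands every buffer in the ring to the
	 * chip.
	 */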
	std->bnx_rx_std = BGE_STD_RX_RING_CNT - 1;
	bnx_writembx(std->bnx_sc, BGE_MBX_RX_STD_PROD_LO, std->bnx_rx_std);

	return 0;
}
static void
bnx_free_rx_ring_std(struct bnx_rx_std_ring *std)
{
	int i;

	lwkt_serialize_handler_disable(&std->bnx_rx_std_serialize);

	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
		struct bnx_rx_buf *rb = &std->bnx_rx_std_buf[i];

		rb->bnx_rx_refilled = 0;
		if (rb->bnx_rx_mbuf != NULL) {
			bus_dmamap_unload(std->bnx_rx_mtag, rb->bnx_rx_dmamap);
			m_freem(rb->bnx_rx_mbuf);
			rb->bnx_rx_mbuf = NULL;
		}
		bzero(&std->bnx_rx_std_ring[i], sizeof(struct bge_rx_bd));
	}
}
static int
bnx_init_rx_ring_jumbo(struct bnx_softc *sc)
{
	struct bge_rcb *rcb;
	int i, error;

	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
		error = bnx_newbuf_jumbo(sc, i, 1);
		if (error)
			return error;
	}

	sc->bnx_jumbo = BGE_JUMBO_RX_RING_CNT - 1;

	rcb = &sc->bnx_ldata.bnx_info.bnx_jumbo_rx_rcb;
	rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 0);
	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);

	bnx_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bnx_jumbo);

	return 0;
}
static void
bnx_free_rx_ring_jumbo(struct bnx_softc *sc)
{
	int i;

	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
		struct bnx_rx_buf *rc = &sc->bnx_cdata.bnx_rx_jumbo_chain[i];

		if (rc->bnx_rx_mbuf != NULL) {
			m_freem(rc->bnx_rx_mbuf);
			rc->bnx_rx_mbuf = NULL;
		}
		bzero(&sc->bnx_ldata.bnx_rx_jumbo_ring[i],
		    sizeof(struct bge_rx_bd));
	}
}
static void
bnx_free_tx_ring(struct bnx_tx_ring *txr)
{
	int i;

	for (i = 0; i < BGE_TX_RING_CNT; i++) {
		struct bnx_tx_buf *buf = &txr->bnx_tx_buf[i];

		if (buf->bnx_tx_mbuf != NULL) {
			bus_dmamap_unload(txr->bnx_tx_mtag,
			    buf->bnx_tx_dmamap);
			m_freem(buf->bnx_tx_mbuf);
			buf->bnx_tx_mbuf = NULL;
		}
		bzero(&txr->bnx_tx_ring[i], sizeof(struct bge_tx_bd));
	}
	txr->bnx_tx_saved_considx = BNX_TXCONS_UNSET;
}
static int
bnx_init_tx_ring(struct bnx_tx_ring *txr)
{
	txr->bnx_tx_cnt = 0;
	txr->bnx_tx_saved_considx = 0;
	txr->bnx_tx_prodidx = 0;

	/* Initialize transmit producer index for host-memory send ring. */
	bnx_writembx(txr->bnx_sc, txr->bnx_tx_mbx, txr->bnx_tx_prodidx);

	return 0;
}
static void
bnx_setmulti(struct bnx_softc *sc)
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	uint32_t hashes[4] = { 0, 0, 0, 0 };
	int h, i;

	ifp = &sc->arpcom.ac_if;

	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		for (i = 0; i < 4; i++)
			CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
		return;
	}

	/* First, zot all the existing filters. */
	for (i = 0; i < 4; i++)
		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);

	/* Now program new ones. */
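	/*
	 * Each multicast address is hashed with CRC32; the low 7 bits of
	 * the CRC select one of the 128 filter bits spread across the
	 * four 32-bit BGE_MAR registers (bits 6-5 pick the register,
	 * bits 4-0 pick the bit within it).
	 */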
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		h = ether_crc32_le(
		    LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
		    ETHER_ADDR_LEN) & 0x7f;
		hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
	}

	for (i = 0; i < 4; i++)
		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
}
/*
 * Do endian, PCI and DMA initialization. Also check the on-board ROM
 * self-test results.
 */
static int
bnx_chipinit(struct bnx_softc *sc)
{
	uint32_t dma_rw_ctl, mode_ctl;
	int i;

	/* Set endian type before we access any non-PCI registers. */
	pci_write_config(sc->bnx_dev, BGE_PCI_MISC_CTL,
	    BGE_INIT | BGE_PCIMISCCTL_TAGGED_STATUS, 4);

	/*
	 * Clear the MAC statistics block in the NIC's
	 * internal memory.
	 */
	for (i = BGE_STATS_BLOCK;
	    i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t))
		BNX_MEMWIN_WRITE(sc, i, 0);

	for (i = BGE_STATUS_BLOCK;
	    i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t))
		BNX_MEMWIN_WRITE(sc, i, 0);
	if (BNX_IS_57765_FAMILY(sc)) {
		uint32_t val;

		if (sc->bnx_chipid == BGE_CHIPID_BCM57765_A0) {
			mode_ctl = CSR_READ_4(sc, BGE_MODE_CTL);
			val = mode_ctl & ~BGE_MODECTL_PCIE_PORTS;

			/* Access the lower 1K of PL PCI-E block registers. */
			CSR_WRITE_4(sc, BGE_MODE_CTL,
			    val | BGE_MODECTL_PCIE_PL_SEL);

			val = CSR_READ_4(sc, BGE_PCIE_PL_LO_PHYCTL5);
			val |= BGE_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ;
			CSR_WRITE_4(sc, BGE_PCIE_PL_LO_PHYCTL5, val);

			CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl);
		}
		if (sc->bnx_chiprev != BGE_CHIPREV_57765_AX) {
			/* Fix transmit hangs */
			val = CSR_READ_4(sc, BGE_CPMU_PADRNG_CTL);
			val |= BGE_CPMU_PADRNG_CTL_RDIV2;
			CSR_WRITE_4(sc, BGE_CPMU_PADRNG_CTL, val);

			mode_ctl = CSR_READ_4(sc, BGE_MODE_CTL);
			val = mode_ctl & ~BGE_MODECTL_PCIE_PORTS;

			/* Access the lower 1K of DL PCI-E block registers. */
			CSR_WRITE_4(sc, BGE_MODE_CTL,
			    val | BGE_MODECTL_PCIE_DL_SEL);

			val = CSR_READ_4(sc, BGE_PCIE_DL_LO_FTSMAX);
			val &= ~BGE_PCIE_DL_LO_FTSMAX_MASK;
			val |= BGE_PCIE_DL_LO_FTSMAX_VAL;
			CSR_WRITE_4(sc, BGE_PCIE_DL_LO_FTSMAX, val);

			CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl);
		}

		val = CSR_READ_4(sc, BGE_CPMU_LSPD_10MB_CLK);
		val &= ~BGE_CPMU_LSPD_10MB_MACCLK_MASK;
		val |= BGE_CPMU_LSPD_10MB_MACCLK_6_25;
		CSR_WRITE_4(sc, BGE_CPMU_LSPD_10MB_CLK, val);
	}
	/*
	 * Set up the PCI DMA control register.
	 */
	dma_rw_ctl = pci_read_config(sc->bnx_dev, BGE_PCI_DMA_RW_CTL, 4);
	/*
	 * Disable 32-byte cache alignment for DMA write to host memory.
	 *
	 * NOTE:
	 * 64-byte cache alignment for DMA write to host memory is still
	 * used.
	 */
	dma_rw_ctl |= BGE_PCIDMARWCTL_DIS_CACHE_ALIGNMENT;
	if (sc->bnx_chipid == BGE_CHIPID_BCM57765_A0)
		dma_rw_ctl &= ~BGE_PCIDMARWCTL_CRDRDR_RDMA_MRRS_MSK;
	/*
	 * Enable HW workaround for controllers that misinterpret
	 * a status tag update and leave interrupts permanently
	 * disabled.
	 */
	if (sc->bnx_asicrev != BGE_ASICREV_BCM5717 &&
	    sc->bnx_asicrev != BGE_ASICREV_BCM5762 &&
	    !BNX_IS_57765_FAMILY(sc))
		dma_rw_ctl |= BGE_PCIDMARWCTL_TAGGED_STATUS_WA;

	if_printf(&sc->arpcom.ac_if, "DMA read/write %#x\n", dma_rw_ctl);

	pci_write_config(sc->bnx_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);
	/*
	 * Set up general mode register.
	 */
	mode_ctl = bnx_dma_swap_options(sc);
	if (sc->bnx_asicrev == BGE_ASICREV_BCM5720 ||
	    sc->bnx_asicrev == BGE_ASICREV_BCM5762) {
		/* Retain Host-2-BMC settings written by APE firmware. */
		mode_ctl |= CSR_READ_4(sc, BGE_MODE_CTL) &
		    (BGE_MODECTL_BYTESWAP_B2HRX_DATA |
		     BGE_MODECTL_WORDSWAP_B2HRX_DATA |
		     BGE_MODECTL_B2HRX_ENABLE | BGE_MODECTL_HTX2B_ENABLE);
	}
	mode_ctl |= BGE_MODECTL_MAC_ATTN_INTR |
	    BGE_MODECTL_HOST_SEND_BDS | BGE_MODECTL_TX_NO_PHDR_CSUM;
	CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl);

	/*
	 * Disable memory write invalidate. Apparently it is not supported
	 * properly by these devices. Also ensure that INTx isn't disabled,
	 * as these chips need it even when using MSI.
	 */
	PCI_CLRBIT(sc->bnx_dev, BGE_PCI_CMD,
	    (PCIM_CMD_MWRICEN | PCIM_CMD_INTxDIS), 4);
	/* Set the timer prescaler (always 66MHz) */
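	/*
	 * 65 == 66 - 1: the divider field in bits 7:1 is programmed with
	 * N - 1, dividing the 66MHz core clock down to 1MHz timer ticks.
	 */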
	CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/);

	return 0;
}
static int
bnx_blockinit(struct bnx_softc *sc)
{
	struct bnx_intr_data *intr;
	struct bge_rcb *rcb;
	bus_size_t vrcb;
	bge_hostaddr taddr;
	uint32_t val;
	int i, limit;

	/*
	 * Initialize the memory window pointer register so that
	 * we can access the first 32K of internal NIC RAM. This will
	 * allow us to set up the TX send ring RCBs and the RX return
	 * ring RCBs, plus other things which live in NIC memory.
	 */
	CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);
	/* Configure mbuf pool watermarks */
	if (BNX_IS_57765_PLUS(sc)) {
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
		if (sc->arpcom.ac_if.if_mtu > ETHERMTU) {
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x7e);
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xea);
		} else {
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x2a);
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xa0);
		}
	} else {
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
	}

	/* Configure DMA resource watermarks */
	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);
	/* Enable buffer manager */
	val = BGE_BMANMODE_ENABLE | BGE_BMANMODE_LOMBUF_ATTN;
	/*
	 * Change the arbitration algorithm of TXMBUF read request to
	 * round-robin instead of priority based for BCM5719. When
	 * TXFIFO is almost empty, RDMA will hold its request until
	 * TXFIFO is not almost empty.
	 */
	if (sc->bnx_asicrev == BGE_ASICREV_BCM5719)
		val |= BGE_BMANMODE_NO_TX_UNDERRUN;
	if (sc->bnx_asicrev == BGE_ASICREV_BCM5717 ||
	    sc->bnx_chipid == BGE_CHIPID_BCM5719_A0 ||
	    sc->bnx_chipid == BGE_CHIPID_BCM5720_A0)
		val |= BGE_BMANMODE_LOMBUF_ATTN;
	CSR_WRITE_4(sc, BGE_BMAN_MODE, val);
	/* Poll for buffer manager start indication */
	for (i = 0; i < BNX_TIMEOUT; i++) {
		if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
			break;
		DELAY(10);
	}
	if (i == BNX_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if,
		    "buffer manager failed to start\n");
		return ENXIO;
	}
	/* Enable flow-through queues */
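	/*
	 * Writing all ones and then all zeros to BGE_FTQ_RESET pulses
	 * the reset line of every flow-through queue at once.
	 */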
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);

	/* Wait until queue initialization is complete */
	for (i = 0; i < BNX_TIMEOUT; i++) {
		if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
			break;
		DELAY(10);
	}
	if (i == BNX_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if,
		    "flow-through queue init failed\n");
		return ENXIO;
	}
	/*
	 * Summary of rings supported by the controller:
	 *
	 * Standard Receive Producer Ring
	 * - This ring is used to feed receive buffers for "standard"
	 *   sized frames (typically 1536 bytes) to the controller.
	 *
	 * Jumbo Receive Producer Ring
	 * - This ring is used to feed receive buffers for jumbo sized
	 *   frames (i.e. anything bigger than the "standard" frames)
	 *   to the controller.
	 *
	 * Mini Receive Producer Ring
	 * - This ring is used to feed receive buffers for "mini"
	 *   sized frames to the controller.
	 * - This feature required external memory for the controller
	 *   but was never used in a production system.  Should always
	 *   be disabled.
	 *
	 * Receive Return Ring
	 * - After the controller has placed an incoming frame into a
	 *   receive buffer that buffer is moved into a receive return
	 *   ring.  The driver is then responsible for passing the
	 *   buffer up to the stack.  BCM5718/BCM57785 families support
	 *   multiple receive return rings.
	 *
	 * Send Ring
	 * - This ring is used for outgoing frames.  BCM5719/BCM5720
	 *   support multiple send rings.
	 */
	/* Initialize the standard receive producer ring control block. */
	rcb = &sc->bnx_ldata.bnx_info.bnx_std_rx_rcb;
	rcb->bge_hostaddr.bge_addr_lo =
	    BGE_ADDR_LO(sc->bnx_rx_std_ring.bnx_rx_std_ring_paddr);
	rcb->bge_hostaddr.bge_addr_hi =
	    BGE_ADDR_HI(sc->bnx_rx_std_ring.bnx_rx_std_ring_paddr);
	if (BNX_IS_57765_PLUS(sc)) {
		/*
		 * Bits 31-16: Programmable ring size (2048, 1024, 512, .., 32)
		 * Bits 15-2 : Maximum RX frame size
		 * Bit 1     : 1 = Ring Disabled, 0 = Ring Enabled
		 * Bit 0     : Reserved
		 */
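		/*
		 * The maximum frame size is pre-shifted into the bits 15-2
		 * field, hence the "<< 2" applied to BNX_MAX_FRAMELEN below.
		 */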
		rcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(512, BNX_MAX_FRAMELEN << 2);
	} else {
		/*
		 * Bits 31-16: Programmable ring size (512, 256, 128, 64, 32)
		 * Bits 15-2 : Reserved (should be 0)
		 * Bit 1     : 1 = Ring Disabled, 0 = Ring Enabled
		 * Bit 0     : Reserved
		 */
		rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
	}
	if (BNX_IS_5717_PLUS(sc))
		rcb->bge_nicaddr = BGE_STD_RX_RINGS_5717;
	else
		rcb->bge_nicaddr = BGE_STD_RX_RINGS;
	/* Write the standard receive producer ring control block. */
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
	if (!BNX_IS_5717_PLUS(sc))
		CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
	/* Reset the standard receive producer ring producer index. */
	bnx_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0);
	/*
	 * Initialize the jumbo RX producer ring control
	 * block.  We set the 'ring disabled' bit in the
	 * flags field until we're actually ready to start
	 * using this ring (i.e. once we set the MTU
	 * high enough to require it).
	 */
	if (BNX_IS_JUMBO_CAPABLE(sc)) {
		rcb = &sc->bnx_ldata.bnx_info.bnx_jumbo_rx_rcb;
		/* Get the jumbo receive producer ring RCB parameters. */
		rcb->bge_hostaddr.bge_addr_lo =
		    BGE_ADDR_LO(sc->bnx_ldata.bnx_rx_jumbo_ring_paddr);
		rcb->bge_hostaddr.bge_addr_hi =
		    BGE_ADDR_HI(sc->bnx_ldata.bnx_rx_jumbo_ring_paddr);
		rcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(BNX_MAX_FRAMELEN,
		    BGE_RCB_FLAG_RING_DISABLED);
		if (BNX_IS_5717_PLUS(sc))
			rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS_5717;
		else
			rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
		    rcb->bge_hostaddr.bge_addr_hi);
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
		    rcb->bge_hostaddr.bge_addr_lo);
		/* Program the jumbo receive producer ring RCB parameters. */
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
		    rcb->bge_maxlen_flags);
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
		/* Reset the jumbo receive producer ring producer index. */
		bnx_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
	}
	/*
	 * The BD ring replenish thresholds control how often the
	 * hardware fetches new BDs from the producer rings in host
	 * memory.  Setting the value too low on a busy system can
	 * starve the hardware and reduce the throughput.
	 *
	 * Set the BD ring replenish thresholds.  The recommended
	 * values are 1/8th the number of descriptors allocated to
	 * the ring.
	 */
	val = BGE_STD_RX_RING_CNT / 8;
	CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, val);
	if (BNX_IS_JUMBO_CAPABLE(sc)) {
		CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH,
		    BGE_JUMBO_RX_RING_CNT / 8);
	}
	if (BNX_IS_57765_PLUS(sc)) {
		CSR_WRITE_4(sc, BGE_STD_REPLENISH_LWM, 32);
		CSR_WRITE_4(sc, BGE_JMB_REPLENISH_LWM, 16);
	}
	/*
	 * Disable all send rings by setting the 'ring disabled' bit
	 * in the flags field of all the TX send ring control blocks,
	 * located in NIC memory.
	 */
	if (BNX_IS_5717_PLUS(sc))
		limit = 4;
	else if (BNX_IS_57765_FAMILY(sc) ||
	    sc->bnx_asicrev == BGE_ASICREV_BCM5762)
		limit = 2;
	else
		limit = 1;
	vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
	for (i = 0; i < limit; i++) {
		RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
		vrcb += sizeof(struct bge_rcb);
	}
	/*
	 * Configure send ring RCBs
	 */
	vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
	for (i = 0; i < sc->bnx_tx_ringcnt; ++i) {
		struct bnx_tx_ring *txr = &sc->bnx_tx_ring[i];

		BGE_HOSTADDR(taddr, txr->bnx_tx_ring_paddr);
		RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi,
		    taddr.bge_addr_hi);
		RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo,
		    taddr.bge_addr_lo);
		RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
		    BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
		vrcb += sizeof(struct bge_rcb);
	}
	/*
	 * Disable all receive return rings by setting the
	 * 'ring disabled' bit in the flags field of all the receive
	 * return ring control blocks, located in NIC memory.
	 */
	if (BNX_IS_5717_PLUS(sc)) {
		/* Should be 17, use 16 until we get an SRAM map. */
		limit = 16;
	} else if (BNX_IS_57765_FAMILY(sc) ||
	    sc->bnx_asicrev == BGE_ASICREV_BCM5762) {
		limit = 4;
	} else {
		limit = 1;
	}
	/* Disable all receive return rings. */
	vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
	for (i = 0; i < limit; i++) {
		RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0);
		RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0);
		RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
		    BGE_RCB_FLAG_RING_DISABLED);
		bnx_writembx(sc, BGE_MBX_RX_CONS0_LO +
		    (i * (sizeof(uint64_t))), 0);
		vrcb += sizeof(struct bge_rcb);
	}
	/*
	 * Set up receive return rings.
	 */
	vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
	for (i = 0; i < sc->bnx_rx_retcnt; ++i) {
		struct bnx_rx_ret_ring *ret = &sc->bnx_rx_ret_ring[i];

		BGE_HOSTADDR(taddr, ret->bnx_rx_ret_ring_paddr);
		RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi,
		    taddr.bge_addr_hi);
		RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo,
		    taddr.bge_addr_lo);
		RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
		    BGE_RCB_MAXLEN_FLAGS(BNX_RETURN_RING_CNT, 0));
		vrcb += sizeof(struct bge_rcb);
	}
	/* Set random backoff seed for TX */
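	/*
	 * The seed only needs to differ between NICs on the same wire;
	 * the byte sum of the unique MAC address is good enough.
	 */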
	CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
	    (sc->arpcom.ac_enaddr[0] + sc->arpcom.ac_enaddr[1] +
	     sc->arpcom.ac_enaddr[2] + sc->arpcom.ac_enaddr[3] +
	     sc->arpcom.ac_enaddr[4] + sc->arpcom.ac_enaddr[5]) &
	    BGE_TX_BACKOFF_SEED_MASK);
	/* Set inter-packet gap */
	val = 0x2620;
	if (sc->bnx_asicrev == BGE_ASICREV_BCM5720 ||
	    sc->bnx_asicrev == BGE_ASICREV_BCM5762) {
		val |= CSR_READ_4(sc, BGE_TX_LENGTHS) &
		    (BGE_TXLEN_JMB_FRM_LEN_MSK | BGE_TXLEN_CNT_DN_VAL_MSK);
	}
	CSR_WRITE_4(sc, BGE_TX_LENGTHS, val);
	/*
	 * Specify which ring to use for packets that don't match
	 * any RX rules.
	 */
	CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);

	/*
	 * Configure number of RX lists. One interrupt distribution
	 * list, sixteen active lists, one bad frames class.
	 */
	CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
	/* Initialize RX list placement stats mask. */
	CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
	CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
	/* Disable host coalescing until we get it set up */
	CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);

	/* Poll to make sure it's shut down. */
	for (i = 0; i < BNX_TIMEOUT; i++) {
		if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
			break;
		DELAY(10);
	}
	if (i == BNX_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if,
		    "host coalescing engine failed to idle\n");
		return ENXIO;
	}
	/* Set up host coalescing defaults */
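	/*
	 * Marking every *_CHG flag forces bnx_coal_change() below to
	 * program all of the host coalescing registers from the softc
	 * defaults.
	 */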
	sc->bnx_coal_chg = BNX_RX_COAL_TICKS_CHG |
	    BNX_TX_COAL_TICKS_CHG |
	    BNX_RX_COAL_BDS_CHG |
	    BNX_TX_COAL_BDS_CHG |
	    BNX_RX_COAL_BDS_INT_CHG |
	    BNX_TX_COAL_BDS_INT_CHG;
	bnx_coal_change(sc);
	/*
	 * Set up addresses of status blocks
	 */
	intr = &sc->bnx_intr_data[0];
	bzero(intr->bnx_status_block, BGE_STATUS_BLK_SZ);
	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI,
	    BGE_ADDR_HI(intr->bnx_status_block_paddr));
	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
	    BGE_ADDR_LO(intr->bnx_status_block_paddr));
	for (i = 1; i < sc->bnx_intr_cnt; ++i) {
		intr = &sc->bnx_intr_data[i];
		bzero(intr->bnx_status_block, BGE_STATUS_BLK_SZ);
		CSR_WRITE_4(sc, BGE_VEC1_STATUSBLK_ADDR_HI + ((i - 1) * 8),
		    BGE_ADDR_HI(intr->bnx_status_block_paddr));
		CSR_WRITE_4(sc, BGE_VEC1_STATUSBLK_ADDR_LO + ((i - 1) * 8),
		    BGE_ADDR_LO(intr->bnx_status_block_paddr));
	}
	/* Set up status block partial update size. */
	val = BGE_STATBLKSZ_32BYTE;
#if 0
	/*
	 * Does not seem to have visible effect in both
	 * bulk data (1472B UDP datagram) and tiny data
	 * (18B UDP datagram) TX tests.
	 */
	val |= BGE_HCCMODE_CLRTICK_TX;
#endif
	/* Turn on host coalescing state machine */
	CSR_WRITE_4(sc, BGE_HCC_MODE, val | BGE_HCCMODE_ENABLE);

	/* Turn on RX BD completion state machine and enable attentions */
	CSR_WRITE_4(sc, BGE_RBDC_MODE,
	    BGE_RBDCMODE_ENABLE | BGE_RBDCMODE_ATTN);

	/* Turn on RX list placement state machine */
	CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);

	val = BGE_MACMODE_TXDMA_ENB | BGE_MACMODE_RXDMA_ENB |
	    BGE_MACMODE_RX_STATS_CLEAR | BGE_MACMODE_TX_STATS_CLEAR |
	    BGE_MACMODE_RX_STATS_ENB | BGE_MACMODE_TX_STATS_ENB |
	    BGE_MACMODE_FRMHDR_DMA_ENB;

	if (sc->bnx_flags & BNX_FLAG_TBI)
		val |= BGE_PORTMODE_TBI;
	else if (sc->bnx_flags & BNX_FLAG_MII_SERDES)
		val |= BGE_PORTMODE_GMII;
	else
		val |= BGE_PORTMODE_MII;

	/* Allow APE to send/receive frames. */
	if (sc->bnx_mfw_flags & BNX_MFW_ON_APE)
		val |= BGE_MACMODE_APE_RX_EN | BGE_MACMODE_APE_TX_EN;
	/* Turn on DMA, clear stats */
	CSR_WRITE_4(sc, BGE_MAC_MODE, val);
	DELAY(40);

	/* Set misc. local control, enable interrupts on attentions */
	BNX_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);

#ifdef notdef
	/* Assert GPIO pins for PHY reset */
	BNX_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0 |
	    BGE_MLC_MISCIO_OUT1 | BGE_MLC_MISCIO_OUT2);
	BNX_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0 |
	    BGE_MLC_MISCIO_OUTEN1 | BGE_MLC_MISCIO_OUTEN2);
#endif
	if (sc->bnx_intr_type == PCI_INTR_TYPE_MSIX)
		bnx_enable_msi(sc, TRUE);

	/* Turn on write DMA state machine */
	val = BGE_WDMAMODE_ENABLE | BGE_WDMAMODE_ALL_ATTNS;
	/* Enable host coalescing bug fix. */
	val |= BGE_WDMAMODE_STATUS_TAG_FIX;
	if (sc->bnx_asicrev == BGE_ASICREV_BCM5785) {
		/* Request larger DMA burst size to get better performance. */
		val |= BGE_WDMAMODE_BURST_ALL_DATA;
	}
	CSR_WRITE_4(sc, BGE_WDMA_MODE, val);
	DELAY(40);
	if (BNX_IS_57765_PLUS(sc)) {
		uint32_t dmactl, dmactl_reg;

		if (sc->bnx_asicrev == BGE_ASICREV_BCM5762)
			dmactl_reg = BGE_RDMA_RSRVCTRL2;
		else
			dmactl_reg = BGE_RDMA_RSRVCTRL;

		dmactl = CSR_READ_4(sc, dmactl_reg);
		/*
		 * Adjust TX margin to prevent TX data corruption and
		 * fix internal FIFO overflow.
		 */
		if (sc->bnx_asicrev == BGE_ASICREV_BCM5719 ||
		    sc->bnx_asicrev == BGE_ASICREV_BCM5720 ||
		    sc->bnx_asicrev == BGE_ASICREV_BCM5762) {
			dmactl &= ~(BGE_RDMA_RSRVCTRL_FIFO_LWM_MASK |
			    BGE_RDMA_RSRVCTRL_FIFO_HWM_MASK |
			    BGE_RDMA_RSRVCTRL_TXMRGN_MASK);
			dmactl |= BGE_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
			    BGE_RDMA_RSRVCTRL_FIFO_HWM_1_5K |
			    BGE_RDMA_RSRVCTRL_TXMRGN_320B;
		}
		/*
		 * Enable fix for read DMA FIFO overruns.
		 * The fix is to limit the number of RX BDs
		 * the hardware would fetch at a time.
		 */
		CSR_WRITE_4(sc, dmactl_reg,
		    dmactl | BGE_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
	}
	if (sc->bnx_asicrev == BGE_ASICREV_BCM5719) {
		CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL,
		    CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) |
		    BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_4K |
		    BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K);
	} else if (sc->bnx_asicrev == BGE_ASICREV_BCM5720 ||
	    sc->bnx_asicrev == BGE_ASICREV_BCM5762) {
		uint32_t ctrl_reg;

		if (sc->bnx_asicrev == BGE_ASICREV_BCM5762)
			ctrl_reg = BGE_RDMA_LSO_CRPTEN_CTRL2;
		else
			ctrl_reg = BGE_RDMA_LSO_CRPTEN_CTRL;

		/*
		 * Allow 4KB burst length reads for non-LSO frames.
		 * Enable 512B burst length reads for buffer descriptors.
		 */
		CSR_WRITE_4(sc, ctrl_reg,
		    CSR_READ_4(sc, ctrl_reg) |
		    BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_512 |
		    BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K);
	}
	/* Turn on read DMA state machine */
	val = BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS;
	if (sc->bnx_asicrev == BGE_ASICREV_BCM5717)
		val |= BGE_RDMAMODE_MULT_DMA_RD_DIS;
	if (sc->bnx_asicrev == BGE_ASICREV_BCM5784 ||
	    sc->bnx_asicrev == BGE_ASICREV_BCM5785 ||
	    sc->bnx_asicrev == BGE_ASICREV_BCM57780) {
		val |= BGE_RDMAMODE_BD_SBD_CRPT_ATTN |
		    BGE_RDMAMODE_MBUF_RBD_CRPT_ATTN |
		    BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN;
	}
	if (sc->bnx_asicrev == BGE_ASICREV_BCM5720 ||
	    sc->bnx_asicrev == BGE_ASICREV_BCM5762) {
		val |= CSR_READ_4(sc, BGE_RDMA_MODE) &
		    BGE_RDMAMODE_H2BNC_VLAN_DET;
		/*
		 * Allow multiple outstanding read requests from
		 * non-LSO read DMA engine.
		 */
		val &= ~BGE_RDMAMODE_MULT_DMA_RD_DIS;
	}
	if (sc->bnx_asicrev == BGE_ASICREV_BCM57766)
		val |= BGE_RDMAMODE_JMB_2K_MMRR;
	if (sc->bnx_flags & BNX_FLAG_TSO)
		val |= BGE_RDMAMODE_TSO4_ENABLE;
	val |= BGE_RDMAMODE_FIFO_LONG_BURST;
	CSR_WRITE_4(sc, BGE_RDMA_MODE, val);
	DELAY(40);
	/* Turn on RX data completion state machine */
	CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);

	/* Turn on RX BD initiator state machine */
	CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);

	/* Turn on RX data and RX BD initiator state machine */
	CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);

	/* Turn on send BD completion state machine */
	CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);

	/* Turn on send data completion state machine */
	val = BGE_SDCMODE_ENABLE;
	if (sc->bnx_asicrev == BGE_ASICREV_BCM5761)
		val |= BGE_SDCMODE_CDELAY;
	CSR_WRITE_4(sc, BGE_SDC_MODE, val);

	/* Turn on send data initiator state machine */
	if (sc->bnx_flags & BNX_FLAG_TSO) {
		CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE |
		    BGE_SDIMODE_HW_LSO_PRE_DMA);
	} else {
		CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
	}

	/* Turn on send BD initiator state machine */
	val = BGE_SBDIMODE_ENABLE;
	if (sc->bnx_tx_ringcnt > 1)
		val |= BGE_SBDIMODE_MULTI_TXR;
	CSR_WRITE_4(sc, BGE_SBDI_MODE, val);

	/* Turn on send BD selector state machine */
	CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);

	CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
	CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
	    BGE_SDISTATSCTL_ENABLE | BGE_SDISTATSCTL_FASTER);
	/* ack/clear link change events */
	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
	    BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
	    BGE_MACSTAT_LINK_CHANGED);
	CSR_WRITE_4(sc, BGE_MI_STS, 0);

	/*
	 * Enable attention when the link has changed state for
	 * devices that use auto polling.
	 */
	if (sc->bnx_flags & BNX_FLAG_TBI) {
		CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
	} else {
		if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) {
			CSR_WRITE_4(sc, BGE_MI_MODE, sc->bnx_mi_mode);
			DELAY(80);
		}
	}

	/*
	 * Clear any pending link state attention.
	 * Otherwise some link state change events may be lost until attention
	 * is cleared by bnx_intr() -> bnx_softc.bnx_link_upd() sequence.
	 * It's not necessary on newer BCM chips - perhaps enabling link
	 * state change attentions implies clearing pending attention.
	 */
	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
	    BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
	    BGE_MACSTAT_LINK_CHANGED);

	/* Enable link state change attentions. */
	BNX_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);

	return 0;
}
/*
 * Probe for a Broadcom chip. Check the PCI vendor and device IDs
 * against our list and return its name if we find a match. Note
 * that since the Broadcom controller contains VPD support, we
 * can get the device name string from the controller itself instead
 * of the compiled-in string. This is a little slow, but it guarantees
 * we'll always announce the right product name.
 */
static int
bnx_probe(device_t dev)
{
	const struct bnx_type *t;
	uint16_t product, vendor;

	if (!pci_is_pcie(dev))
		return ENXIO;

	product = pci_get_device(dev);
	vendor = pci_get_vendor(dev);

	for (t = bnx_devs; t->bnx_name != NULL; t++) {
		if (vendor == t->bnx_vid && product == t->bnx_did)
			break;
	}
	if (t->bnx_name == NULL)
		return ENXIO;

	device_set_desc(dev, t->bnx_name);
	return 0;
}
static int
bnx_attach(device_t dev)
{
	struct bnx_softc *sc;
	struct bnx_rx_std_ring *std;
	struct ifnet *ifp;
	int error = 0, rid, capmask, i, std_cpuid, std_cpuid_def;
	uint8_t ether_addr[ETHER_ADDR_LEN];
	uint16_t product;
	uintptr_t mii_priv = 0;
#if defined(BNX_TSO_DEBUG) || defined(BNX_RSS_DEBUG) || defined(BNX_TSS_DEBUG)
	char desc[32];
#endif
#ifdef IFPOLL_ENABLE
	int offset, offset_def;
#endif

	sc = device_get_softc(dev);
	sc->bnx_dev = dev;

	callout_init_mp(&sc->bnx_tick_timer);
	lwkt_serialize_init(&sc->bnx_jslot_serializer);
	lwkt_serialize_init(&sc->bnx_main_serialize);

	/* Always setup interrupt mailboxes */
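	/*
	 * The interrupt mailboxes are 64-bit registers, hence the 8 byte
	 * stride from BGE_MBX_IRQ0_LO.
	 */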
	for (i = 0; i < BNX_INTR_MAX; ++i) {
		callout_init_mp(&sc->bnx_intr_data[i].bnx_intr_timer);
		sc->bnx_intr_data[i].bnx_sc = sc;
		sc->bnx_intr_data[i].bnx_intr_mbx = BGE_MBX_IRQ0_LO + (i * 8);
		sc->bnx_intr_data[i].bnx_intr_rid = -1;
		sc->bnx_intr_data[i].bnx_intr_cpuid = -1;
	}
	sc->bnx_func_addr = pci_get_function(dev);
	product = pci_get_device(dev);

#ifndef BURN_BRIDGES
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		uint32_t irq, mem;

		irq = pci_read_config(dev, PCIR_INTLINE, 4);
		mem = pci_read_config(dev, BGE_PCI_BAR0, 4);

		device_printf(dev, "chip is in D%d power mode "
		    "-- setting to D0\n", pci_get_powerstate(dev));

		pci_set_powerstate(dev, PCI_POWERSTATE_D0);

		pci_write_config(dev, PCIR_INTLINE, irq, 4);
		pci_write_config(dev, BGE_PCI_BAR0, mem, 4);
	}
#endif	/* !BURN_BRIDGES */
	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

	rid = BGE_PCI_BAR0;
	sc->bnx_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->bnx_res == NULL) {
		device_printf(dev, "couldn't map memory\n");
		return ENXIO;
	}

	sc->bnx_btag = rman_get_bustag(sc->bnx_res);
	sc->bnx_bhandle = rman_get_bushandle(sc->bnx_res);

	/* Save various chip information */
	sc->bnx_chipid =
	    pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >>
	    BGE_PCIMISCCTL_ASICREV_SHIFT;
	if (BGE_ASICREV(sc->bnx_chipid) == BGE_ASICREV_USE_PRODID_REG) {
		/* All chips having dedicated ASICREV register have CPMU */
		sc->bnx_flags |= BNX_FLAG_CPMU;

		switch (product) {
		case PCI_PRODUCT_BROADCOM_BCM5717:
		case PCI_PRODUCT_BROADCOM_BCM5717C:
		case PCI_PRODUCT_BROADCOM_BCM5718:
		case PCI_PRODUCT_BROADCOM_BCM5719:
		case PCI_PRODUCT_BROADCOM_BCM5720_ALT:
		case PCI_PRODUCT_BROADCOM_BCM5725:
		case PCI_PRODUCT_BROADCOM_BCM5727:
		case PCI_PRODUCT_BROADCOM_BCM5762:
			sc->bnx_chipid = pci_read_config(dev,
			    BGE_PCI_GEN2_PRODID_ASICREV, 4);
			break;

		case PCI_PRODUCT_BROADCOM_BCM57761:
		case PCI_PRODUCT_BROADCOM_BCM57762:
		case PCI_PRODUCT_BROADCOM_BCM57765:
		case PCI_PRODUCT_BROADCOM_BCM57766:
		case PCI_PRODUCT_BROADCOM_BCM57781:
		case PCI_PRODUCT_BROADCOM_BCM57782:
		case PCI_PRODUCT_BROADCOM_BCM57785:
		case PCI_PRODUCT_BROADCOM_BCM57786:
		case PCI_PRODUCT_BROADCOM_BCM57791:
		case PCI_PRODUCT_BROADCOM_BCM57795:
			sc->bnx_chipid = pci_read_config(dev,
			    BGE_PCI_GEN15_PRODID_ASICREV, 4);
			break;

		default:
			sc->bnx_chipid = pci_read_config(dev,
			    BGE_PCI_PRODID_ASICREV, 4);
			break;
		}
	}
1944 if (sc->bnx_chipid == BGE_CHIPID_BCM5717_C0)
1945 sc->bnx_chipid = BGE_CHIPID_BCM5720_A0;
1947 sc->bnx_asicrev = BGE_ASICREV(sc->bnx_chipid);
1948 sc->bnx_chiprev = BGE_CHIPREV(sc->bnx_chipid);
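/*
 * Worked example, assuming the usual bge-family field extraction
 * (BGE_ASICREV(x) == (x) >> 12, BGE_CHIPREV(x) == (x) >> 8):
 * a product-ID register value of 0x05719000 (BCM5719 A0) yields
 * asicrev 0x5719 and chiprev 0x57190.
 */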
1950 switch (sc->bnx_asicrev) {
1951 case BGE_ASICREV_BCM5717:
1952 case BGE_ASICREV_BCM5719:
1953 case BGE_ASICREV_BCM5720:
1954 sc->bnx_flags |= BNX_FLAG_5717_PLUS | BNX_FLAG_57765_PLUS;
1957 case BGE_ASICREV_BCM5762:
1958 sc->bnx_flags |= BNX_FLAG_57765_PLUS;
1961 case BGE_ASICREV_BCM57765:
1962 case BGE_ASICREV_BCM57766:
1963 sc->bnx_flags |= BNX_FLAG_57765_FAMILY | BNX_FLAG_57765_PLUS;
1967 if (sc->bnx_asicrev == BGE_ASICREV_BCM5717 ||
1968 sc->bnx_asicrev == BGE_ASICREV_BCM5719 ||
1969 sc->bnx_asicrev == BGE_ASICREV_BCM5720 ||
1970 sc->bnx_asicrev == BGE_ASICREV_BCM5762)
1971 sc->bnx_flags |= BNX_FLAG_APE;
1973 sc->bnx_flags |= BNX_FLAG_TSO;
1974 if (sc->bnx_asicrev == BGE_ASICREV_BCM5719 &&
1975 sc->bnx_chipid == BGE_CHIPID_BCM5719_A0)
1976 sc->bnx_flags &= ~BNX_FLAG_TSO;
1978 if (sc->bnx_asicrev == BGE_ASICREV_BCM5717 ||
1979 BNX_IS_57765_FAMILY(sc)) {
* All chips in the BCM57785 and BCM5718 families have a bug:
* under certain conditions the interrupt will not be enabled
* even if a status tag is written to the interrupt mailbox.
*
* BCM5719 and BCM5720 have a hardware workaround for this bug;
* see the comment near BGE_PCIDMARWCTL_TAGGED_STATUS_WA in
* For the rest of the chips in these two families, we have to
* poll the status block at a high rate (currently every 10ms)
* to check whether the interrupt is hosed.
* See bnx_check_intr_*() for details.
1995 sc->bnx_flags |= BNX_FLAG_STATUSTAG_BUG;
1998 sc->bnx_pciecap = pci_get_pciecap_ptr(sc->bnx_dev);
1999 if (sc->bnx_asicrev == BGE_ASICREV_BCM5719 ||
2000 sc->bnx_asicrev == BGE_ASICREV_BCM5720)
2001 pcie_set_max_readrq(dev, PCIEM_DEVCTL_MAX_READRQ_2048);
2003 pcie_set_max_readrq(dev, PCIEM_DEVCTL_MAX_READRQ_4096);
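/*
 * BCM5719/5720 are capped at a 2048-byte max read request size,
 * reportedly to avoid DMA problems with larger read requests on
 * those chips (Broadcom's Linux tg3 driver applies the same cap);
 * all other chips use 4096 bytes for better PCIe read throughput.
 */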
2004 device_printf(dev, "CHIP ID 0x%08x; "
2005 "ASIC REV 0x%02x; CHIP REV 0x%02x\n",
2006 sc->bnx_chipid, sc->bnx_asicrev, sc->bnx_chiprev);
2009 * Set various PHY quirk flags.
2012 capmask = MII_CAPMASK_DEFAULT;
2013 if (product == PCI_PRODUCT_BROADCOM_BCM57791 ||
2014 product == PCI_PRODUCT_BROADCOM_BCM57795) {
2016 capmask &= ~BMSR_EXTSTAT;
2019 mii_priv |= BRGPHY_FLAG_WIRESPEED;
2020 if (sc->bnx_chipid == BGE_CHIPID_BCM5762_A0)
2021 mii_priv |= BRGPHY_FLAG_5762_A0;
2024 * Chips with APE need BAR2 access for APE registers/memory.
2026 if (sc->bnx_flags & BNX_FLAG_APE) {
2030 sc->bnx_res2 = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2032 if (sc->bnx_res2 == NULL) {
2033 device_printf(dev, "couldn't map BAR2 memory\n");
2038 /* Enable APE register/memory access by host driver. */
2039 pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);
2040 pcistate |= BGE_PCISTATE_ALLOW_APE_CTLSPC_WR |
2041 BGE_PCISTATE_ALLOW_APE_SHMEM_WR |
2042 BGE_PCISTATE_ALLOW_APE_PSPACE_WR;
2043 pci_write_config(dev, BGE_PCI_PCISTATE, pcistate, 4);
2045 bnx_ape_lock_init(sc);
2046 bnx_ape_read_fw_ver(sc);
/* Initialize if_name earlier, so if_printf can be used */
2050 ifp = &sc->arpcom.ac_if;
2051 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2054 * Try to reset the chip.
2056 bnx_sig_pre_reset(sc, BNX_RESET_SHUTDOWN);
2058 bnx_sig_post_reset(sc, BNX_RESET_SHUTDOWN);
2060 if (bnx_chipinit(sc)) {
2061 device_printf(dev, "chip initialization failed\n");
2067 * Get station address
2069 error = bnx_get_eaddr(sc, ether_addr);
2071 device_printf(dev, "failed to read station address\n");
/* Set up RX/TX ring counts and the interrupt count */
2076 bnx_setup_ring_cnt(sc);
2078 if ((sc->bnx_rx_retcnt == 1 && sc->bnx_tx_ringcnt == 1) ||
2079 (sc->bnx_rx_retcnt > 1 && sc->bnx_tx_ringcnt > 1)) {
2081 * The RX ring and the corresponding TX ring processing
2082 * should be on the same CPU, since they share the same
2085 sc->bnx_flags |= BNX_FLAG_RXTX_BUNDLE;
2087 device_printf(dev, "RX/TX bundle\n");
2088 if (sc->bnx_tx_ringcnt > 1) {
2090 * Multiple TX rings do not share status block
2091 * with link status, so link status will have
2092 * to save its own status_tag.
2094 sc->bnx_flags |= BNX_FLAG_STATUS_HASTAG;
2096 device_printf(dev, "status needs tag\n");
2099 KKASSERT(sc->bnx_rx_retcnt > 1 && sc->bnx_tx_ringcnt == 1);
2101 device_printf(dev, "RX/TX not bundled\n");
2104 error = bnx_dma_alloc(dev);
2108 #ifdef IFPOLL_ENABLE
2109 if (sc->bnx_flags & BNX_FLAG_RXTX_BUNDLE) {
2111 * NPOLLING RX/TX CPU offset
2113 if (sc->bnx_rx_retcnt == ncpus2) {
2117 (sc->bnx_rx_retcnt * device_get_unit(dev)) % ncpus2;
2118 offset = device_getenv_int(dev, "npoll.offset",
2120 if (offset >= ncpus2 ||
2121 offset % sc->bnx_rx_retcnt != 0) {
2122 device_printf(dev, "invalid npoll.offset %d, "
2123 "use %d\n", offset, offset_def);
2124 offset = offset_def;
2127 sc->bnx_npoll_rxoff = offset;
2128 sc->bnx_npoll_txoff = offset;
2131 * NPOLLING RX CPU offset
2133 if (sc->bnx_rx_retcnt == ncpus2) {
2137 (sc->bnx_rx_retcnt * device_get_unit(dev)) % ncpus2;
2138 offset = device_getenv_int(dev, "npoll.rxoff",
2140 if (offset >= ncpus2 ||
2141 offset % sc->bnx_rx_retcnt != 0) {
2142 device_printf(dev, "invalid npoll.rxoff %d, "
2143 "use %d\n", offset, offset_def);
2144 offset = offset_def;
2147 sc->bnx_npoll_rxoff = offset;
2150 * NPOLLING TX CPU offset
2152 offset_def = device_get_unit(dev) % ncpus2;
2153 offset = device_getenv_int(dev, "npoll.txoff", offset_def);
2154 if (offset >= ncpus2) {
2155 device_printf(dev, "invalid npoll.txoff %d, use %d\n",
2156 offset, offset_def);
2157 offset = offset_def;
2159 sc->bnx_npoll_txoff = offset;
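/*
 * Worked example for the offset math above, assuming ncpus2 == 8,
 * 4 RX return rings and device unit 1:
 *	offset_def = (4 * 1) % 8 = 4
 * so by default the rings are polled on cpu4..cpu7.  Note that an
 * npoll.rxoff tunable must also be a multiple of the RX ring count
 * to keep the rings CPU-aligned.
 */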
2161 #endif /* IFPOLL_ENABLE */
2164 * Allocate interrupt
2166 error = bnx_alloc_intr(sc);
2170 /* Setup serializers */
2171 bnx_setup_serialize(sc);
2173 /* Set default tuneable values. */
2174 sc->bnx_rx_coal_ticks = BNX_RX_COAL_TICKS_DEF;
2175 sc->bnx_tx_coal_ticks = BNX_TX_COAL_TICKS_DEF;
2176 sc->bnx_rx_coal_bds = BNX_RX_COAL_BDS_DEF;
2177 sc->bnx_rx_coal_bds_poll = sc->bnx_rx_ret_ring[0].bnx_rx_cntmax;
2178 sc->bnx_tx_coal_bds = BNX_TX_COAL_BDS_DEF;
2179 sc->bnx_tx_coal_bds_poll = BNX_TX_COAL_BDS_POLL_DEF;
2180 sc->bnx_rx_coal_bds_int = BNX_RX_COAL_BDS_INT_DEF;
2181 sc->bnx_tx_coal_bds_int = BNX_TX_COAL_BDS_INT_DEF;
2183 /* Set up ifnet structure */
2185 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2186 ifp->if_ioctl = bnx_ioctl;
2187 ifp->if_start = bnx_start;
2188 #ifdef IFPOLL_ENABLE
2189 ifp->if_npoll = bnx_npoll;
2191 ifp->if_init = bnx_init;
2192 ifp->if_serialize = bnx_serialize;
2193 ifp->if_deserialize = bnx_deserialize;
2194 ifp->if_tryserialize = bnx_tryserialize;
2196 ifp->if_serialize_assert = bnx_serialize_assert;
2198 ifp->if_mtu = ETHERMTU;
2199 ifp->if_capabilities = IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
2201 ifp->if_capabilities |= IFCAP_HWCSUM;
2202 ifp->if_hwassist = BNX_CSUM_FEATURES;
2203 if (sc->bnx_flags & BNX_FLAG_TSO) {
2204 ifp->if_capabilities |= IFCAP_TSO;
2205 ifp->if_hwassist |= CSUM_TSO;
2207 if (BNX_RSS_ENABLED(sc))
2208 ifp->if_capabilities |= IFCAP_RSS;
2209 ifp->if_capenable = ifp->if_capabilities;
2211 ifq_set_maxlen(&ifp->if_snd, BGE_TX_RING_CNT - 1);
2212 ifq_set_ready(&ifp->if_snd);
2213 ifq_set_subq_cnt(&ifp->if_snd, sc->bnx_tx_ringcnt);
2215 if (sc->bnx_tx_ringcnt > 1) {
2216 ifp->if_mapsubq = ifq_mapsubq_mask;
2217 ifq_set_subq_mask(&ifp->if_snd, sc->bnx_tx_ringcnt - 1);
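/*
 * ifq_mapsubq_mask() picks a subqueue by masking the dispatching
 * CPU id with the mask set here, so this path assumes a power-of-2
 * TX ring count; e.g. with 4 rings, a packet dispatched on cpu5
 * lands on TX ring 1.
 */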
2221 * Figure out what sort of media we have by checking the
2222 * hardware config word in the first 32k of NIC internal memory,
2223 * or fall back to examining the EEPROM if necessary.
2224 * Note: on some BCM5700 cards, this value appears to be unset.
2225 * If that's the case, we have to rely on identifying the NIC
2226 * by its PCI subsystem ID, as we do below for the SysKonnect
2229 if (bnx_readmem_ind(sc, BGE_SRAM_DATA_SIG) == BGE_SRAM_DATA_SIG_MAGIC) {
2230 hwcfg = bnx_readmem_ind(sc, BGE_SRAM_DATA_CFG);
2232 if (bnx_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
2234 device_printf(dev, "failed to read EEPROM\n");
2238 hwcfg = ntohl(hwcfg);
2241 /* The SysKonnect SK-9D41 is a 1000baseSX card. */
2242 if (pci_get_subvendor(dev) == PCI_PRODUCT_SCHNEIDERKOCH_SK_9D41 ||
2243 (hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER)
2244 sc->bnx_flags |= BNX_FLAG_TBI;
2247 if (sc->bnx_flags & BNX_FLAG_CPMU)
2248 sc->bnx_mi_mode = BGE_MIMODE_500KHZ_CONST;
2250 sc->bnx_mi_mode = BGE_MIMODE_BASE;
/* Set up link status update routines */
2253 if (sc->bnx_flags & BNX_FLAG_TBI) {
2254 sc->bnx_link_upd = bnx_tbi_link_upd;
2255 sc->bnx_link_chg = BGE_MACSTAT_LINK_CHANGED;
2256 } else if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) {
2257 sc->bnx_link_upd = bnx_autopoll_link_upd;
2258 sc->bnx_link_chg = BGE_MACSTAT_LINK_CHANGED;
2260 sc->bnx_link_upd = bnx_copper_link_upd;
2261 sc->bnx_link_chg = BGE_MACSTAT_LINK_CHANGED;
2264 /* Set default PHY address */
2268 * PHY address mapping for various devices.
2270 * | F0 Cu | F0 Sr | F1 Cu | F1 Sr |
2271 * ---------+-------+-------+-------+-------+
2272 * BCM57XX | 1 | X | X | X |
2273 * BCM5717 | 1 | 8 | 2 | 9 |
2274 * BCM5719 | 1 | 8 | 2 | 9 |
2275 * BCM5720 | 1 | 8 | 2 | 9 |
2277 * | F2 Cu | F2 Sr | F3 Cu | F3 Sr |
2278 * ---------+-------+-------+-------+-------+
2279 * BCM57XX | X | X | X | X |
2280 * BCM5717 | X | X | X | X |
2281 * BCM5719 | 3 | 10 | 4 | 11 |
2282 * BCM5720 | X | X | X | X |
2284 * Other addresses may respond but they are not
2285 * IEEE compliant PHYs and should be ignored.
2287 if (BNX_IS_5717_PLUS(sc)) {
2288 if (sc->bnx_chipid == BGE_CHIPID_BCM5717_A0) {
2289 if (CSR_READ_4(sc, BGE_SGDIG_STS) &
2290 BGE_SGDIGSTS_IS_SERDES)
2291 sc->bnx_phyno = sc->bnx_func_addr + 8;
2293 sc->bnx_phyno = sc->bnx_func_addr + 1;
2295 if (CSR_READ_4(sc, BGE_CPMU_PHY_STRAP) &
2296 BGE_CPMU_PHY_STRAP_IS_SERDES)
2297 sc->bnx_phyno = sc->bnx_func_addr + 8;
2299 sc->bnx_phyno = sc->bnx_func_addr + 1;
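/*
 * Worked example against the table above: function 2 of a BCM5719
 * in SerDes mode gets PHY address 2 + 8 = 10, while the same
 * function with copper media gets 2 + 1 = 3.
 */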
2303 if (sc->bnx_flags & BNX_FLAG_TBI) {
2304 ifmedia_init(&sc->bnx_ifmedia, IFM_IMASK,
2305 bnx_ifmedia_upd, bnx_ifmedia_sts);
2306 ifmedia_add(&sc->bnx_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
2307 ifmedia_add(&sc->bnx_ifmedia,
2308 IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL);
2309 ifmedia_add(&sc->bnx_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
2310 ifmedia_set(&sc->bnx_ifmedia, IFM_ETHER|IFM_AUTO);
2311 sc->bnx_ifmedia.ifm_media = sc->bnx_ifmedia.ifm_cur->ifm_media;
2313 struct mii_probe_args mii_args;
2315 mii_probe_args_init(&mii_args, bnx_ifmedia_upd, bnx_ifmedia_sts);
2316 mii_args.mii_probemask = 1 << sc->bnx_phyno;
2317 mii_args.mii_capmask = capmask;
2318 mii_args.mii_privtag = MII_PRIVTAG_BRGPHY;
2319 mii_args.mii_priv = mii_priv;
2321 error = mii_probe(dev, &sc->bnx_miibus, &mii_args);
2323 device_printf(dev, "MII without any PHY!\n");
2329 * Create sysctl nodes.
2331 sysctl_ctx_init(&sc->bnx_sysctl_ctx);
2332 sc->bnx_sysctl_tree = SYSCTL_ADD_NODE(&sc->bnx_sysctl_ctx,
2333 SYSCTL_STATIC_CHILDREN(_hw),
2335 device_get_nameunit(dev),
2337 if (sc->bnx_sysctl_tree == NULL) {
2338 device_printf(dev, "can't add sysctl node\n");
2343 SYSCTL_ADD_INT(&sc->bnx_sysctl_ctx,
2344 SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
2345 "rx_rings", CTLFLAG_RD, &sc->bnx_rx_retcnt, 0, "# of RX rings");
2346 SYSCTL_ADD_INT(&sc->bnx_sysctl_ctx,
2347 SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
2348 "tx_rings", CTLFLAG_RD, &sc->bnx_tx_ringcnt, 0, "# of TX rings");
2350 SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
2351 SYSCTL_CHILDREN(sc->bnx_sysctl_tree),
2352 OID_AUTO, "rx_coal_ticks",
2353 CTLTYPE_INT | CTLFLAG_RW,
2354 sc, 0, bnx_sysctl_rx_coal_ticks, "I",
2355 "Receive coalescing ticks (usec).");
2356 SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
2357 SYSCTL_CHILDREN(sc->bnx_sysctl_tree),
2358 OID_AUTO, "tx_coal_ticks",
2359 CTLTYPE_INT | CTLFLAG_RW,
2360 sc, 0, bnx_sysctl_tx_coal_ticks, "I",
2361 "Transmit coalescing ticks (usec).");
2362 SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
2363 SYSCTL_CHILDREN(sc->bnx_sysctl_tree),
2364 OID_AUTO, "rx_coal_bds",
2365 CTLTYPE_INT | CTLFLAG_RW,
2366 sc, 0, bnx_sysctl_rx_coal_bds, "I",
2367 "Receive max coalesced BD count.");
2368 SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
2369 SYSCTL_CHILDREN(sc->bnx_sysctl_tree),
2370 OID_AUTO, "rx_coal_bds_poll",
2371 CTLTYPE_INT | CTLFLAG_RW,
2372 sc, 0, bnx_sysctl_rx_coal_bds_poll, "I",
2373 "Receive max coalesced BD count in polling.");
2374 SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
2375 SYSCTL_CHILDREN(sc->bnx_sysctl_tree),
2376 OID_AUTO, "tx_coal_bds",
2377 CTLTYPE_INT | CTLFLAG_RW,
2378 sc, 0, bnx_sysctl_tx_coal_bds, "I",
2379 "Transmit max coalesced BD count.");
2380 SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
2381 SYSCTL_CHILDREN(sc->bnx_sysctl_tree),
2382 OID_AUTO, "tx_coal_bds_poll",
2383 CTLTYPE_INT | CTLFLAG_RW,
2384 sc, 0, bnx_sysctl_tx_coal_bds_poll, "I",
2385 "Transmit max coalesced BD count in polling.");
2387 * A common design characteristic for many Broadcom
2388 * client controllers is that they only support a
2389 * single outstanding DMA read operation on the PCIe
2390 * bus. This means that it will take twice as long to
2391 * fetch a TX frame that is split into header and
2392 * payload buffers as it does to fetch a single,
2393 * contiguous TX frame (2 reads vs. 1 read). For these
2394 * controllers, coalescing buffers to reduce the number
* of memory reads is an effective way to get maximum
* performance (about 940Mbps). Without collapsing TX
* buffers the maximum TCP bulk transfer performance
* is about 850Mbps. However, forcing mbuf coalescing
* consumes a lot of CPU cycles, so it is left off by
2402 SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
2403 SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
2404 "force_defrag", CTLTYPE_INT | CTLFLAG_RW,
2405 sc, 0, bnx_sysctl_force_defrag, "I",
2406 "Force defragment on TX path");
2408 SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
2409 SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
2410 "tx_wreg", CTLTYPE_INT | CTLFLAG_RW,
2411 sc, 0, bnx_sysctl_tx_wreg, "I",
2412 "# of segments before writing to hardware register");
2414 SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
2415 SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
2416 "std_refill", CTLTYPE_INT | CTLFLAG_RW,
2417 sc, 0, bnx_sysctl_std_refill, "I",
2418 "# of packets received before scheduling standard refilling");
2420 SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
2421 SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
2422 "rx_coal_bds_int", CTLTYPE_INT | CTLFLAG_RW,
2423 sc, 0, bnx_sysctl_rx_coal_bds_int, "I",
2424 "Receive max coalesced BD count during interrupt.");
2425 SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
2426 SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
2427 "tx_coal_bds_int", CTLTYPE_INT | CTLFLAG_RW,
2428 sc, 0, bnx_sysctl_tx_coal_bds_int, "I",
2429 "Transmit max coalesced BD count during interrupt.");
2431 #ifdef IFPOLL_ENABLE
2432 if (sc->bnx_flags & BNX_FLAG_RXTX_BUNDLE) {
2433 SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
2434 SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
2435 "npoll_offset", CTLTYPE_INT | CTLFLAG_RW,
2436 sc, 0, bnx_sysctl_npoll_offset, "I",
2437 "NPOLLING cpu offset");
2439 SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
2440 SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
2441 "npoll_rxoff", CTLTYPE_INT | CTLFLAG_RW,
2442 sc, 0, bnx_sysctl_npoll_rxoff, "I",
2443 "NPOLLING RX cpu offset");
2444 SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
2445 SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
2446 "npoll_txoff", CTLTYPE_INT | CTLFLAG_RW,
2447 sc, 0, bnx_sysctl_npoll_txoff, "I",
2448 "NPOLLING TX cpu offset");
2452 #ifdef BNX_RSS_DEBUG
2453 SYSCTL_ADD_INT(&sc->bnx_sysctl_ctx,
2454 SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
2455 "std_refill_mask", CTLFLAG_RD,
2456 &sc->bnx_rx_std_ring.bnx_rx_std_refill, 0, "");
2457 SYSCTL_ADD_INT(&sc->bnx_sysctl_ctx,
2458 SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
2459 "std_used", CTLFLAG_RD,
2460 &sc->bnx_rx_std_ring.bnx_rx_std_used, 0, "");
2461 SYSCTL_ADD_INT(&sc->bnx_sysctl_ctx,
2462 SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
2463 "rss_debug", CTLFLAG_RW, &sc->bnx_rss_debug, 0, "");
2464 for (i = 0; i < sc->bnx_rx_retcnt; ++i) {
2465 ksnprintf(desc, sizeof(desc), "rx_pkt%d", i);
2466 SYSCTL_ADD_ULONG(&sc->bnx_sysctl_ctx,
2467 SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
2468 desc, CTLFLAG_RW, &sc->bnx_rx_ret_ring[i].bnx_rx_pkt, "");
2470 ksnprintf(desc, sizeof(desc), "rx_force_sched%d", i);
2471 SYSCTL_ADD_ULONG(&sc->bnx_sysctl_ctx,
2472 SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
2474 &sc->bnx_rx_ret_ring[i].bnx_rx_force_sched, "");
2477 #ifdef BNX_TSS_DEBUG
2478 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) {
2479 ksnprintf(desc, sizeof(desc), "tx_pkt%d", i);
2480 SYSCTL_ADD_ULONG(&sc->bnx_sysctl_ctx,
2481 SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
2482 desc, CTLFLAG_RW, &sc->bnx_tx_ring[i].bnx_tx_pkt, "");
2486 SYSCTL_ADD_ULONG(&sc->bnx_sysctl_ctx,
2487 SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
2488 "norxbds", CTLFLAG_RW, &sc->bnx_norxbds, "");
2490 SYSCTL_ADD_ULONG(&sc->bnx_sysctl_ctx,
2491 SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
2492 "errors", CTLFLAG_RW, &sc->bnx_errors, "");
2494 #ifdef BNX_TSO_DEBUG
2495 for (i = 0; i < BNX_TSO_NSTATS; ++i) {
2496 ksnprintf(desc, sizeof(desc), "tso%d", i + 1);
2497 SYSCTL_ADD_ULONG(&sc->bnx_sysctl_ctx,
2498 SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
2499 desc, CTLFLAG_RW, &sc->bnx_tsosegs[i], "");
2504 * Call MI attach routine.
2506 ether_ifattach(ifp, ether_addr, NULL);
2508 /* Setup TX rings and subqueues */
2509 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) {
2510 struct ifaltq_subque *ifsq = ifq_get_subq(&ifp->if_snd, i);
2511 struct bnx_tx_ring *txr = &sc->bnx_tx_ring[i];
2513 ifsq_set_cpuid(ifsq, txr->bnx_tx_cpuid);
2514 ifsq_set_hw_serialize(ifsq, &txr->bnx_tx_serialize);
2515 ifsq_set_priv(ifsq, txr);
2516 txr->bnx_ifsq = ifsq;
2518 ifsq_watchdog_init(&txr->bnx_tx_watchdog, ifsq, bnx_watchdog);
2521 device_printf(dev, "txr %d -> cpu%d\n", i,
2526 error = bnx_setup_intr(sc);
2528 ether_ifdetach(ifp);
2531 bnx_set_tick_cpuid(sc, FALSE);
* Create the standard RX ring refill thread
2536 std_cpuid_def = device_get_unit(dev) % ncpus;
2537 std_cpuid = device_getenv_int(dev, "std.cpuid", std_cpuid_def);
2538 if (std_cpuid < 0 || std_cpuid >= ncpus) {
2539 device_printf(dev, "invalid std.cpuid %d, use %d\n",
2540 std_cpuid, std_cpuid_def);
2541 std_cpuid = std_cpuid_def;
2544 std = &sc->bnx_rx_std_ring;
2545 lwkt_create(bnx_rx_std_refill_ithread, std, NULL,
2546 &std->bnx_rx_std_ithread, TDF_NOSTART | TDF_INTTHREAD, std_cpuid,
2547 "%s std", device_get_nameunit(dev));
2548 lwkt_setpri(&std->bnx_rx_std_ithread, TDPRI_INT_MED);
2549 std->bnx_rx_std_ithread.td_preemptable = lwkt_preempt;
2550 sc->bnx_flags |= BNX_FLAG_STD_THREAD;
2559 bnx_detach(device_t dev)
2561 struct bnx_softc *sc = device_get_softc(dev);
2563 if (device_is_attached(dev)) {
2564 struct ifnet *ifp = &sc->arpcom.ac_if;
2566 ifnet_serialize_all(ifp);
2568 bnx_teardown_intr(sc, sc->bnx_intr_cnt);
2569 ifnet_deserialize_all(ifp);
2571 ether_ifdetach(ifp);
2574 if (sc->bnx_flags & BNX_FLAG_STD_THREAD) {
2575 struct bnx_rx_std_ring *std = &sc->bnx_rx_std_ring;
2577 tsleep_interlock(std, 0);
2579 if (std->bnx_rx_std_ithread.td_gd == mycpu) {
2580 bnx_rx_std_refill_stop(std);
2582 lwkt_send_ipiq(std->bnx_rx_std_ithread.td_gd,
2583 bnx_rx_std_refill_stop, std);
2586 tsleep(std, PINTERLOCKED, "bnx_detach", 0);
2588 device_printf(dev, "RX std ithread exited\n");
2590 lwkt_synchronize_ipiqs("bnx_detach_ipiq");
2593 if (sc->bnx_flags & BNX_FLAG_TBI)
2594 ifmedia_removeall(&sc->bnx_ifmedia);
2596 device_delete_child(dev, sc->bnx_miibus);
2597 bus_generic_detach(dev);
2601 if (sc->bnx_msix_mem_res != NULL) {
2602 bus_release_resource(dev, SYS_RES_MEMORY, sc->bnx_msix_mem_rid,
2603 sc->bnx_msix_mem_res);
2605 if (sc->bnx_res != NULL) {
2606 bus_release_resource(dev, SYS_RES_MEMORY,
2607 BGE_PCI_BAR0, sc->bnx_res);
2609 if (sc->bnx_res2 != NULL) {
2610 bus_release_resource(dev, SYS_RES_MEMORY,
2611 PCIR_BAR(2), sc->bnx_res2);
2614 if (sc->bnx_sysctl_tree != NULL)
2615 sysctl_ctx_free(&sc->bnx_sysctl_ctx);
2619 if (sc->bnx_serialize != NULL)
2620 kfree(sc->bnx_serialize, M_DEVBUF);
2626 bnx_reset(struct bnx_softc *sc)
2628 device_t dev = sc->bnx_dev;
2629 uint32_t cachesize, command, reset, mac_mode, mac_mode_mask;
2630 void (*write_op)(struct bnx_softc *, uint32_t, uint32_t);
2634 mac_mode_mask = BGE_MACMODE_HALF_DUPLEX | BGE_MACMODE_PORTMODE;
2635 if (sc->bnx_mfw_flags & BNX_MFW_ON_APE)
2636 mac_mode_mask |= BGE_MACMODE_APE_RX_EN | BGE_MACMODE_APE_TX_EN;
2637 mac_mode = CSR_READ_4(sc, BGE_MAC_MODE) & mac_mode_mask;
2639 write_op = bnx_writemem_direct;
2641 CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
2642 for (i = 0; i < 8000; i++) {
2643 if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1)
if_printf(&sc->arpcom.ac_if, "NVRAM lock timed out!\n");
2650 /* Take APE lock when performing reset. */
2651 bnx_ape_lock(sc, BGE_APE_LOCK_GRC);
2653 /* Save some important PCI state. */
2654 cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
2655 command = pci_read_config(dev, BGE_PCI_CMD, 4);
2657 pci_write_config(dev, BGE_PCI_MISC_CTL,
2658 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2659 BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW|
2660 BGE_PCIMISCCTL_TAGGED_STATUS, 4);
2662 /* Disable fastboot on controllers that support it. */
2664 if_printf(&sc->arpcom.ac_if, "Disabling fastboot\n");
2665 CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0x0);
2668 * Write the magic number to SRAM at offset 0xB50.
2669 * When firmware finishes its initialization it will
2670 * write ~BGE_SRAM_FW_MB_MAGIC to the same location.
2672 bnx_writemem_ind(sc, BGE_SRAM_FW_MB, BGE_SRAM_FW_MB_MAGIC);
2674 reset = BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1);
2676 /* XXX: Broadcom Linux driver. */
2677 /* Force PCI-E 1.0a mode */
2678 if (!BNX_IS_57765_PLUS(sc) &&
2679 CSR_READ_4(sc, BGE_PCIE_PHY_TSTCTL) ==
2680 (BGE_PCIE_PHY_TSTCTL_PSCRAM |
2681 BGE_PCIE_PHY_TSTCTL_PCIE10)) {
2682 CSR_WRITE_4(sc, BGE_PCIE_PHY_TSTCTL,
2683 BGE_PCIE_PHY_TSTCTL_PSCRAM);
2685 if (sc->bnx_chipid != BGE_CHIPID_BCM5750_A0) {
2686 /* Prevent PCIE link training during global reset */
2687 CSR_WRITE_4(sc, BGE_MISC_CFG, (1<<29));
2692 * Set GPHY Power Down Override to leave GPHY
2693 * powered up in D0 uninitialized.
2695 if ((sc->bnx_flags & BNX_FLAG_CPMU) == 0)
2696 reset |= BGE_MISCCFG_GPHY_PD_OVERRIDE;
2698 /* Issue global reset */
2699 write_op(sc, BGE_MISC_CFG, reset);
2703 /* XXX: Broadcom Linux driver. */
2704 if (sc->bnx_chipid == BGE_CHIPID_BCM5750_A0) {
2707 DELAY(500000); /* wait for link training to complete */
2708 v = pci_read_config(dev, 0xc4, 4);
2709 pci_write_config(dev, 0xc4, v | (1<<15), 4);
2712 devctl = pci_read_config(dev, sc->bnx_pciecap + PCIER_DEVCTRL, 2);
2714 /* Disable no snoop and disable relaxed ordering. */
2715 devctl &= ~(PCIEM_DEVCTL_RELAX_ORDER | PCIEM_DEVCTL_NOSNOOP);
/* Old PCI-E chips only support a 128-byte Max Payload Size. */
2718 if ((sc->bnx_flags & BNX_FLAG_CPMU) == 0) {
2719 devctl &= ~PCIEM_DEVCTL_MAX_PAYLOAD_MASK;
2720 devctl |= PCIEM_DEVCTL_MAX_PAYLOAD_128;
2723 pci_write_config(dev, sc->bnx_pciecap + PCIER_DEVCTRL,
2726 /* Clear error status. */
2727 pci_write_config(dev, sc->bnx_pciecap + PCIER_DEVSTS,
2728 PCIEM_DEVSTS_CORR_ERR |
2729 PCIEM_DEVSTS_NFATAL_ERR |
2730 PCIEM_DEVSTS_FATAL_ERR |
2731 PCIEM_DEVSTS_UNSUPP_REQ, 2);
2733 /* Reset some of the PCI state that got zapped by reset */
2734 pci_write_config(dev, BGE_PCI_MISC_CTL,
2735 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2736 BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW|
2737 BGE_PCIMISCCTL_TAGGED_STATUS, 4);
2738 val = BGE_PCISTATE_ROM_ENABLE | BGE_PCISTATE_ROM_RETRY_ENABLE;
2739 if (sc->bnx_mfw_flags & BNX_MFW_ON_APE) {
2740 val |= BGE_PCISTATE_ALLOW_APE_CTLSPC_WR |
2741 BGE_PCISTATE_ALLOW_APE_SHMEM_WR |
2742 BGE_PCISTATE_ALLOW_APE_PSPACE_WR;
2744 pci_write_config(dev, BGE_PCI_PCISTATE, val, 4);
2745 pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
2746 pci_write_config(dev, BGE_PCI_CMD, command, 4);
2748 /* Enable memory arbiter */
2749 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
2751 /* Fix up byte swapping */
2752 CSR_WRITE_4(sc, BGE_MODE_CTL, bnx_dma_swap_options(sc));
2754 val = CSR_READ_4(sc, BGE_MAC_MODE);
2755 val = (val & ~mac_mode_mask) | mac_mode;
2756 CSR_WRITE_4(sc, BGE_MAC_MODE, val);
2759 bnx_ape_unlock(sc, BGE_APE_LOCK_GRC);
2762 * Poll until we see the 1's complement of the magic number.
2763 * This indicates that the firmware initialization is complete.
2765 for (i = 0; i < BNX_FIRMWARE_TIMEOUT; i++) {
2766 val = bnx_readmem_ind(sc, BGE_SRAM_FW_MB);
2767 if (val == ~BGE_SRAM_FW_MB_MAGIC)
2771 if (i == BNX_FIRMWARE_TIMEOUT) {
2772 if_printf(&sc->arpcom.ac_if, "firmware handshake "
2773 "timed out, found 0x%08x\n", val);
2776 /* BCM57765 A0 needs additional time before accessing. */
2777 if (sc->bnx_chipid == BGE_CHIPID_BCM57765_A0)
2781 * The 5704 in TBI mode apparently needs some special
* adjustment to ensure the SERDES drive level is set
2785 if (sc->bnx_asicrev == BGE_ASICREV_BCM5704 &&
2786 (sc->bnx_flags & BNX_FLAG_TBI)) {
2789 serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG);
2790 serdescfg = (serdescfg & ~0xFFF) | 0x880;
2791 CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg);
2794 CSR_WRITE_4(sc, BGE_MI_MODE,
2795 sc->bnx_mi_mode & ~BGE_MIMODE_AUTOPOLL);
2798 /* XXX: Broadcom Linux driver. */
2799 if (!BNX_IS_57765_PLUS(sc)) {
2802 /* Enable Data FIFO protection. */
2803 v = CSR_READ_4(sc, BGE_PCIE_TLDLPL_PORT);
2804 CSR_WRITE_4(sc, BGE_PCIE_TLDLPL_PORT, v | (1 << 25));
2809 if (sc->bnx_asicrev == BGE_ASICREV_BCM5720) {
2810 BNX_CLRBIT(sc, BGE_CPMU_CLCK_ORIDE,
2811 CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
2816 * Frame reception handling. This is called if there's a frame
2817 * on the receive return list.
2819 * Note: we have to be able to handle two possibilities here:
* 1) the frame is from the jumbo receive ring
2821 * 2) the frame is from the standard receive ring
2825 bnx_rxeof(struct bnx_rx_ret_ring *ret, uint16_t rx_prod, int count)
2827 struct bnx_softc *sc = ret->bnx_sc;
2828 struct bnx_rx_std_ring *std = ret->bnx_std;
2829 struct ifnet *ifp = &sc->arpcom.ac_if;
2830 int std_used = 0, cpuid = mycpuid;
2832 while (ret->bnx_rx_saved_considx != rx_prod && count != 0) {
2833 struct pktinfo pi0, *pi = NULL;
2834 struct bge_rx_bd *cur_rx;
2835 struct bnx_rx_buf *rb;
2837 struct mbuf *m = NULL;
2838 uint16_t vlan_tag = 0;
2843 cur_rx = &ret->bnx_rx_ret_ring[ret->bnx_rx_saved_considx];
2845 rxidx = cur_rx->bge_idx;
2846 KKASSERT(rxidx < BGE_STD_RX_RING_CNT);
2848 BNX_INC(ret->bnx_rx_saved_considx, BNX_RETURN_RING_CNT);
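/*
 * BNX_INC() is assumed to be the usual bge-style modular increment,
 * i.e. (x) = ((x) + 1) % (count), wrapping the saved consumer index
 * at the end of the return ring.
 */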
2849 #ifdef BNX_RSS_DEBUG
2853 if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
2855 vlan_tag = cur_rx->bge_vlan_tag;
2858 if (ret->bnx_rx_cnt >= ret->bnx_rx_cntmax) {
2859 atomic_add_int(&std->bnx_rx_std_used, std_used);
2862 bnx_rx_std_refill_sched(ret, std);
2867 rb = &std->bnx_rx_std_buf[rxidx];
2868 m = rb->bnx_rx_mbuf;
2869 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2870 IFNET_STAT_INC(ifp, ierrors, 1);
2872 rb->bnx_rx_refilled = 1;
2875 if (bnx_newbuf_std(ret, rxidx, 0)) {
2876 IFNET_STAT_INC(ifp, ierrors, 1);
2880 IFNET_STAT_INC(ifp, ipackets, 1);
2881 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
2882 m->m_pkthdr.rcvif = ifp;
2884 if ((ifp->if_capenable & IFCAP_RXCSUM) &&
2885 (cur_rx->bge_flags & BGE_RXBDFLAG_IPV6) == 0) {
2886 if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
2887 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2888 if ((cur_rx->bge_error_flag &
2889 BGE_RXERRFLAG_IP_CSUM_NOK) == 0)
2890 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2892 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) {
2893 m->m_pkthdr.csum_data =
2894 cur_rx->bge_tcp_udp_csum;
2895 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
2899 if (ifp->if_capenable & IFCAP_RSS) {
2900 pi = bnx_rss_info(&pi0, cur_rx);
2902 (cur_rx->bge_flags & BGE_RXBDFLAG_RSS_HASH)) {
2903 m->m_flags |= M_HASH;
2905 toeplitz_hash(cur_rx->bge_hash);
2910 * If we received a packet with a vlan tag, pass it
2911 * to vlan_input() instead of ether_input().
2914 m->m_flags |= M_VLANTAG;
2915 m->m_pkthdr.ether_vlantag = vlan_tag;
2917 ifp->if_input(ifp, m, pi, cpuid);
2919 bnx_writembx(sc, ret->bnx_rx_mbx, ret->bnx_rx_saved_considx);
2924 cur_std_used = atomic_fetchadd_int(&std->bnx_rx_std_used,
2926 if (cur_std_used + std_used >= (BGE_STD_RX_RING_CNT / 2)) {
2927 #ifdef BNX_RSS_DEBUG
2928 ret->bnx_rx_force_sched++;
2930 bnx_rx_std_refill_sched(ret, std);
2936 bnx_txeof(struct bnx_tx_ring *txr, uint16_t tx_cons)
2938 struct ifnet *ifp = &txr->bnx_sc->arpcom.ac_if;
2941 * Go through our tx ring and free mbufs for those
2942 * frames that have been sent.
2944 while (txr->bnx_tx_saved_considx != tx_cons) {
2945 struct bnx_tx_buf *buf;
2948 idx = txr->bnx_tx_saved_considx;
2949 buf = &txr->bnx_tx_buf[idx];
2950 if (buf->bnx_tx_mbuf != NULL) {
2951 IFNET_STAT_INC(ifp, opackets, 1);
2952 #ifdef BNX_TSS_DEBUG
2955 bus_dmamap_unload(txr->bnx_tx_mtag,
2956 buf->bnx_tx_dmamap);
2957 m_freem(buf->bnx_tx_mbuf);
2958 buf->bnx_tx_mbuf = NULL;
2961 BNX_INC(txr->bnx_tx_saved_considx, BGE_TX_RING_CNT);
2964 if ((BGE_TX_RING_CNT - txr->bnx_tx_cnt) >=
2965 (BNX_NSEG_RSVD + BNX_NSEG_SPARE))
2966 ifsq_clr_oactive(txr->bnx_ifsq);
2968 if (txr->bnx_tx_cnt == 0)
2969 txr->bnx_tx_watchdog.wd_timer = 0;
2971 if (!ifsq_is_empty(txr->bnx_ifsq))
2972 ifsq_devstart(txr->bnx_ifsq);
2976 bnx_handle_status(struct bnx_softc *sc)
2981 status = *sc->bnx_hw_status;
2983 if (status & BGE_STATFLAG_ERROR) {
2989 val = CSR_READ_4(sc, BGE_FLOW_ATTN);
2990 if (val & ~BGE_FLOWATTN_MB_LOWAT) {
2991 if_printf(&sc->arpcom.ac_if,
2992 "flow attn 0x%08x\n", val);
2996 val = CSR_READ_4(sc, BGE_MSI_STATUS);
2997 if (val & ~BGE_MSISTAT_MSI_PCI_REQ) {
2998 if_printf(&sc->arpcom.ac_if,
2999 "msi status 0x%08x\n", val);
3003 val = CSR_READ_4(sc, BGE_RDMA_STATUS);
3005 if_printf(&sc->arpcom.ac_if,
3006 "rmda status 0x%08x\n", val);
3010 val = CSR_READ_4(sc, BGE_WDMA_STATUS);
3012 if_printf(&sc->arpcom.ac_if,
3013 "wdma status 0x%08x\n", val);
3018 bnx_serialize_skipmain(sc);
3020 bnx_deserialize_skipmain(sc);
3025 if ((status & BGE_STATFLAG_LINKSTATE_CHANGED) || sc->bnx_link_evt) {
3027 if_printf(&sc->arpcom.ac_if, "link change, "
3028 "link_evt %d\n", sc->bnx_link_evt);
3037 #ifdef IFPOLL_ENABLE
3040 bnx_npoll_rx(struct ifnet *ifp __unused, void *xret, int cycle)
3042 struct bnx_rx_ret_ring *ret = xret;
3045 ASSERT_SERIALIZED(&ret->bnx_rx_ret_serialize);
3047 ret->bnx_saved_status_tag = *ret->bnx_hw_status_tag;
3050 rx_prod = *ret->bnx_rx_considx;
3051 if (ret->bnx_rx_saved_considx != rx_prod)
3052 bnx_rxeof(ret, rx_prod, cycle);
3056 bnx_npoll_tx_notag(struct ifnet *ifp __unused, void *xtxr, int cycle __unused)
3058 struct bnx_tx_ring *txr = xtxr;
3061 ASSERT_SERIALIZED(&txr->bnx_tx_serialize);
3063 tx_cons = *txr->bnx_tx_considx;
3064 if (txr->bnx_tx_saved_considx != tx_cons)
3065 bnx_txeof(txr, tx_cons);
3069 bnx_npoll_tx(struct ifnet *ifp, void *xtxr, int cycle)
3071 struct bnx_tx_ring *txr = xtxr;
3073 ASSERT_SERIALIZED(&txr->bnx_tx_serialize);
3075 txr->bnx_saved_status_tag = *txr->bnx_hw_status_tag;
3077 bnx_npoll_tx_notag(ifp, txr, cycle);
3081 bnx_npoll_status_notag(struct ifnet *ifp)
3083 struct bnx_softc *sc = ifp->if_softc;
3085 ASSERT_SERIALIZED(&sc->bnx_main_serialize);
3087 if (bnx_handle_status(sc)) {
3089 * Status changes are handled; force the chip to
3090 * update the status block to reflect whether there
* are more status changes or not, otherwise stale
* status changes will be seen forever.
3094 BNX_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
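/*
 * COAL_NOW forces the host coalescing engine to DMA a fresh
 * status block (and raise an interrupt) immediately instead of
 * waiting for the normal coalescing thresholds.
 */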
3099 bnx_npoll_status(struct ifnet *ifp)
3101 struct bnx_softc *sc = ifp->if_softc;
3103 ASSERT_SERIALIZED(&sc->bnx_main_serialize);
3105 sc->bnx_saved_status_tag = *sc->bnx_hw_status_tag;
3107 bnx_npoll_status_notag(ifp);
3111 bnx_npoll(struct ifnet *ifp, struct ifpoll_info *info)
3113 struct bnx_softc *sc = ifp->if_softc;
3116 ASSERT_IFNET_SERIALIZED_ALL(ifp);
3119 if (sc->bnx_flags & BNX_FLAG_STATUS_HASTAG)
3120 info->ifpi_status.status_func = bnx_npoll_status;
3122 info->ifpi_status.status_func = bnx_npoll_status_notag;
3123 info->ifpi_status.serializer = &sc->bnx_main_serialize;
3125 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) {
3126 struct bnx_tx_ring *txr = &sc->bnx_tx_ring[i];
3127 int idx = i + sc->bnx_npoll_txoff;
3129 KKASSERT(idx < ncpus2);
3130 if (sc->bnx_flags & BNX_FLAG_RXTX_BUNDLE) {
3131 info->ifpi_tx[idx].poll_func =
3134 info->ifpi_tx[idx].poll_func = bnx_npoll_tx;
3136 info->ifpi_tx[idx].arg = txr;
3137 info->ifpi_tx[idx].serializer = &txr->bnx_tx_serialize;
3138 ifsq_set_cpuid(txr->bnx_ifsq, idx);
3141 for (i = 0; i < sc->bnx_rx_retcnt; ++i) {
3142 struct bnx_rx_ret_ring *ret = &sc->bnx_rx_ret_ring[i];
3143 int idx = i + sc->bnx_npoll_rxoff;
3145 KKASSERT(idx < ncpus2);
3146 info->ifpi_rx[idx].poll_func = bnx_npoll_rx;
3147 info->ifpi_rx[idx].arg = ret;
3148 info->ifpi_rx[idx].serializer =
3149 &ret->bnx_rx_ret_serialize;
3152 if (ifp->if_flags & IFF_RUNNING) {
3153 bnx_disable_intr(sc);
3154 bnx_set_tick_cpuid(sc, TRUE);
3156 sc->bnx_coal_chg = BNX_TX_COAL_BDS_CHG |
3157 BNX_RX_COAL_BDS_CHG;
3158 bnx_coal_change(sc);
3161 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) {
3162 ifsq_set_cpuid(sc->bnx_tx_ring[i].bnx_ifsq,
3163 sc->bnx_tx_ring[i].bnx_tx_cpuid);
3165 if (ifp->if_flags & IFF_RUNNING) {
3166 sc->bnx_coal_chg = BNX_TX_COAL_BDS_CHG |
3167 BNX_RX_COAL_BDS_CHG;
3168 bnx_coal_change(sc);
3170 bnx_enable_intr(sc);
3171 bnx_set_tick_cpuid(sc, FALSE);
3176 #endif /* IFPOLL_ENABLE */
3179 bnx_intr_legacy(void *xsc)
3181 struct bnx_softc *sc = xsc;
3182 struct bnx_rx_ret_ring *ret = &sc->bnx_rx_ret_ring[0];
3184 if (ret->bnx_saved_status_tag == *ret->bnx_hw_status_tag) {
3187 val = pci_read_config(sc->bnx_dev, BGE_PCI_PCISTATE, 4);
3188 if (val & BGE_PCISTAT_INTR_NOTACT)
* The interrupt has to be disabled if tagged status
* is used, otherwise the interrupt is always asserted
* on certain chips (at least on BCM5750 AX/BX).
3198 bnx_writembx(sc, BGE_MBX_IRQ0_LO, 1);
3210 bnx_intr(struct bnx_softc *sc)
3212 struct ifnet *ifp = &sc->arpcom.ac_if;
3213 struct bnx_rx_ret_ring *ret = &sc->bnx_rx_ret_ring[0];
3215 ASSERT_SERIALIZED(&sc->bnx_main_serialize);
3217 ret->bnx_saved_status_tag = *ret->bnx_hw_status_tag;
3219 * Use a load fence to ensure that status_tag is saved
3220 * before rx_prod, tx_cons and status.
3224 bnx_handle_status(sc);
3226 if (ifp->if_flags & IFF_RUNNING) {
3227 struct bnx_tx_ring *txr = &sc->bnx_tx_ring[0];
3228 uint16_t rx_prod, tx_cons;
3230 lwkt_serialize_enter(&ret->bnx_rx_ret_serialize);
3231 rx_prod = *ret->bnx_rx_considx;
3232 if (ret->bnx_rx_saved_considx != rx_prod)
3233 bnx_rxeof(ret, rx_prod, -1);
3234 lwkt_serialize_exit(&ret->bnx_rx_ret_serialize);
3236 lwkt_serialize_enter(&txr->bnx_tx_serialize);
3237 tx_cons = *txr->bnx_tx_considx;
3238 if (txr->bnx_tx_saved_considx != tx_cons)
3239 bnx_txeof(txr, tx_cons);
3240 lwkt_serialize_exit(&txr->bnx_tx_serialize);
3243 bnx_writembx(sc, BGE_MBX_IRQ0_LO, ret->bnx_saved_status_tag << 24);
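/*
 * Writing the saved tag into bits 31:24 of the mailbox both
 * acknowledges status updates up to that tag and, with bit 0
 * clear, re-enables the interrupt that the bnx_writembx(..., 1)
 * in the legacy handler above masked.
 */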
3247 bnx_msix_tx_status(void *xtxr)
3249 struct bnx_tx_ring *txr = xtxr;
3250 struct bnx_softc *sc = txr->bnx_sc;
3251 struct ifnet *ifp = &sc->arpcom.ac_if;
3253 ASSERT_SERIALIZED(&sc->bnx_main_serialize);
3255 txr->bnx_saved_status_tag = *txr->bnx_hw_status_tag;
3257 * Use a load fence to ensure that status_tag is saved
3258 * before tx_cons and status.
3262 bnx_handle_status(sc);
3264 if (ifp->if_flags & IFF_RUNNING) {
3267 lwkt_serialize_enter(&txr->bnx_tx_serialize);
3268 tx_cons = *txr->bnx_tx_considx;
3269 if (txr->bnx_tx_saved_considx != tx_cons)
3270 bnx_txeof(txr, tx_cons);
3271 lwkt_serialize_exit(&txr->bnx_tx_serialize);
3274 bnx_writembx(sc, BGE_MBX_IRQ0_LO, txr->bnx_saved_status_tag << 24);
3278 bnx_msix_rx(void *xret)
3280 struct bnx_rx_ret_ring *ret = xret;
3283 ASSERT_SERIALIZED(&ret->bnx_rx_ret_serialize);
3285 ret->bnx_saved_status_tag = *ret->bnx_hw_status_tag;
3287 * Use a load fence to ensure that status_tag is saved
3292 rx_prod = *ret->bnx_rx_considx;
3293 if (ret->bnx_rx_saved_considx != rx_prod)
3294 bnx_rxeof(ret, rx_prod, -1);
3296 bnx_writembx(ret->bnx_sc, ret->bnx_msix_mbx,
3297 ret->bnx_saved_status_tag << 24);
3301 bnx_msix_rxtx(void *xret)
3303 struct bnx_rx_ret_ring *ret = xret;
3304 struct bnx_tx_ring *txr = ret->bnx_txr;
3305 uint16_t rx_prod, tx_cons;
3307 ASSERT_SERIALIZED(&ret->bnx_rx_ret_serialize);
3309 ret->bnx_saved_status_tag = *ret->bnx_hw_status_tag;
3311 * Use a load fence to ensure that status_tag is saved
3312 * before rx_prod and tx_cons.
3316 rx_prod = *ret->bnx_rx_considx;
3317 if (ret->bnx_rx_saved_considx != rx_prod)
3318 bnx_rxeof(ret, rx_prod, -1);
3320 lwkt_serialize_enter(&txr->bnx_tx_serialize);
3321 tx_cons = *txr->bnx_tx_considx;
3322 if (txr->bnx_tx_saved_considx != tx_cons)
3323 bnx_txeof(txr, tx_cons);
3324 lwkt_serialize_exit(&txr->bnx_tx_serialize);
3326 bnx_writembx(ret->bnx_sc, ret->bnx_msix_mbx,
3327 ret->bnx_saved_status_tag << 24);
3331 bnx_msix_status(void *xsc)
3333 struct bnx_softc *sc = xsc;
3335 ASSERT_SERIALIZED(&sc->bnx_main_serialize);
3337 sc->bnx_saved_status_tag = *sc->bnx_hw_status_tag;
3339 * Use a load fence to ensure that status_tag is saved
3344 bnx_handle_status(sc);
3346 bnx_writembx(sc, BGE_MBX_IRQ0_LO, sc->bnx_saved_status_tag << 24);
3352 struct bnx_softc *sc = xsc;
3354 lwkt_serialize_enter(&sc->bnx_main_serialize);
3356 bnx_stats_update_regs(sc);
3358 if (sc->bnx_flags & BNX_FLAG_TBI) {
* Since auto-polling can't be used in TBI mode, we poll the
* link status manually. Here we register a pending link event
* and trigger an interrupt.
3365 BNX_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
3366 } else if (!sc->bnx_link) {
3367 mii_tick(device_get_softc(sc->bnx_miibus));
3370 callout_reset_bycpu(&sc->bnx_tick_timer, hz, bnx_tick, sc,
3371 sc->bnx_tick_cpuid);
3373 lwkt_serialize_exit(&sc->bnx_main_serialize);
3377 bnx_stats_update_regs(struct bnx_softc *sc)
3379 struct ifnet *ifp = &sc->arpcom.ac_if;
3380 struct bge_mac_stats_regs stats;
3384 s = (uint32_t *)&stats;
3385 for (i = 0; i < sizeof(struct bge_mac_stats_regs); i += 4) {
3386 *s = CSR_READ_4(sc, BGE_RX_STATS + i);
3390 IFNET_STAT_SET(ifp, collisions,
3391 (stats.dot3StatsSingleCollisionFrames +
3392 stats.dot3StatsMultipleCollisionFrames +
3393 stats.dot3StatsExcessiveCollisions +
3394 stats.dot3StatsLateCollisions));
3396 val = CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS);
3397 sc->bnx_norxbds += val;
3401 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
3402 * pointers to descriptors.
3405 bnx_encap(struct bnx_tx_ring *txr, struct mbuf **m_head0, uint32_t *txidx,
3408 struct bge_tx_bd *d = NULL;
3409 uint16_t csum_flags = 0, vlan_tag = 0, mss = 0;
3410 bus_dma_segment_t segs[BNX_NSEG_NEW];
3412 int error, maxsegs, nsegs, idx, i;
3413 struct mbuf *m_head = *m_head0, *m_new;
3415 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
3416 #ifdef BNX_TSO_DEBUG
3420 error = bnx_setup_tso(txr, m_head0, &mss, &csum_flags);
3425 #ifdef BNX_TSO_DEBUG
3426 tso_nsegs = (m_head->m_pkthdr.len /
3427 m_head->m_pkthdr.tso_segsz) - 1;
3428 if (tso_nsegs > (BNX_TSO_NSTATS - 1))
3429 tso_nsegs = BNX_TSO_NSTATS - 1;
3430 else if (tso_nsegs < 0)
3432 txr->bnx_sc->bnx_tsosegs[tso_nsegs]++;
3434 } else if (m_head->m_pkthdr.csum_flags & BNX_CSUM_FEATURES) {
3435 if (m_head->m_pkthdr.csum_flags & CSUM_IP)
3436 csum_flags |= BGE_TXBDFLAG_IP_CSUM;
3437 if (m_head->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
3438 csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
3439 if (m_head->m_flags & M_LASTFRAG)
3440 csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
3441 else if (m_head->m_flags & M_FRAG)
3442 csum_flags |= BGE_TXBDFLAG_IP_FRAG;
3444 if (m_head->m_flags & M_VLANTAG) {
3445 csum_flags |= BGE_TXBDFLAG_VLAN_TAG;
3446 vlan_tag = m_head->m_pkthdr.ether_vlantag;
3450 map = txr->bnx_tx_buf[idx].bnx_tx_dmamap;
3452 maxsegs = (BGE_TX_RING_CNT - txr->bnx_tx_cnt) - BNX_NSEG_RSVD;
3453 KASSERT(maxsegs >= BNX_NSEG_SPARE,
3454 ("not enough segments %d", maxsegs));
3456 if (maxsegs > BNX_NSEG_NEW)
3457 maxsegs = BNX_NSEG_NEW;
3460 * Pad outbound frame to BGE_MIN_FRAMELEN for an unusual reason.
3461 * The bge hardware will pad out Tx runts to BGE_MIN_FRAMELEN,
3462 * but when such padded frames employ the bge IP/TCP checksum
3463 * offload, the hardware checksum assist gives incorrect results
3464 * (possibly from incorporating its own padding into the UDP/TCP
3465 * checksum; who knows). If we pad such runts with zeros, the
3466 * onboard checksum comes out correct.
3468 if ((csum_flags & BGE_TXBDFLAG_TCP_UDP_CSUM) &&
3469 m_head->m_pkthdr.len < BNX_MIN_FRAMELEN) {
3470 error = m_devpad(m_head, BNX_MIN_FRAMELEN);
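/*
 * m_devpad() appends zero bytes so the frame reaches
 * BNX_MIN_FRAMELEN (presumably 60 bytes, the ethernet minimum
 * without CRC), which keeps the hardware checksum correct as
 * described above.
 */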
3475 if ((txr->bnx_tx_flags & BNX_TX_FLAG_SHORTDMA) &&
3476 m_head->m_next != NULL) {
3477 m_new = bnx_defrag_shortdma(m_head);
3478 if (m_new == NULL) {
3482 *m_head0 = m_head = m_new;
3484 if ((m_head->m_pkthdr.csum_flags & CSUM_TSO) == 0 &&
3485 (txr->bnx_tx_flags & BNX_TX_FLAG_FORCE_DEFRAG) &&
3486 m_head->m_next != NULL) {
3488 * Forcefully defragment mbuf chain to overcome hardware
* limitation which only supports a single outstanding
3490 * DMA read operation. If it fails, keep moving on using
3491 * the original mbuf chain.
3493 m_new = m_defrag(m_head, MB_DONTWAIT);
3495 *m_head0 = m_head = m_new;
3498 error = bus_dmamap_load_mbuf_defrag(txr->bnx_tx_mtag, map,
3499 m_head0, segs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
3502 *segs_used += nsegs;
3505 bus_dmamap_sync(txr->bnx_tx_mtag, map, BUS_DMASYNC_PREWRITE);
3507 for (i = 0; ; i++) {
3508 d = &txr->bnx_tx_ring[idx];
3510 d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr);
3511 d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr);
3512 d->bge_len = segs[i].ds_len;
3513 d->bge_flags = csum_flags;
3514 d->bge_vlan_tag = vlan_tag;
3519 BNX_INC(idx, BGE_TX_RING_CNT);
3521 /* Mark the last segment as end of packet... */
3522 d->bge_flags |= BGE_TXBDFLAG_END;
* Ensure that the map for this transmission is placed at
3526 * the array index of the last descriptor in this chain.
3528 txr->bnx_tx_buf[*txidx].bnx_tx_dmamap = txr->bnx_tx_buf[idx].bnx_tx_dmamap;
3529 txr->bnx_tx_buf[idx].bnx_tx_dmamap = map;
3530 txr->bnx_tx_buf[idx].bnx_tx_mbuf = m_head;
3531 txr->bnx_tx_cnt += nsegs;
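/*
 * The dmamap swap above means the map that actually holds this
 * mbuf chain is stored in the same slot as the mbuf pointer, so
 * bnx_txeof() can unload exactly that map when it frees the mbuf.
 */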
3533 BNX_INC(idx, BGE_TX_RING_CNT);
3544 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
3545 * to the mbuf data regions directly in the transmit descriptors.
3548 bnx_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
3550 struct bnx_tx_ring *txr = ifsq_get_priv(ifsq);
3551 struct mbuf *m_head = NULL;
3555 KKASSERT(txr->bnx_ifsq == ifsq);
3556 ASSERT_SERIALIZED(&txr->bnx_tx_serialize);
3558 if ((ifp->if_flags & IFF_RUNNING) == 0 || ifsq_is_oactive(ifsq))
3561 prodidx = txr->bnx_tx_prodidx;
3563 while (txr->bnx_tx_buf[prodidx].bnx_tx_mbuf == NULL) {
* Sanity check: avoid coming within BNX_NSEG_RSVD
* descriptors of the end of the ring. Also make
* sure there are BNX_NSEG_SPARE descriptors for
* jumbo buffers' or TSO segments' defragmentation.
3570 if ((BGE_TX_RING_CNT - txr->bnx_tx_cnt) <
3571 (BNX_NSEG_RSVD + BNX_NSEG_SPARE)) {
3572 ifsq_set_oactive(ifsq);
3576 m_head = ifsq_dequeue(ifsq);
3581 * Pack the data into the transmit ring. If we
3582 * don't have room, set the OACTIVE flag and wait
3583 * for the NIC to drain the ring.
3585 if (bnx_encap(txr, &m_head, &prodidx, &nsegs)) {
3586 ifsq_set_oactive(ifsq);
3587 IFNET_STAT_INC(ifp, oerrors, 1);
3591 if (nsegs >= txr->bnx_tx_wreg) {
3593 bnx_writembx(txr->bnx_sc, txr->bnx_tx_mbx, prodidx);
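/*
 * Doorbell batching: the producer index is only pushed to the
 * TX mailbox once at least bnx_tx_wreg segments have been queued
 * (see the tx_wreg sysctl), amortizing the register write over
 * several frames; any remainder is flushed after the loop.
 */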
3597 ETHER_BPF_MTAP(ifp, m_head);
3600 * Set a timeout in case the chip goes out to lunch.
3602 txr->bnx_tx_watchdog.wd_timer = 5;
3607 bnx_writembx(txr->bnx_sc, txr->bnx_tx_mbx, prodidx);
3609 txr->bnx_tx_prodidx = prodidx;
3615 struct bnx_softc *sc = xsc;
3616 struct ifnet *ifp = &sc->arpcom.ac_if;
3622 ASSERT_IFNET_SERIALIZED_ALL(ifp);
3624 /* Cancel pending I/O and flush buffers. */
3627 bnx_sig_pre_reset(sc, BNX_RESET_START);
3629 bnx_sig_post_reset(sc, BNX_RESET_START);
3634 * Init the various state machines, ring
3635 * control blocks and firmware.
3637 if (bnx_blockinit(sc)) {
3638 if_printf(ifp, "initialization failure\n");
3644 CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
3645 ETHER_HDR_LEN + ETHER_CRC_LEN + EVL_ENCAPLEN);
3647 /* Load our MAC address. */
3648 m = (uint16_t *)&sc->arpcom.ac_enaddr[0];
3649 CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
3650 CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
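/*
 * The 48-bit MAC address is programmed as three big-endian 16-bit
 * words: e.g. 00:10:18:aa:bb:cc is written as 0x0010 into
 * BGE_MAC_ADDR1_LO and 0x18aabbcc into BGE_MAC_ADDR1_HI
 * (assuming the LO/HI split shown above).
 */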
3652 /* Enable or disable promiscuous mode as needed. */
3655 /* Program multicast filter. */
3659 if (bnx_init_rx_ring_std(&sc->bnx_rx_std_ring)) {
3660 if_printf(ifp, "RX ring initialization failed\n");
3665 /* Init jumbo RX ring. */
3666 if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN)) {
3667 if (bnx_init_rx_ring_jumbo(sc)) {
3668 if_printf(ifp, "Jumbo RX ring initialization failed\n");
3674 /* Init our RX return ring index */
3675 for (i = 0; i < sc->bnx_rx_retcnt; ++i) {
3676 struct bnx_rx_ret_ring *ret = &sc->bnx_rx_ret_ring[i];
3678 ret->bnx_rx_saved_considx = 0;
3679 ret->bnx_rx_cnt = 0;
3683 for (i = 0; i < sc->bnx_tx_ringcnt; ++i)
3684 bnx_init_tx_ring(&sc->bnx_tx_ring[i]);
3686 /* Enable TX MAC state machine lockup fix. */
3687 mode = CSR_READ_4(sc, BGE_TX_MODE);
3688 mode |= BGE_TXMODE_MBUF_LOCKUP_FIX;
3689 if (sc->bnx_asicrev == BGE_ASICREV_BCM5720 ||
3690 sc->bnx_asicrev == BGE_ASICREV_BCM5762) {
3691 mode &= ~(BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE);
3692 mode |= CSR_READ_4(sc, BGE_TX_MODE) &
3693 (BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE);
3695 /* Turn on transmitter */
3696 CSR_WRITE_4(sc, BGE_TX_MODE, mode | BGE_TXMODE_ENABLE);
3699 /* Initialize RSS */
3700 mode = BGE_RXMODE_ENABLE | BGE_RXMODE_IPV6_ENABLE;
3701 if (BNX_RSS_ENABLED(sc)) {
3703 mode |= BGE_RXMODE_RSS_ENABLE |
3704 BGE_RXMODE_RSS_HASH_MASK_BITS |
3705 BGE_RXMODE_RSS_IPV4_HASH |
3706 BGE_RXMODE_RSS_TCP_IPV4_HASH;
3708 /* Turn on receiver */
3709 BNX_SETBIT(sc, BGE_RX_MODE, mode);
3713 * Set the number of good frames to receive after RX MBUF
3714 * Low Watermark has been reached. After the RX MAC receives
3715 * this number of frames, it will drop subsequent incoming
3716 * frames until the MBUF High Watermark is reached.
3718 if (BNX_IS_57765_FAMILY(sc))
3719 CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 1);
3721 CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 2);
3723 if (sc->bnx_intr_type == PCI_INTR_TYPE_MSI ||
3724 sc->bnx_intr_type == PCI_INTR_TYPE_MSIX) {
3726 if_printf(ifp, "MSI_MODE: %#x\n",
3727 CSR_READ_4(sc, BGE_MSI_MODE));
3731 /* Tell firmware we're alive. */
3732 BNX_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3734 /* Enable host interrupts if polling(4) is not enabled. */
3735 PCI_SETBIT(sc->bnx_dev, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA, 4);
3738 #ifdef IFPOLL_ENABLE
3739 if (ifp->if_flags & IFF_NPOLLING)
3743 bnx_disable_intr(sc);
3745 bnx_enable_intr(sc);
3746 bnx_set_tick_cpuid(sc, polling);
3748 ifp->if_flags |= IFF_RUNNING;
3749 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) {
3750 struct bnx_tx_ring *txr = &sc->bnx_tx_ring[i];
3752 ifsq_clr_oactive(txr->bnx_ifsq);
3753 ifsq_watchdog_start(&txr->bnx_tx_watchdog);
3756 bnx_ifmedia_upd(ifp);
3758 callout_reset_bycpu(&sc->bnx_tick_timer, hz, bnx_tick, sc,
3759 sc->bnx_tick_cpuid);
3763 * Set media options.
3766 bnx_ifmedia_upd(struct ifnet *ifp)
3768 struct bnx_softc *sc = ifp->if_softc;
3770 /* If this is a 1000baseX NIC, enable the TBI port. */
3771 if (sc->bnx_flags & BNX_FLAG_TBI) {
3772 struct ifmedia *ifm = &sc->bnx_ifmedia;
3774 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
3777 switch(IFM_SUBTYPE(ifm->ifm_media)) {
3782 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
3783 BNX_CLRBIT(sc, BGE_MAC_MODE,
3784 BGE_MACMODE_HALF_DUPLEX);
3786 BNX_SETBIT(sc, BGE_MAC_MODE,
3787 BGE_MACMODE_HALF_DUPLEX);
3795 struct mii_data *mii = device_get_softc(sc->bnx_miibus);
3799 if (mii->mii_instance) {
3800 struct mii_softc *miisc;
3802 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
3803 mii_phy_reset(miisc);
3808 * Force an interrupt so that we will call bnx_link_upd
3809 * if needed and clear any pending link state attention.
* Without this we would not get any further interrupts
* for link state changes and thus would not bring the
* link UP or be able to send in bnx_start. The only way to get
* things working was to receive a packet and get an RX
* bnx_tick should help for fiber cards, and we might not
* need to do this here if BNX_FLAG_TBI is set, but as
* we poll for fiber anyway it should do no harm.
3820 BNX_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
3826 * Report current media status.
3829 bnx_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
3831 struct bnx_softc *sc = ifp->if_softc;
3833 if ((ifp->if_flags & IFF_RUNNING) == 0)
3836 if (sc->bnx_flags & BNX_FLAG_TBI) {
3837 ifmr->ifm_status = IFM_AVALID;
3838 ifmr->ifm_active = IFM_ETHER;
3839 if (CSR_READ_4(sc, BGE_MAC_STS) &
3840 BGE_MACSTAT_TBI_PCS_SYNCHED) {
3841 ifmr->ifm_status |= IFM_ACTIVE;
3843 ifmr->ifm_active |= IFM_NONE;
3847 ifmr->ifm_active |= IFM_1000_SX;
3848 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
3849 ifmr->ifm_active |= IFM_HDX;
3851 ifmr->ifm_active |= IFM_FDX;
3853 struct mii_data *mii = device_get_softc(sc->bnx_miibus);
3856 ifmr->ifm_active = mii->mii_media_active;
3857 ifmr->ifm_status = mii->mii_media_status;
3862 bnx_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
3864 struct bnx_softc *sc = ifp->if_softc;
3865 struct ifreq *ifr = (struct ifreq *)data;
3866 int mask, error = 0;
3868 ASSERT_IFNET_SERIALIZED_ALL(ifp);
3872 if ((!BNX_IS_JUMBO_CAPABLE(sc) && ifr->ifr_mtu > ETHERMTU) ||
3873 (BNX_IS_JUMBO_CAPABLE(sc) &&
3874 ifr->ifr_mtu > BNX_JUMBO_MTU)) {
3876 } else if (ifp->if_mtu != ifr->ifr_mtu) {
3877 ifp->if_mtu = ifr->ifr_mtu;
3878 if (ifp->if_flags & IFF_RUNNING)
3883 if (ifp->if_flags & IFF_UP) {
3884 if (ifp->if_flags & IFF_RUNNING) {
3885 mask = ifp->if_flags ^ sc->bnx_if_flags;
3888 * If only the state of the PROMISC flag
3889 * changed, then just use the 'set promisc
3890 * mode' command instead of reinitializing
3891 * the entire NIC. Doing a full re-init
3892 * means reloading the firmware and waiting
3893 * for it to start up, which may take a
3894 * second or two. Similarly for ALLMULTI.
3896 if (mask & IFF_PROMISC)
3898 if (mask & IFF_ALLMULTI)
3903 } else if (ifp->if_flags & IFF_RUNNING) {
3906 sc->bnx_if_flags = ifp->if_flags;
3910 if (ifp->if_flags & IFF_RUNNING)
3915 if (sc->bnx_flags & BNX_FLAG_TBI) {
3916 error = ifmedia_ioctl(ifp, ifr,
3917 &sc->bnx_ifmedia, command);
3919 struct mii_data *mii;
3921 mii = device_get_softc(sc->bnx_miibus);
3922 error = ifmedia_ioctl(ifp, ifr,
3923 &mii->mii_media, command);
3927 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
3928 if (mask & IFCAP_HWCSUM) {
3929 ifp->if_capenable ^= (mask & IFCAP_HWCSUM);
3930 if (ifp->if_capenable & IFCAP_TXCSUM)
3931 ifp->if_hwassist |= BNX_CSUM_FEATURES;
3933 ifp->if_hwassist &= ~BNX_CSUM_FEATURES;
3935 if (mask & IFCAP_TSO) {
3936 ifp->if_capenable ^= (mask & IFCAP_TSO);
3937 if (ifp->if_capenable & IFCAP_TSO)
3938 ifp->if_hwassist |= CSUM_TSO;
3940 ifp->if_hwassist &= ~CSUM_TSO;
3942 if (mask & IFCAP_RSS)
3943 ifp->if_capenable ^= IFCAP_RSS;
3946 error = ether_ioctl(ifp, command, data);
3953 bnx_watchdog(struct ifaltq_subque *ifsq)
3955 struct ifnet *ifp = ifsq_get_ifp(ifsq);
3956 struct bnx_softc *sc = ifp->if_softc;
3959 ASSERT_IFNET_SERIALIZED_ALL(ifp);
3961 if_printf(ifp, "watchdog timeout -- resetting\n");
3965 IFNET_STAT_INC(ifp, oerrors, 1);
3967 for (i = 0; i < sc->bnx_tx_ringcnt; ++i)
3968 ifsq_devstart_sched(sc->bnx_tx_ring[i].bnx_ifsq);
3972 * Stop the adapter and free any mbufs allocated to the
3976 bnx_stop(struct bnx_softc *sc)
3978 struct ifnet *ifp = &sc->arpcom.ac_if;
3981 ASSERT_IFNET_SERIALIZED_ALL(ifp);
3983 callout_stop(&sc->bnx_tick_timer);
3985 /* Disable host interrupts. */
3986 bnx_disable_intr(sc);
3989 * Tell firmware we're shutting down.
3991 bnx_sig_pre_reset(sc, BNX_RESET_SHUTDOWN);
3994 * Disable all of the receiver blocks
3996 bnx_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
3997 bnx_stop_block(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
3998 bnx_stop_block(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
3999 bnx_stop_block(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
4000 bnx_stop_block(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
4001 bnx_stop_block(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
4004 * Disable all of the transmit blocks
4006 bnx_stop_block(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
4007 bnx_stop_block(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
4008 bnx_stop_block(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
4009 bnx_stop_block(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
4010 bnx_stop_block(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
4011 bnx_stop_block(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
4014 * Shut down all of the memory managers and related
4017 bnx_stop_block(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
4018 bnx_stop_block(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
4019 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
4020 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
4023 bnx_sig_post_reset(sc, BNX_RESET_SHUTDOWN);
4026 * Tell firmware we're shutting down.
4028 BNX_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
4030 /* Free the RX lists. */
4031 bnx_free_rx_ring_std(&sc->bnx_rx_std_ring);
4033 /* Free jumbo RX list. */
4034 if (BNX_IS_JUMBO_CAPABLE(sc))
4035 bnx_free_rx_ring_jumbo(sc);
4037 /* Free TX buffers. */
4038 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) {
4039 struct bnx_tx_ring *txr = &sc->bnx_tx_ring[i];
4041 txr->bnx_saved_status_tag = 0;
4042 bnx_free_tx_ring(txr);
4045 /* Clear saved status tag */
4046 for (i = 0; i < sc->bnx_rx_retcnt; ++i)
4047 sc->bnx_rx_ret_ring[i].bnx_saved_status_tag = 0;
4050 sc->bnx_coal_chg = 0;
4052 ifp->if_flags &= ~IFF_RUNNING;
4053 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) {
4054 struct bnx_tx_ring *txr = &sc->bnx_tx_ring[i];
4056 ifsq_clr_oactive(txr->bnx_ifsq);
4057 ifsq_watchdog_stop(&txr->bnx_tx_watchdog);
4062 * Stop all chip I/O so that the kernel's probe routines don't
4063 * get confused by errant DMAs when rebooting.
4066 bnx_shutdown(device_t dev)
4068 struct bnx_softc *sc = device_get_softc(dev);
4069 struct ifnet *ifp = &sc->arpcom.ac_if;
4071 ifnet_serialize_all(ifp);
4073 ifnet_deserialize_all(ifp);
4077 bnx_suspend(device_t dev)
4079 struct bnx_softc *sc = device_get_softc(dev);
4080 struct ifnet *ifp = &sc->arpcom.ac_if;
4082 ifnet_serialize_all(ifp);
4084 ifnet_deserialize_all(ifp);
4090 bnx_resume(device_t dev)
4092 struct bnx_softc *sc = device_get_softc(dev);
4093 struct ifnet *ifp = &sc->arpcom.ac_if;
4095 ifnet_serialize_all(ifp);
4097 if (ifp->if_flags & IFF_UP) {
4101 for (i = 0; i < sc->bnx_tx_ringcnt; ++i)
4102 ifsq_devstart_sched(sc->bnx_tx_ring[i].bnx_ifsq);
4105 ifnet_deserialize_all(ifp);
4111 bnx_setpromisc(struct bnx_softc *sc)
4113 struct ifnet *ifp = &sc->arpcom.ac_if;
4115 if (ifp->if_flags & IFF_PROMISC)
4116 BNX_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
4118 BNX_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
4122 bnx_dma_free(struct bnx_softc *sc)
4124 struct bnx_rx_std_ring *std = &sc->bnx_rx_std_ring;
4127 /* Destroy RX return rings */
4128 if (sc->bnx_rx_ret_ring != NULL) {
4129 for (i = 0; i < sc->bnx_rx_retcnt; ++i)
4130 bnx_destroy_rx_ret_ring(&sc->bnx_rx_ret_ring[i]);
4131 kfree(sc->bnx_rx_ret_ring, M_DEVBUF);
/* Destroy RX mbuf DMA resources. */
4135 if (std->bnx_rx_mtag != NULL) {
4136 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
4137 KKASSERT(std->bnx_rx_std_buf[i].bnx_rx_mbuf == NULL);
4138 bus_dmamap_destroy(std->bnx_rx_mtag,
4139 std->bnx_rx_std_buf[i].bnx_rx_dmamap);
4141 bus_dma_tag_destroy(std->bnx_rx_mtag);
4144 /* Destroy standard RX ring */
4145 bnx_dma_block_free(std->bnx_rx_std_ring_tag,
4146 std->bnx_rx_std_ring_map, std->bnx_rx_std_ring);
4148 /* Destroy TX rings */
4149 if (sc->bnx_tx_ring != NULL) {
4150 for (i = 0; i < sc->bnx_tx_ringcnt; ++i)
4151 bnx_destroy_tx_ring(&sc->bnx_tx_ring[i]);
4152 kfree(sc->bnx_tx_ring, M_DEVBUF);
4155 if (BNX_IS_JUMBO_CAPABLE(sc))
4156 bnx_free_jumbo_mem(sc);
4158 /* Destroy status blocks */
4159 for (i = 0; i < sc->bnx_intr_cnt; ++i) {
4160 struct bnx_intr_data *intr = &sc->bnx_intr_data[i];
4162 bnx_dma_block_free(intr->bnx_status_tag,
4163 intr->bnx_status_map, intr->bnx_status_block);
4166 /* Destroy the parent tag */
4167 if (sc->bnx_cdata.bnx_parent_tag != NULL)
4168 bus_dma_tag_destroy(sc->bnx_cdata.bnx_parent_tag);
4172 bnx_dma_alloc(device_t dev)
4174 struct bnx_softc *sc = device_get_softc(dev);
4175 struct bnx_rx_std_ring *std = &sc->bnx_rx_std_ring;
4179 * Allocate the parent bus DMA tag appropriate for PCI.
4181 * All of the NetExtreme/NetLink controllers have a 4GB boundary
4183 * DMA bug: whenever an address crosses a multiple of the 4GB
4184 * boundary (including 4GB, 8GB, 12GB, etc.) and makes the
4185 * transition from 0xX_FFFF_FFFF to 0x(X+1)_0000_0000, an internal
4186 * DMA state machine will lock up and cause the device to hang.
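/*
 * The BGE_DMA_BOUNDARY_4G boundary argument below makes busdma
 * guarantee that no DMA segment ever straddles a 4GB multiple,
 * which sidesteps the lockup described above.
 */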
4188 error = bus_dma_tag_create(NULL, 1, BGE_DMA_BOUNDARY_4G,
4189 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
4190 BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT,
4191 0, &sc->bnx_cdata.bnx_parent_tag);
4193 device_printf(dev, "could not create parent DMA tag\n");
4198 * Create DMA resources for status blocks.
4200 for (i = 0; i < sc->bnx_intr_cnt; ++i) {
4201 struct bnx_intr_data *intr = &sc->bnx_intr_data[i];
4203 error = bnx_dma_block_alloc(sc,
4204 __VM_CACHELINE_ALIGN(BGE_STATUS_BLK_SZ),
4205 &intr->bnx_status_tag, &intr->bnx_status_map,
4206 (void *)&intr->bnx_status_block,
4207 &intr->bnx_status_block_paddr);
4210 "could not create %dth status block\n", i);
4214 sc->bnx_hw_status = &sc->bnx_intr_data[0].bnx_status_block->bge_status;
4215 if (sc->bnx_flags & BNX_FLAG_STATUS_HASTAG) {
4216 sc->bnx_hw_status_tag =
4217 &sc->bnx_intr_data[0].bnx_status_block->bge_status_tag;
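/*
 * On chips with tagged status (BNX_FLAG_STATUS_HASTAG), this tag
 * snapshot is what later gets written back, shifted into bits
 * 31:24 of the interrupt mailbox, to acknowledge exactly the
 * status updates that have been processed.
 */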
4221 * Create DMA tag and maps for RX mbufs.
4224 lwkt_serialize_init(&std->bnx_rx_std_serialize);
4225 error = bus_dma_tag_create(sc->bnx_cdata.bnx_parent_tag, 1, 0,
4226 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
4227 NULL, NULL, MCLBYTES, 1, MCLBYTES,
4228 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK, &std->bnx_rx_mtag);
4230 device_printf(dev, "could not create RX mbuf DMA tag\n");
4234 for (i = 0; i < BGE_STD_RX_RING_CNT; ++i) {
4235 error = bus_dmamap_create(std->bnx_rx_mtag, BUS_DMA_WAITOK,
4236 &std->bnx_rx_std_buf[i].bnx_rx_dmamap);
4240 for (j = 0; j < i; ++j) {
4241 bus_dmamap_destroy(std->bnx_rx_mtag,
4242 std->bnx_rx_std_buf[j].bnx_rx_dmamap);
4244 bus_dma_tag_destroy(std->bnx_rx_mtag);
4245 std->bnx_rx_mtag = NULL;
4248 "could not create %dth RX mbuf DMA map\n", i);
4254 * Create DMA resources for standard RX ring.
4256 error = bnx_dma_block_alloc(sc, BGE_STD_RX_RING_SZ,
4257 &std->bnx_rx_std_ring_tag,
4258 &std->bnx_rx_std_ring_map,
4259 (void *)&std->bnx_rx_std_ring,
4260 &std->bnx_rx_std_ring_paddr);
4262 device_printf(dev, "could not create std RX ring\n");
4267 * Create RX return rings
4269 mbx = BGE_MBX_RX_CONS0_LO;
4270 sc->bnx_rx_ret_ring = kmalloc_cachealign(
4271 sizeof(struct bnx_rx_ret_ring) * sc->bnx_rx_retcnt, M_DEVBUF,
4273 for (i = 0; i < sc->bnx_rx_retcnt; ++i) {
4274 struct bnx_rx_ret_ring *ret = &sc->bnx_rx_ret_ring[i];
4275 struct bnx_intr_data *intr;
4279 ret->bnx_rx_mbx = mbx;
4280 ret->bnx_rx_cntmax = (BGE_STD_RX_RING_CNT / 4) /
4282 ret->bnx_rx_mask = 1 << i;
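/*
 * bnx_rx_mask is this return ring's bit in the standard ring's
 * refill bitmask; the refill ithread uses it to tell which return
 * rings have consumed buffers from the shared standard ring.
 */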
4284 if (!BNX_RSS_ENABLED(sc)) {
4285 intr = &sc->bnx_intr_data[0];
4287 KKASSERT(i + 1 < sc->bnx_intr_cnt);
4288 intr = &sc->bnx_intr_data[i + 1];
4292 ret->bnx_rx_considx =
4293 &intr->bnx_status_block->bge_idx[0].bge_rx_prod_idx;
4294 } else if (i == 1) {
4295 ret->bnx_rx_considx =
4296 &intr->bnx_status_block->bge_rx_jumbo_cons_idx;
4297 } else if (i == 2) {
4298 ret->bnx_rx_considx =
4299 &intr->bnx_status_block->bge_rsvd1;
4300 } else if (i == 3) {
4301 ret->bnx_rx_considx =
4302 &intr->bnx_status_block->bge_rx_mini_cons_idx;
4304 panic("unknown RX return ring %d\n", i);
4306 ret->bnx_hw_status_tag =
4307 &intr->bnx_status_block->bge_status_tag;
4309 error = bnx_create_rx_ret_ring(ret);
4312 "could not create %dth RX ret ring\n", i);
4321 sc->bnx_tx_ring = kmalloc_cachealign(
4322 sizeof(struct bnx_tx_ring) * sc->bnx_tx_ringcnt, M_DEVBUF,
4324 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) {
4325 struct bnx_tx_ring *txr = &sc->bnx_tx_ring[i];
4326 struct bnx_intr_data *intr;
4329 txr->bnx_tx_mbx = bnx_tx_mailbox[i];
4331 if (sc->bnx_tx_ringcnt == 1) {
4332 intr = &sc->bnx_intr_data[0];
4334 KKASSERT(i + 1 < sc->bnx_intr_cnt);
4335 intr = &sc->bnx_intr_data[i + 1];
4338 if ((sc->bnx_flags & BNX_FLAG_RXTX_BUNDLE) == 0) {
4339 txr->bnx_hw_status_tag =
4340 &intr->bnx_status_block->bge_status_tag;
4342 txr->bnx_tx_considx =
4343 &intr->bnx_status_block->bge_idx[0].bge_tx_cons_idx;
4345 error = bnx_create_tx_ring(txr);
4348 "could not create %dth TX ring\n", i);
4354 * Create jumbo buffer pool.
4356 if (BNX_IS_JUMBO_CAPABLE(sc)) {
4357 error = bnx_alloc_jumbo_mem(sc);
4360 "could not create jumbo buffer pool\n");
4369 bnx_dma_block_alloc(struct bnx_softc *sc, bus_size_t size, bus_dma_tag_t *tag,
4370 bus_dmamap_t *map, void **addr, bus_addr_t *paddr)
4375 error = bus_dmamem_coherent(sc->bnx_cdata.bnx_parent_tag, PAGE_SIZE, 0,
4376 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
4377 size, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
4381 *tag = dmem.dmem_tag;
4382 *map = dmem.dmem_map;
4383 *addr = dmem.dmem_addr;
4384 *paddr = dmem.dmem_busaddr;
4390 bnx_dma_block_free(bus_dma_tag_t tag, bus_dmamap_t map, void *addr)
4393 bus_dmamap_unload(tag, map);
4394 bus_dmamem_free(tag, addr, map);
4395 bus_dma_tag_destroy(tag);
4400 bnx_tbi_link_upd(struct bnx_softc *sc, uint32_t status)
4402 struct ifnet *ifp = &sc->arpcom.ac_if;
4404 #define PCS_ENCODE_ERR (BGE_MACSTAT_PORT_DECODE_ERROR|BGE_MACSTAT_MI_COMPLETE)
4407 * Sometimes PCS encoding errors are detected in
4408 * TBI mode (on fiber NICs), and for some reason
4409 * the chip will signal them as link changes.
4410 * If we get a link change event, but the 'PCS
4411 * encoding error' bit in the MAC status register
4412 * is set, don't bother doing a link check.
4413 * This avoids spurious "gigabit link up" messages
4414 * that sometimes appear on fiber NICs during
4415 * periods of heavy traffic.
4417 if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
4418 if (!sc->bnx_link) {
4420 if (sc->bnx_asicrev == BGE_ASICREV_BCM5704) {
4421 BNX_CLRBIT(sc, BGE_MAC_MODE,
4422 BGE_MACMODE_TBI_SEND_CFGS);
4425 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
4428 if_printf(ifp, "link UP\n");
4430 ifp->if_link_state = LINK_STATE_UP;
4431 if_link_state_change(ifp);
4433 } else if ((status & PCS_ENCODE_ERR) != PCS_ENCODE_ERR) {
4438 if_printf(ifp, "link DOWN\n");
4440 ifp->if_link_state = LINK_STATE_DOWN;
4441 if_link_state_change(ifp);
4445 #undef PCS_ENCODE_ERR
4447 /* Clear the attention. */
4448 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
4449 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
4450 BGE_MACSTAT_LINK_CHANGED);
4454 bnx_copper_link_upd(struct bnx_softc *sc, uint32_t status __unused)
4456 struct ifnet *ifp = &sc->arpcom.ac_if;
4457 struct mii_data *mii = device_get_softc(sc->bnx_miibus);
4460 bnx_miibus_statchg(sc->bnx_dev);
4464 if_printf(ifp, "link UP\n");
4466 if_printf(ifp, "link DOWN\n");
4469 /* Clear the attention. */
4470 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
4471 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
4472 BGE_MACSTAT_LINK_CHANGED);
4476 bnx_autopoll_link_upd(struct bnx_softc *sc, uint32_t status __unused)
4478 struct ifnet *ifp = &sc->arpcom.ac_if;
4479 struct mii_data *mii = device_get_softc(sc->bnx_miibus);
4483 if (!sc->bnx_link &&
4484 (mii->mii_media_status & IFM_ACTIVE) &&
4485 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
4488 if_printf(ifp, "link UP\n");
4489 } else if (sc->bnx_link &&
4490 (!(mii->mii_media_status & IFM_ACTIVE) ||
4491 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
4494 if_printf(ifp, "link DOWN\n");
4497 /* Clear the attention. */
4498 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
4499 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
4500 BGE_MACSTAT_LINK_CHANGED);
4504 bnx_sysctl_rx_coal_ticks(SYSCTL_HANDLER_ARGS)
4506 struct bnx_softc *sc = arg1;
4508 return bnx_sysctl_coal_chg(oidp, arg1, arg2, req,
4509 &sc->bnx_rx_coal_ticks,
4510 BNX_RX_COAL_TICKS_MIN, BNX_RX_COAL_TICKS_MAX,
4511 BNX_RX_COAL_TICKS_CHG);
4515 bnx_sysctl_tx_coal_ticks(SYSCTL_HANDLER_ARGS)
4517 struct bnx_softc *sc = arg1;
4519 return bnx_sysctl_coal_chg(oidp, arg1, arg2, req,
4520 &sc->bnx_tx_coal_ticks,
4521 BNX_TX_COAL_TICKS_MIN, BNX_TX_COAL_TICKS_MAX,
4522 BNX_TX_COAL_TICKS_CHG);
4526 bnx_sysctl_rx_coal_bds(SYSCTL_HANDLER_ARGS)
4528 struct bnx_softc *sc = arg1;
4530 return bnx_sysctl_coal_chg(oidp, arg1, arg2, req,
4531 &sc->bnx_rx_coal_bds,
4532 BNX_RX_COAL_BDS_MIN, BNX_RX_COAL_BDS_MAX,
4533 BNX_RX_COAL_BDS_CHG);
4537 bnx_sysctl_rx_coal_bds_poll(SYSCTL_HANDLER_ARGS)
4539 struct bnx_softc *sc = arg1;
4541 return bnx_sysctl_coal_chg(oidp, arg1, arg2, req,
4542 &sc->bnx_rx_coal_bds_poll,
4543 BNX_RX_COAL_BDS_MIN, BNX_RX_COAL_BDS_MAX,
4544 BNX_RX_COAL_BDS_CHG);
4548 bnx_sysctl_tx_coal_bds(SYSCTL_HANDLER_ARGS)
4550 struct bnx_softc *sc = arg1;
4552 return bnx_sysctl_coal_chg(oidp, arg1, arg2, req,
4553 &sc->bnx_tx_coal_bds,
4554 BNX_TX_COAL_BDS_MIN, BNX_TX_COAL_BDS_MAX,
4555 BNX_TX_COAL_BDS_CHG);
4559 bnx_sysctl_tx_coal_bds_poll(SYSCTL_HANDLER_ARGS)
4561 struct bnx_softc *sc = arg1;
4563 return bnx_sysctl_coal_chg(oidp, arg1, arg2, req,
4564 &sc->bnx_tx_coal_bds_poll,
4565 BNX_TX_COAL_BDS_MIN, BNX_TX_COAL_BDS_MAX,
4566 BNX_TX_COAL_BDS_CHG);
4570 bnx_sysctl_rx_coal_bds_int(SYSCTL_HANDLER_ARGS)
4572 struct bnx_softc *sc = arg1;
4574 return bnx_sysctl_coal_chg(oidp, arg1, arg2, req,
4575 &sc->bnx_rx_coal_bds_int,
4576 BNX_RX_COAL_BDS_MIN, BNX_RX_COAL_BDS_MAX,
4577 BNX_RX_COAL_BDS_INT_CHG);
4581 bnx_sysctl_tx_coal_bds_int(SYSCTL_HANDLER_ARGS)
4583 struct bnx_softc *sc = arg1;
4585 return bnx_sysctl_coal_chg(oidp, arg1, arg2, req,
4586 &sc->bnx_tx_coal_bds_int,
4587 BNX_TX_COAL_BDS_MIN, BNX_TX_COAL_BDS_MAX,
4588 BNX_TX_COAL_BDS_INT_CHG);
4592 bnx_sysctl_coal_chg(SYSCTL_HANDLER_ARGS, uint32_t *coal,
4593 int coal_min, int coal_max, uint32_t coal_chg_mask)
4595 struct bnx_softc *sc = arg1;
4596 struct ifnet *ifp = &sc->arpcom.ac_if;
4599 ifnet_serialize_all(ifp);
4602 error = sysctl_handle_int(oidp, &v, 0, req);
4603 if (!error && req->newptr != NULL) {
4604 if (v < coal_min || v > coal_max) {
4608 sc->bnx_coal_chg |= coal_chg_mask;
4610 /* Commit changes */
4611 bnx_coal_change(sc);
4615 ifnet_deserialize_all(ifp);
4620 bnx_coal_change(struct bnx_softc *sc)
4622 struct ifnet *ifp = &sc->arpcom.ac_if;
4625 ASSERT_IFNET_SERIALIZED_ALL(ifp);
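/*
 * Every coalescing parameter below follows the same pattern: with
 * a single ring, program the legacy HCC register; otherwise zero
 * the HCC register, program one per-vector register for each active
 * ring (register sets are BGE_VEC_COALSET_SIZE apart) and clear the
 * registers of the unused vectors.
 */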
4627 if (sc->bnx_coal_chg & BNX_RX_COAL_TICKS_CHG) {
4628 if (sc->bnx_rx_retcnt == 1) {
4629 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS,
4630 sc->bnx_rx_coal_ticks);
4633 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, 0);
4634 for (i = 0; i < sc->bnx_rx_retcnt; ++i) {
4635 CSR_WRITE_4(sc, BGE_VEC1_RX_COAL_TICKS +
4636 (i * BGE_VEC_COALSET_SIZE),
4637 sc->bnx_rx_coal_ticks);
4640 for (; i < BNX_INTR_MAX - 1; ++i) {
4641 CSR_WRITE_4(sc, BGE_VEC1_RX_COAL_TICKS +
4642 (i * BGE_VEC_COALSET_SIZE), 0);
4645 if_printf(ifp, "rx_coal_ticks -> %u\n",
4646 sc->bnx_rx_coal_ticks);
4650 if (sc->bnx_coal_chg & BNX_TX_COAL_TICKS_CHG) {
4651 if (sc->bnx_tx_ringcnt == 1) {
4652 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS,
4653 sc->bnx_tx_coal_ticks);
4656 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, 0);
4657 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) {
4658 CSR_WRITE_4(sc, BGE_VEC1_TX_COAL_TICKS +
4659 (i * BGE_VEC_COALSET_SIZE),
4660 sc->bnx_tx_coal_ticks);
4663 for (; i < BNX_INTR_MAX - 1; ++i) {
4664 CSR_WRITE_4(sc, BGE_VEC1_TX_COAL_TICKS +
4665 (i * BGE_VEC_COALSET_SIZE), 0);
4668 if_printf(ifp, "tx_coal_ticks -> %u\n",
4669 sc->bnx_tx_coal_ticks);
4673 if (sc->bnx_coal_chg & BNX_RX_COAL_BDS_CHG) {
4674 uint32_t rx_coal_bds;
4676 if (ifp->if_flags & IFF_NPOLLING)
4677 rx_coal_bds = sc->bnx_rx_coal_bds_poll;
4679 rx_coal_bds = sc->bnx_rx_coal_bds;
4681 if (sc->bnx_rx_retcnt == 1) {
4682 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, rx_coal_bds);
4685 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, 0);
4686 for (i = 0; i < sc->bnx_rx_retcnt; ++i) {
4687 CSR_WRITE_4(sc, BGE_VEC1_RX_MAX_COAL_BDS +
4688 (i * BGE_VEC_COALSET_SIZE), rx_coal_bds);
4691 for (; i < BNX_INTR_MAX - 1; ++i) {
4692 CSR_WRITE_4(sc, BGE_VEC1_RX_MAX_COAL_BDS +
4693 (i * BGE_VEC_COALSET_SIZE), 0);
4696 if_printf(ifp, "%srx_coal_bds -> %u\n",
4697 (ifp->if_flags & IFF_NPOLLING) ? "polling " : "",
4702 if (sc->bnx_coal_chg & BNX_TX_COAL_BDS_CHG) {
4703 uint32_t tx_coal_bds;
4705 if (ifp->if_flags & IFF_NPOLLING)
4706 tx_coal_bds = sc->bnx_tx_coal_bds_poll;
4708 tx_coal_bds = sc->bnx_tx_coal_bds;
4710 if (sc->bnx_tx_ringcnt == 1) {
4711 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, tx_coal_bds);
4714 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, 0);
4715 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) {
4716 CSR_WRITE_4(sc, BGE_VEC1_TX_MAX_COAL_BDS +
4717 (i * BGE_VEC_COALSET_SIZE), tx_coal_bds);
4720 for (; i < BNX_INTR_MAX - 1; ++i) {
4721 CSR_WRITE_4(sc, BGE_VEC1_TX_MAX_COAL_BDS +
4722 (i * BGE_VEC_COALSET_SIZE), 0);
4725 if_printf(ifp, "%stx_coal_bds -> %u\n",
4726 (ifp->if_flags & IFF_NPOLLING) ? "polling " : "",
4731 if (sc->bnx_coal_chg & BNX_RX_COAL_BDS_INT_CHG) {
4732 if (sc->bnx_rx_retcnt == 1) {
4733 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT,
4734 sc->bnx_rx_coal_bds_int);
4737 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0);
4738 for (i = 0; i < sc->bnx_rx_retcnt; ++i) {
4739 CSR_WRITE_4(sc, BGE_VEC1_RX_MAX_COAL_BDS_INT +
4740 (i * BGE_VEC_COALSET_SIZE),
4741 sc->bnx_rx_coal_bds_int);
4744 for (; i < BNX_INTR_MAX - 1; ++i) {
4745 CSR_WRITE_4(sc, BGE_VEC1_RX_MAX_COAL_BDS_INT +
4746 (i * BGE_VEC_COALSET_SIZE), 0);
4749 if_printf(ifp, "rx_coal_bds_int -> %u\n",
4750 sc->bnx_rx_coal_bds_int);
4754 if (sc->bnx_coal_chg & BNX_TX_COAL_BDS_INT_CHG) {
4755 if (sc->bnx_tx_ringcnt == 1) {
4756 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT,
4757 sc->bnx_tx_coal_bds_int);
4760 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0);
4761 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) {
4762 CSR_WRITE_4(sc, BGE_VEC1_TX_MAX_COAL_BDS_INT +
4763 (i * BGE_VEC_COALSET_SIZE),
4764 sc->bnx_tx_coal_bds_int);
4767 for (; i < BNX_INTR_MAX - 1; ++i) {
4768 CSR_WRITE_4(sc, BGE_VEC1_TX_MAX_COAL_BDS_INT +
4769 (i * BGE_VEC_COALSET_SIZE), 0);
4772 if_printf(ifp, "tx_coal_bds_int -> %u\n",
4773 sc->bnx_tx_coal_bds_int);
4777 sc->bnx_coal_chg = 0;
4781 bnx_check_intr_rxtx(void *xintr)
4783 struct bnx_intr_data *intr = xintr;
4784 struct bnx_rx_ret_ring *ret;
4785 struct bnx_tx_ring *txr;
4788 lwkt_serialize_enter(intr->bnx_intr_serialize);
4790 KKASSERT(mycpuid == intr->bnx_intr_cpuid);
4792 ifp = &intr->bnx_sc->arpcom.ac_if;
4793 if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) != IFF_RUNNING) {
4794 lwkt_serialize_exit(intr->bnx_intr_serialize);
4798 txr = intr->bnx_txr;
4799 ret = intr->bnx_ret;
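/*
 * Lost interrupt heuristic: the hardware consumer indices have
 * moved, yet the driver's saved indices have not advanced since
 * the previous check.  One strike arms bnx_intr_maylose; a second
 * strike in a row calls the interrupt handler by hand.
 */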
4801 if (*ret->bnx_rx_considx != ret->bnx_rx_saved_considx ||
4802 *txr->bnx_tx_considx != txr->bnx_tx_saved_considx) {
4803 if (intr->bnx_rx_check_considx == ret->bnx_rx_saved_considx &&
4804 intr->bnx_tx_check_considx == txr->bnx_tx_saved_considx) {
4805 if (!intr->bnx_intr_maylose) {
4806 intr->bnx_intr_maylose = TRUE;
4810 if_printf(ifp, "lost interrupt\n");
4811 intr->bnx_intr_func(intr->bnx_intr_arg);
4814 intr->bnx_intr_maylose = FALSE;
4815 intr->bnx_rx_check_considx = ret->bnx_rx_saved_considx;
4816 intr->bnx_tx_check_considx = txr->bnx_tx_saved_considx;
4819 callout_reset(&intr->bnx_intr_timer, BNX_INTR_CKINTVL,
4820 intr->bnx_intr_check, intr);
4821 lwkt_serialize_exit(intr->bnx_intr_serialize);
4825 bnx_check_intr_tx(void *xintr)
4827 struct bnx_intr_data *intr = xintr;
4828 struct bnx_tx_ring *txr;
4831 lwkt_serialize_enter(intr->bnx_intr_serialize);
4833 KKASSERT(mycpuid == intr->bnx_intr_cpuid);
4835 ifp = &intr->bnx_sc->arpcom.ac_if;
4836 if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) != IFF_RUNNING) {
4837 lwkt_serialize_exit(intr->bnx_intr_serialize);
4841 txr = intr->bnx_txr;
4843 if (*txr->bnx_tx_considx != txr->bnx_tx_saved_considx) {
4844 if (intr->bnx_tx_check_considx == txr->bnx_tx_saved_considx) {
4845 if (!intr->bnx_intr_maylose) {
4846 intr->bnx_intr_maylose = TRUE;
4850 if_printf(ifp, "lost interrupt\n");
4851 intr->bnx_intr_func(intr->bnx_intr_arg);
4854 intr->bnx_intr_maylose = FALSE;
4855 intr->bnx_tx_check_considx = txr->bnx_tx_saved_considx;
4858 callout_reset(&intr->bnx_intr_timer, BNX_INTR_CKINTVL,
4859 intr->bnx_intr_check, intr);
4860 lwkt_serialize_exit(intr->bnx_intr_serialize);
4864 bnx_check_intr_rx(void *xintr)
4866 struct bnx_intr_data *intr = xintr;
4867 struct bnx_rx_ret_ring *ret;
4870 lwkt_serialize_enter(intr->bnx_intr_serialize);
4872 KKASSERT(mycpuid == intr->bnx_intr_cpuid);
4874 ifp = &intr->bnx_sc->arpcom.ac_if;
4875 if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) != IFF_RUNNING) {
4876 lwkt_serialize_exit(intr->bnx_intr_serialize);
4880 ret = intr->bnx_ret;
4882 if (*ret->bnx_rx_considx != ret->bnx_rx_saved_considx) {
4883 if (intr->bnx_rx_check_considx == ret->bnx_rx_saved_considx) {
4884 if (!intr->bnx_intr_maylose) {
4885 intr->bnx_intr_maylose = TRUE;
4889 if_printf(ifp, "lost interrupt\n");
4890 intr->bnx_intr_func(intr->bnx_intr_arg);
4893 intr->bnx_intr_maylose = FALSE;
4894 intr->bnx_rx_check_considx = ret->bnx_rx_saved_considx;
4897 callout_reset(&intr->bnx_intr_timer, BNX_INTR_CKINTVL,
4898 intr->bnx_intr_check, intr);
4899 lwkt_serialize_exit(intr->bnx_intr_serialize);
4903 bnx_enable_intr(struct bnx_softc *sc)
4905 struct ifnet *ifp = &sc->arpcom.ac_if;
4908 for (i = 0; i < sc->bnx_intr_cnt; ++i) {
4909 lwkt_serialize_handler_enable(
4910 sc->bnx_intr_data[i].bnx_intr_serialize);
4916 for (i = 0; i < sc->bnx_intr_cnt; ++i) {
4917 struct bnx_intr_data *intr = &sc->bnx_intr_data[i];
4919 bnx_writembx(sc, intr->bnx_intr_mbx,
4920 (*intr->bnx_saved_status_tag) << 24);
4921 /* XXX Linux driver */
4922 bnx_writembx(sc, intr->bnx_intr_mbx,
4923 (*intr->bnx_saved_status_tag) << 24);
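/*
 * Writing the saved status tag into bits 31:24 of the interrupt
 * mailbox re-enables the interrupt and acknowledges all status
 * updates up to that tag; the duplicated write mirrors what the
 * Linux driver does (see the XXX above).
 */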
4927 * Unmask the interrupt when we stop polling.
4929 PCI_CLRBIT(sc->bnx_dev, BGE_PCI_MISC_CTL,
4930 BGE_PCIMISCCTL_MASK_PCI_INTR, 4);
4933 * Trigger another interrupt, since the above write
4934 * to interrupt mailbox 0 may acknowledge a pending
4937 BNX_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
4939 if (sc->bnx_flags & BNX_FLAG_STATUSTAG_BUG) {
4941 if_printf(ifp, "status tag bug workaround\n");
4943 for (i = 0; i < sc->bnx_intr_cnt; ++i) {
4944 struct bnx_intr_data *intr = &sc->bnx_intr_data[i];
4946 if (intr->bnx_intr_check == NULL)
4948 intr->bnx_intr_maylose = FALSE;
4949 intr->bnx_rx_check_considx = 0;
4950 intr->bnx_tx_check_considx = 0;
4951 callout_reset_bycpu(&intr->bnx_intr_timer,
4952 BNX_INTR_CKINTVL, intr->bnx_intr_check, intr,
4953 intr->bnx_intr_cpuid);
4959 bnx_disable_intr(struct bnx_softc *sc)
4963 for (i = 0; i < sc->bnx_intr_cnt; ++i) {
4964 struct bnx_intr_data *intr = &sc->bnx_intr_data[i];
4966 callout_stop(&intr->bnx_intr_timer);
4967 intr->bnx_intr_maylose = FALSE;
4968 intr->bnx_rx_check_considx = 0;
4969 intr->bnx_tx_check_considx = 0;
4973 * Mask the interrupt when we start polling.
4975 PCI_SETBIT(sc->bnx_dev, BGE_PCI_MISC_CTL,
4976 BGE_PCIMISCCTL_MASK_PCI_INTR, 4);
4979 * Acknowledge a possibly asserted interrupt.
4981 for (i = 0; i < BNX_INTR_MAX; ++i)
4982 bnx_writembx(sc, sc->bnx_intr_data[i].bnx_intr_mbx, 1);
4984 for (i = 0; i < sc->bnx_intr_cnt; ++i) {
4985 lwkt_serialize_handler_disable(
4986 sc->bnx_intr_data[i].bnx_intr_serialize);
4991 bnx_get_eaddr_mem(struct bnx_softc *sc, uint8_t ether_addr[])
4996 mac_addr = bnx_readmem_ind(sc, 0x0c14);
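/*
 * 0x484b is ASCII "HK", the signature marking a valid MAC address
 * in NIC memory.
 */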
4997 if ((mac_addr >> 16) == 0x484b) {
4998 ether_addr[0] = (uint8_t)(mac_addr >> 8);
4999 ether_addr[1] = (uint8_t)mac_addr;
5000 mac_addr = bnx_readmem_ind(sc, 0x0c18);
5001 ether_addr[2] = (uint8_t)(mac_addr >> 24);
5002 ether_addr[3] = (uint8_t)(mac_addr >> 16);
5003 ether_addr[4] = (uint8_t)(mac_addr >> 8);
5004 ether_addr[5] = (uint8_t)mac_addr;
5011 bnx_get_eaddr_nvram(struct bnx_softc *sc, uint8_t ether_addr[])
5013 int mac_offset = BGE_EE_MAC_OFFSET;
5015 if (BNX_IS_5717_PLUS(sc)) {
5018 f = pci_get_function(sc->bnx_dev);
5020 mac_offset = BGE_EE_MAC_OFFSET_5717;
5022 mac_offset += BGE_EE_MAC_OFFSET_5717_OFF;
5025 return bnx_read_nvram(sc, ether_addr, mac_offset + 2, ETHER_ADDR_LEN);
5029 bnx_get_eaddr_eeprom(struct bnx_softc *sc, uint8_t ether_addr[])
5031 if (sc->bnx_flags & BNX_FLAG_NO_EEPROM)
5034 return bnx_read_eeprom(sc, ether_addr, BGE_EE_MAC_OFFSET + 2,
5039 bnx_get_eaddr(struct bnx_softc *sc, uint8_t eaddr[])
5041 static const bnx_eaddr_fcn_t bnx_eaddr_funcs[] = {
5042 /* NOTE: Order is critical */
5044 bnx_get_eaddr_nvram,
5045 bnx_get_eaddr_eeprom,
5048 const bnx_eaddr_fcn_t *func;
5050 for (func = bnx_eaddr_funcs; *func != NULL; ++func) {
5051 if ((*func)(sc, eaddr) == 0)
5054 return (*func == NULL ? ENXIO : 0);
5058 * NOTE: 'm' is not freed upon failure
5061 bnx_defrag_shortdma(struct mbuf *m)
5067 * If the device receives two back-to-back send BDs with less than
5068 * or equal to 8 total bytes, it may hang. The two back-to-back
5069 * send BDs must be in the same frame for this failure to occur.
5070 * Scan the mbuf chain and check whether two back-to-back send BDs
5071 * are present. If so, allocate a new mbuf and copy the frame into
5072 * it to work around the silicon bug.
5074 for (n = m, found = 0; n != NULL; n = n->m_next) {
5085 n = m_defrag(m, MB_DONTWAIT);
5092 bnx_stop_block(struct bnx_softc *sc, bus_size_t reg, uint32_t bit)
5096 BNX_CLRBIT(sc, reg, bit);
5097 for (i = 0; i < BNX_TIMEOUT; i++) {
5098 if ((CSR_READ_4(sc, reg) & bit) == 0)
5105 bnx_link_poll(struct bnx_softc *sc)
5109 status = CSR_READ_4(sc, BGE_MAC_STS);
5110 if ((status & sc->bnx_link_chg) || sc->bnx_link_evt) {
5111 sc->bnx_link_evt = 0;
5112 sc->bnx_link_upd(sc, status);
5117 bnx_enable_msi(struct bnx_softc *sc, boolean_t is_msix)
5121 msi_mode = CSR_READ_4(sc, BGE_MSI_MODE);
5122 msi_mode |= BGE_MSIMODE_ENABLE;
5125 * 5718-PG105-R says that "one shot" mode does not work
5126 * if MSI is used; in practice, however, it works.
5128 msi_mode &= ~BGE_MSIMODE_ONESHOT_DISABLE;
5130 msi_mode |= BGE_MSIMODE_MSIX_MULTIMODE;
5132 msi_mode &= ~BGE_MSIMODE_MSIX_MULTIMODE;
5133 CSR_WRITE_4(sc, BGE_MSI_MODE, msi_mode);
5137 bnx_dma_swap_options(struct bnx_softc *sc)
5139 uint32_t dma_options;
5141 dma_options = BGE_MODECTL_WORDSWAP_NONFRAME |
5142 BGE_MODECTL_BYTESWAP_DATA | BGE_MODECTL_WORDSWAP_DATA;
5143 #if BYTE_ORDER == BIG_ENDIAN
5144 dma_options |= BGE_MODECTL_BYTESWAP_NONFRAME;
5150 bnx_setup_tso(struct bnx_tx_ring *txr, struct mbuf **mp,
5151 uint16_t *mss0, uint16_t *flags0)
5156 int thoff, iphlen, hoff, hlen;
5157 uint16_t flags, mss;
5160 KASSERT(M_WRITABLE(m), ("TSO mbuf not writable"));
5162 hoff = m->m_pkthdr.csum_lhlen;
5163 iphlen = m->m_pkthdr.csum_iphlen;
5164 thoff = m->m_pkthdr.csum_thlen;
5166 KASSERT(hoff > 0, ("invalid ether header len"));
5167 KASSERT(iphlen > 0, ("invalid ip header len"));
5168 KASSERT(thoff > 0, ("invalid tcp header len"));
5170 if (__predict_false(m->m_len < hoff + iphlen + thoff)) {
5171 m = m_pullup(m, hoff + iphlen + thoff);
5178 ip = mtodoff(m, struct ip *, hoff);
5179 th = mtodoff(m, struct tcphdr *, hoff + iphlen);
5181 mss = m->m_pkthdr.tso_segsz;
5182 flags = BGE_TXBDFLAG_CPU_PRE_DMA | BGE_TXBDFLAG_CPU_POST_DMA;
5184 ip->ip_len = htons(mss + iphlen + thoff);
5187 hlen = (iphlen + thoff) >> 2;
5188 mss |= ((hlen & 0x3) << 14);
5189 flags |= ((hlen & 0xf8) << 7) | ((hlen & 0x4) << 2);
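/*
 * hlen is the IP + TCP header length in 32-bit words.  The send BD
 * wants it scattered: hlen bits 0-1 land in mss bits 14-15, bit 2
 * in flags bit 4, and bits 3-7 in flags bits 10-14.
 */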
5198 bnx_create_tx_ring(struct bnx_tx_ring *txr)
5200 bus_size_t txmaxsz, txmaxsegsz;
5203 lwkt_serialize_init(&txr->bnx_tx_serialize);
5206 * Create DMA tag and maps for TX mbufs.
5208 if (txr->bnx_sc->bnx_flags & BNX_FLAG_TSO)
5209 txmaxsz = IP_MAXPACKET + sizeof(struct ether_vlan_header);
5211 txmaxsz = BNX_JUMBO_FRAMELEN;
5212 if (txr->bnx_sc->bnx_asicrev == BGE_ASICREV_BCM57766)
5213 txmaxsegsz = MCLBYTES;
5215 txmaxsegsz = PAGE_SIZE;
5216 error = bus_dma_tag_create(txr->bnx_sc->bnx_cdata.bnx_parent_tag,
5217 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
5218 txmaxsz, BNX_NSEG_NEW, txmaxsegsz,
5219 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
5222 device_printf(txr->bnx_sc->bnx_dev,
5223 "could not create TX mbuf DMA tag\n");
5227 for (i = 0; i < BGE_TX_RING_CNT; i++) {
5228 error = bus_dmamap_create(txr->bnx_tx_mtag,
5229 BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
5230 &txr->bnx_tx_buf[i].bnx_tx_dmamap);
5234 for (j = 0; j < i; ++j) {
5235 bus_dmamap_destroy(txr->bnx_tx_mtag,
5236 txr->bnx_tx_buf[j].bnx_tx_dmamap);
5238 bus_dma_tag_destroy(txr->bnx_tx_mtag);
5239 txr->bnx_tx_mtag = NULL;
5241 device_printf(txr->bnx_sc->bnx_dev,
5242 "could not create TX mbuf DMA map\n");
5248 * Create DMA resources for TX ring.
5250 error = bnx_dma_block_alloc(txr->bnx_sc, BGE_TX_RING_SZ,
5251 &txr->bnx_tx_ring_tag,
5252 &txr->bnx_tx_ring_map,
5253 (void *)&txr->bnx_tx_ring,
5254 &txr->bnx_tx_ring_paddr);
5256 device_printf(txr->bnx_sc->bnx_dev,
5257 "could not create TX ring\n");
5261 txr->bnx_tx_flags |= BNX_TX_FLAG_SHORTDMA;
5262 txr->bnx_tx_wreg = BNX_TX_WREG_NSEGS;
5268 bnx_destroy_tx_ring(struct bnx_tx_ring *txr)
5270 /* Destroy TX mbuf DMA resources. */
5271 if (txr->bnx_tx_mtag != NULL) {
5274 for (i = 0; i < BGE_TX_RING_CNT; i++) {
5275 KKASSERT(txr->bnx_tx_buf[i].bnx_tx_mbuf == NULL);
5276 bus_dmamap_destroy(txr->bnx_tx_mtag,
5277 txr->bnx_tx_buf[i].bnx_tx_dmamap);
5279 bus_dma_tag_destroy(txr->bnx_tx_mtag);
5282 /* Destroy TX ring */
5283 bnx_dma_block_free(txr->bnx_tx_ring_tag,
5284 txr->bnx_tx_ring_map, txr->bnx_tx_ring);
5288 bnx_sysctl_force_defrag(SYSCTL_HANDLER_ARGS)
5290 struct bnx_softc *sc = (void *)arg1;
5291 struct ifnet *ifp = &sc->arpcom.ac_if;
5292 struct bnx_tx_ring *txr = &sc->bnx_tx_ring[0];
5293 int error, defrag, i;
5295 if (txr->bnx_tx_flags & BNX_TX_FLAG_FORCE_DEFRAG)
5300 error = sysctl_handle_int(oidp, &defrag, 0, req);
5301 if (error || req->newptr == NULL)
5304 ifnet_serialize_all(ifp);
5305 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) {
5306 txr = &sc->bnx_tx_ring[i];
5308 txr->bnx_tx_flags |= BNX_TX_FLAG_FORCE_DEFRAG;
5310 txr->bnx_tx_flags &= ~BNX_TX_FLAG_FORCE_DEFRAG;
5312 ifnet_deserialize_all(ifp);
5318 bnx_sysctl_tx_wreg(SYSCTL_HANDLER_ARGS)
5320 struct bnx_softc *sc = (void *)arg1;
5321 struct ifnet *ifp = &sc->arpcom.ac_if;
5322 struct bnx_tx_ring *txr = &sc->bnx_tx_ring[0];
5323 int error, tx_wreg, i;
5325 tx_wreg = txr->bnx_tx_wreg;
5326 error = sysctl_handle_int(oidp, &tx_wreg, 0, req);
5327 if (error || req->newptr == NULL)
5330 ifnet_serialize_all(ifp);
5331 for (i = 0; i < sc->bnx_tx_ringcnt; ++i)
5332 sc->bnx_tx_ring[i].bnx_tx_wreg = tx_wreg;
5333 ifnet_deserialize_all(ifp);
5339 bnx_create_rx_ret_ring(struct bnx_rx_ret_ring *ret)
5343 lwkt_serialize_init(&ret->bnx_rx_ret_serialize);
5346 * Create DMA resources for RX return ring.
5348 error = bnx_dma_block_alloc(ret->bnx_sc,
5349 BGE_RX_RTN_RING_SZ(BNX_RETURN_RING_CNT),
5350 &ret->bnx_rx_ret_ring_tag,
5351 &ret->bnx_rx_ret_ring_map,
5352 (void *)&ret->bnx_rx_ret_ring,
5353 &ret->bnx_rx_ret_ring_paddr);
5355 device_printf(ret->bnx_sc->bnx_dev,
5356 "could not create RX ret ring\n");
5360 /* Shadow standard ring's RX mbuf DMA tag */
5361 ret->bnx_rx_mtag = ret->bnx_std->bnx_rx_mtag;
5364 * Create tmp DMA map for RX mbufs.
5366 error = bus_dmamap_create(ret->bnx_rx_mtag, BUS_DMA_WAITOK,
5367 &ret->bnx_rx_tmpmap);
5369 device_printf(ret->bnx_sc->bnx_dev,
5370 "could not create tmp RX mbuf DMA map\n");
5371 ret->bnx_rx_mtag = NULL;
5378 bnx_destroy_rx_ret_ring(struct bnx_rx_ret_ring *ret)
5380 /* Destroy tmp RX mbuf DMA map */
5381 if (ret->bnx_rx_mtag != NULL)
5382 bus_dmamap_destroy(ret->bnx_rx_mtag, ret->bnx_rx_tmpmap);
5384 /* Destroy RX return ring */
5385 bnx_dma_block_free(ret->bnx_rx_ret_ring_tag,
5386 ret->bnx_rx_ret_ring_map, ret->bnx_rx_ret_ring);
5390 bnx_alloc_intr(struct bnx_softc *sc)
5392 struct bnx_intr_data *intr;
5396 if (sc->bnx_intr_cnt > 1) {
5397 error = bnx_alloc_msix(sc);
5400 KKASSERT(sc->bnx_intr_type == PCI_INTR_TYPE_MSIX);
5404 KKASSERT(sc->bnx_intr_cnt == 1);
5406 intr = &sc->bnx_intr_data[0];
5407 intr->bnx_ret = &sc->bnx_rx_ret_ring[0];
5408 intr->bnx_txr = &sc->bnx_tx_ring[0];
5409 intr->bnx_intr_serialize = &sc->bnx_main_serialize;
5410 intr->bnx_intr_check = bnx_check_intr_rxtx;
5411 intr->bnx_saved_status_tag = &intr->bnx_ret->bnx_saved_status_tag;
5413 sc->bnx_intr_type = pci_alloc_1intr(sc->bnx_dev, bnx_msi_enable,
5414 &intr->bnx_intr_rid, &intr_flags);
5416 intr->bnx_intr_res = bus_alloc_resource_any(sc->bnx_dev, SYS_RES_IRQ,
5417 &intr->bnx_intr_rid, intr_flags);
5418 if (intr->bnx_intr_res == NULL) {
5419 device_printf(sc->bnx_dev, "could not alloc interrupt\n");
5423 if (sc->bnx_intr_type == PCI_INTR_TYPE_MSI) {
5424 bnx_enable_msi(sc, FALSE);
5425 intr->bnx_intr_func = bnx_msi;
5427 device_printf(sc->bnx_dev, "oneshot MSI\n");
5429 intr->bnx_intr_func = bnx_intr_legacy;
5431 intr->bnx_intr_arg = sc;
5432 intr->bnx_intr_cpuid = rman_get_cpuid(intr->bnx_intr_res);
5434 intr->bnx_txr->bnx_tx_cpuid = intr->bnx_intr_cpuid;
5440 bnx_setup_intr(struct bnx_softc *sc)
5444 for (i = 0; i < sc->bnx_intr_cnt; ++i) {
5445 struct bnx_intr_data *intr = &sc->bnx_intr_data[i];
5447 error = bus_setup_intr_descr(sc->bnx_dev, intr->bnx_intr_res,
5448 INTR_MPSAFE, intr->bnx_intr_func, intr->bnx_intr_arg,
5449 &intr->bnx_intr_hand, intr->bnx_intr_serialize,
5450 intr->bnx_intr_desc);
5452 device_printf(sc->bnx_dev,
5453 "could not set up %dth intr\n", i);
5454 bnx_teardown_intr(sc, i);
5462 bnx_teardown_intr(struct bnx_softc *sc, int cnt)
5466 for (i = 0; i < cnt; ++i) {
5467 struct bnx_intr_data *intr = &sc->bnx_intr_data[i];
5469 bus_teardown_intr(sc->bnx_dev, intr->bnx_intr_res,
5470 intr->bnx_intr_hand);
5475 bnx_free_intr(struct bnx_softc *sc)
5477 if (sc->bnx_intr_type != PCI_INTR_TYPE_MSIX) {
5478 struct bnx_intr_data *intr;
5480 KKASSERT(sc->bnx_intr_cnt <= 1);
5481 intr = &sc->bnx_intr_data[0];
5483 if (intr->bnx_intr_res != NULL) {
5484 bus_release_resource(sc->bnx_dev, SYS_RES_IRQ,
5485 intr->bnx_intr_rid, intr->bnx_intr_res);
5487 if (sc->bnx_intr_type == PCI_INTR_TYPE_MSI)
5488 pci_release_msi(sc->bnx_dev);
5490 bnx_free_msix(sc, TRUE);
5495 bnx_setup_serialize(struct bnx_softc *sc)
5500 * Allocate serializer array
5503 /* Main + RX STD + TX + RX RET */
5504 sc->bnx_serialize_cnt = 1 + 1 + sc->bnx_tx_ringcnt + sc->bnx_rx_retcnt;
5507 kmalloc(sc->bnx_serialize_cnt * sizeof(struct lwkt_serialize *),
5508 M_DEVBUF, M_WAITOK | M_ZERO);
5513 * NOTE: Order is critical
5518 KKASSERT(i < sc->bnx_serialize_cnt);
5519 sc->bnx_serialize[i++] = &sc->bnx_main_serialize;
5521 KKASSERT(i < sc->bnx_serialize_cnt);
5522 sc->bnx_serialize[i++] = &sc->bnx_rx_std_ring.bnx_rx_std_serialize;
5524 for (j = 0; j < sc->bnx_rx_retcnt; ++j) {
5525 KKASSERT(i < sc->bnx_serialize_cnt);
5526 sc->bnx_serialize[i++] =
5527 &sc->bnx_rx_ret_ring[j].bnx_rx_ret_serialize;
5530 for (j = 0; j < sc->bnx_tx_ringcnt; ++j) {
5531 KKASSERT(i < sc->bnx_serialize_cnt);
5532 sc->bnx_serialize[i++] =
5533 &sc->bnx_tx_ring[j].bnx_tx_serialize;
5536 KKASSERT(i == sc->bnx_serialize_cnt);
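/*
 * The serializer array is always entered in index order, so every
 * path acquires the serializers in the same main -> RX std ->
 * RX return -> TX order and cannot deadlock.
 */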
5540 bnx_serialize(struct ifnet *ifp, enum ifnet_serialize slz)
5542 struct bnx_softc *sc = ifp->if_softc;
5544 ifnet_serialize_array_enter(sc->bnx_serialize,
5545 sc->bnx_serialize_cnt, slz);
5549 bnx_deserialize(struct ifnet *ifp, enum ifnet_serialize slz)
5551 struct bnx_softc *sc = ifp->if_softc;
5553 ifnet_serialize_array_exit(sc->bnx_serialize,
5554 sc->bnx_serialize_cnt, slz);
5558 bnx_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz)
5560 struct bnx_softc *sc = ifp->if_softc;
5562 return ifnet_serialize_array_try(sc->bnx_serialize,
5563 sc->bnx_serialize_cnt, slz);
5569 bnx_serialize_assert(struct ifnet *ifp, enum ifnet_serialize slz,
5570 boolean_t serialized)
5572 struct bnx_softc *sc = ifp->if_softc;
5574 ifnet_serialize_array_assert(sc->bnx_serialize, sc->bnx_serialize_cnt,
5578 #endif /* INVARIANTS */
5580 #ifdef IFPOLL_ENABLE
5583 bnx_sysctl_npoll_offset(SYSCTL_HANDLER_ARGS)
5585 struct bnx_softc *sc = (void *)arg1;
5586 struct ifnet *ifp = &sc->arpcom.ac_if;
5589 off = sc->bnx_npoll_rxoff;
5590 error = sysctl_handle_int(oidp, &off, 0, req);
5591 if (error || req->newptr == NULL)
5596 ifnet_serialize_all(ifp);
5597 if (off >= ncpus2 || off % sc->bnx_rx_retcnt != 0) {
5601 sc->bnx_npoll_txoff = off;
5602 sc->bnx_npoll_rxoff = off;
5604 ifnet_deserialize_all(ifp);
5610 bnx_sysctl_npoll_rxoff(SYSCTL_HANDLER_ARGS)
5612 struct bnx_softc *sc = (void *)arg1;
5613 struct ifnet *ifp = &sc->arpcom.ac_if;
5616 off = sc->bnx_npoll_rxoff;
5617 error = sysctl_handle_int(oidp, &off, 0, req);
5618 if (error || req->newptr == NULL)
5623 ifnet_serialize_all(ifp);
5624 if (off >= ncpus2 || off % sc->bnx_rx_retcnt != 0) {
5628 sc->bnx_npoll_rxoff = off;
5630 ifnet_deserialize_all(ifp);
5636 bnx_sysctl_npoll_txoff(SYSCTL_HANDLER_ARGS)
5638 struct bnx_softc *sc = (void *)arg1;
5639 struct ifnet *ifp = &sc->arpcom.ac_if;
5642 off = sc->bnx_npoll_txoff;
5643 error = sysctl_handle_int(oidp, &off, 0, req);
5644 if (error || req->newptr == NULL)
5649 ifnet_serialize_all(ifp);
5650 if (off >= ncpus2) {
5654 sc->bnx_npoll_txoff = off;
5656 ifnet_deserialize_all(ifp);
5661 #endif /* IFPOLL_ENABLE */
5664 bnx_set_tick_cpuid(struct bnx_softc *sc, boolean_t polling)
5667 sc->bnx_tick_cpuid = 0; /* XXX */
5669 sc->bnx_tick_cpuid = sc->bnx_intr_data[0].bnx_intr_cpuid;
5673 bnx_rx_std_refill_ithread(void *xstd)
5675 struct bnx_rx_std_ring *std = xstd;
5676 struct globaldata *gd = mycpu;
5680 while (!std->bnx_rx_std_stop) {
5681 if (std->bnx_rx_std_refill) {
5682 lwkt_serialize_handler_call(
5683 &std->bnx_rx_std_serialize,
5684 bnx_rx_std_refill, std, NULL);
5690 atomic_poll_release_int(&std->bnx_rx_std_running);
5693 if (!std->bnx_rx_std_refill && !std->bnx_rx_std_stop) {
5694 lwkt_deschedule_self(gd->gd_curthread);
5707 bnx_rx_std_refill(void *xstd, void *frame __unused)
5709 struct bnx_rx_std_ring *std = xstd;
5710 int cnt, refill_mask;
5716 refill_mask = std->bnx_rx_std_refill;
5717 atomic_clear_int(&std->bnx_rx_std_refill, refill_mask);
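/*
 * Each set bit in refill_mask names a return ring whose consumed
 * standard-ring slots need fresh mbufs; bsfl() walks those bits
 * from the lowest up.
 */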
5719 while (refill_mask) {
5720 uint16_t check_idx = std->bnx_rx_std;
5723 ret_idx = bsfl(refill_mask);
5725 struct bnx_rx_buf *rb;
5728 BNX_INC(check_idx, BGE_STD_RX_RING_CNT);
5729 rb = &std->bnx_rx_std_buf[check_idx];
5730 refilled = rb->bnx_rx_refilled;
5733 bnx_setup_rxdesc_std(std, check_idx);
5734 std->bnx_rx_std = check_idx;
5737 atomic_subtract_int(
5738 &std->bnx_rx_std_used, cnt);
5739 bnx_writembx(std->bnx_sc,
5740 BGE_MBX_RX_STD_PROD_LO,
5748 refill_mask &= ~(1 << ret_idx);
5752 atomic_subtract_int(&std->bnx_rx_std_used, cnt);
5753 bnx_writembx(std->bnx_sc, BGE_MBX_RX_STD_PROD_LO,
5757 if (std->bnx_rx_std_refill)
5760 atomic_poll_release_int(&std->bnx_rx_std_running);
5763 if (std->bnx_rx_std_refill)
5768 bnx_sysctl_std_refill(SYSCTL_HANDLER_ARGS)
5770 struct bnx_softc *sc = (void *)arg1;
5771 struct ifnet *ifp = &sc->arpcom.ac_if;
5772 struct bnx_rx_ret_ring *ret = &sc->bnx_rx_ret_ring[0];
5773 int error, cntmax, i;
5775 cntmax = ret->bnx_rx_cntmax;
5776 error = sysctl_handle_int(oidp, &cntmax, 0, req);
5777 if (error || req->newptr == NULL)
5780 ifnet_serialize_all(ifp);
5782 if ((cntmax * sc->bnx_rx_retcnt) >= BGE_STD_RX_RING_CNT / 2) {
5787 for (i = 0; i < sc->bnx_rx_retcnt; ++i)
5788 sc->bnx_rx_ret_ring[i].bnx_rx_cntmax = cntmax;
5792 ifnet_deserialize_all(ifp);
5798 bnx_init_rss(struct bnx_softc *sc)
5800 uint8_t key[BGE_RSS_KEYREG_CNT * BGE_RSS_KEYREG_SIZE];
5803 KKASSERT(BNX_RSS_ENABLED(sc));
5806 * Configure the RSS redirect table in the following fashion:
5807 * (hash & ring_cnt_mask) == rdr_table[(hash & rdr_table_mask)]
5810 for (j = 0; j < BGE_RSS_INDIR_TBL_CNT; ++j) {
5813 for (i = 0; i < BGE_RSS_INDIR_TBLENT_CNT; ++i) {
5816 q = r % sc->bnx_rx_retcnt;
5817 tbl |= q << (BGE_RSS_INDIR_TBLENT_SHIFT *
5818 (BGE_RSS_INDIR_TBLENT_CNT - i - 1));
5822 BNX_RSS_DPRINTF(sc, 1, "tbl%d %08x\n", j, tbl);
5823 CSR_WRITE_4(sc, BGE_RSS_INDIR_TBL(j), tbl);
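/*
 * Example: with two RX return rings the redirect entries cycle
 * 0,1,0,1,..., so a packet is effectively steered by the low bit
 * of its Toeplitz hash.
 */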
5826 toeplitz_get_key(key, sizeof(key));
5827 for (i = 0; i < BGE_RSS_KEYREG_CNT; ++i) {
5830 keyreg = BGE_RSS_KEYREG_VAL(key, i);
5832 BNX_RSS_DPRINTF(sc, 1, "key%d %08x\n", i, keyreg);
5833 CSR_WRITE_4(sc, BGE_RSS_KEYREG(i), keyreg);
5838 bnx_setup_ring_cnt(struct bnx_softc *sc)
5840 int msix_enable, i, msix_cnt, msix_cnt2, ring_max;
5842 sc->bnx_tx_ringcnt = 1;
5843 sc->bnx_rx_retcnt = 1;
5844 sc->bnx_intr_cnt = 1;
5846 msix_enable = device_getenv_int(sc->bnx_dev, "msix.enable",
5854 msix_cnt = pci_msix_count(sc->bnx_dev);
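/* msix_cnt2 below is msix_cnt rounded down to a power of 2. */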
5859 while ((1 << (i + 1)) <= msix_cnt)
5864 * One MSI-X vector is dedicated to the status block or a single
5865 * TX queue, so make sure that there are enough MSI-X vectors.
5867 if (msix_cnt == msix_cnt2) {
5870 * This probably will not happen; 57785/5718 families
5871 * come with at least 5 MSI-X vectors.
5874 if (msix_cnt2 <= 1) {
5875 device_printf(sc->bnx_dev,
5876 "MSI-X count %d could not be used\n", msix_cnt);
5879 device_printf(sc->bnx_dev, "MSI-X count %d is a power of 2\n",
5884 * Setup RX ring count
5886 ring_max = BNX_RX_RING_MAX;
5887 if (ring_max > msix_cnt2)
5888 ring_max = msix_cnt2;
5889 sc->bnx_rx_retcnt = device_getenv_int(sc->bnx_dev, "rx_rings",
5891 sc->bnx_rx_retcnt = if_ring_count2(sc->bnx_rx_retcnt, ring_max);
5893 if (sc->bnx_rx_retcnt == 1)
5897 * We need one extra MSI-X vector for link status or
5898 * TX ring (if only one TX ring is enabled).
5900 sc->bnx_intr_cnt = sc->bnx_rx_retcnt + 1;
5903 * Setup TX ring count
5905 * Currently only BCM5719 and BCM5720 support multiple TX rings
5906 * and the TX ring count must not exceed the RX ring count.
5908 if (sc->bnx_asicrev == BGE_ASICREV_BCM5719 ||
5909 sc->bnx_asicrev == BGE_ASICREV_BCM5720) {
5910 ring_max = BNX_TX_RING_MAX;
5911 if (ring_max > msix_cnt2)
5912 ring_max = msix_cnt2;
5913 if (ring_max > sc->bnx_rx_retcnt)
5914 ring_max = sc->bnx_rx_retcnt;
5915 sc->bnx_tx_ringcnt = device_getenv_int(sc->bnx_dev, "tx_rings",
5917 sc->bnx_tx_ringcnt = if_ring_count2(sc->bnx_tx_ringcnt,
5923 bnx_alloc_msix(struct bnx_softc *sc)
5925 struct bnx_intr_data *intr;
5926 boolean_t setup = FALSE;
5927 int error, i, offset, offset_def;
5929 KKASSERT(sc->bnx_intr_cnt > 1);
5930 KKASSERT(sc->bnx_intr_cnt == sc->bnx_rx_retcnt + 1);
5932 if (sc->bnx_flags & BNX_FLAG_RXTX_BUNDLE) {
5936 intr = &sc->bnx_intr_data[0];
5938 intr->bnx_intr_serialize = &sc->bnx_main_serialize;
5939 intr->bnx_saved_status_tag = &sc->bnx_saved_status_tag;
5941 intr->bnx_intr_func = bnx_msix_status;
5942 intr->bnx_intr_arg = sc;
5943 intr->bnx_intr_cpuid = 0; /* XXX */
5945 ksnprintf(intr->bnx_intr_desc0, sizeof(intr->bnx_intr_desc0),
5946 "%s sts", device_get_nameunit(sc->bnx_dev));
5947 intr->bnx_intr_desc = intr->bnx_intr_desc0;
5952 if (sc->bnx_rx_retcnt == ncpus2) {
5955 offset_def = (sc->bnx_rx_retcnt *
5956 device_get_unit(sc->bnx_dev)) % ncpus2;
5958 offset = device_getenv_int(sc->bnx_dev,
5959 "msix.offset", offset_def);
5960 if (offset >= ncpus2 ||
5961 offset % sc->bnx_rx_retcnt != 0) {
5962 device_printf(sc->bnx_dev,
5963 "invalid msix.offset %d, use %d\n",
5964 offset, offset_def);
5965 offset = offset_def;
5969 for (i = 1; i < sc->bnx_intr_cnt; ++i) {
5972 intr = &sc->bnx_intr_data[i];
5974 KKASSERT(idx < sc->bnx_rx_retcnt);
5975 intr->bnx_ret = &sc->bnx_rx_ret_ring[idx];
5976 if (idx < sc->bnx_tx_ringcnt) {
5977 intr->bnx_txr = &sc->bnx_tx_ring[idx];
5978 intr->bnx_ret->bnx_txr = intr->bnx_txr;
5981 intr->bnx_intr_serialize =
5982 &intr->bnx_ret->bnx_rx_ret_serialize;
5983 intr->bnx_saved_status_tag =
5984 &intr->bnx_ret->bnx_saved_status_tag;
5986 intr->bnx_intr_arg = intr->bnx_ret;
5987 KKASSERT(idx + offset < ncpus2);
5988 intr->bnx_intr_cpuid = idx + offset;
5990 if (intr->bnx_txr == NULL) {
5991 intr->bnx_intr_check = bnx_check_intr_rx;
5992 intr->bnx_intr_func = bnx_msix_rx;
5993 ksnprintf(intr->bnx_intr_desc0,
5994 sizeof(intr->bnx_intr_desc0), "%s rx%d",
5995 device_get_nameunit(sc->bnx_dev), idx);
5997 intr->bnx_intr_check = bnx_check_intr_rxtx;
5998 intr->bnx_intr_func = bnx_msix_rxtx;
5999 ksnprintf(intr->bnx_intr_desc0,
6000 sizeof(intr->bnx_intr_desc0), "%s rxtx%d",
6001 device_get_nameunit(sc->bnx_dev), idx);
6003 intr->bnx_txr->bnx_tx_cpuid =
6004 intr->bnx_intr_cpuid;
6006 intr->bnx_intr_desc = intr->bnx_intr_desc0;
6008 intr->bnx_ret->bnx_msix_mbx = intr->bnx_intr_mbx;
6012 * TX ring and link status
6014 offset_def = device_get_unit(sc->bnx_dev) % ncpus2;
6015 offset = device_getenv_int(sc->bnx_dev, "msix.txoff",
6017 if (offset >= ncpus2) {
6018 device_printf(sc->bnx_dev,
6019 "invalid msix.txoff %d, use %d\n",
6020 offset, offset_def);
6021 offset = offset_def;
6024 intr = &sc->bnx_intr_data[0];
6026 intr->bnx_txr = &sc->bnx_tx_ring[0];
6027 intr->bnx_intr_serialize = &sc->bnx_main_serialize;
6028 intr->bnx_intr_check = bnx_check_intr_tx;
6029 intr->bnx_saved_status_tag =
6030 &intr->bnx_txr->bnx_saved_status_tag;
6032 intr->bnx_intr_func = bnx_msix_tx_status;
6033 intr->bnx_intr_arg = intr->bnx_txr;
6034 intr->bnx_intr_cpuid = offset;
6036 ksnprintf(intr->bnx_intr_desc0, sizeof(intr->bnx_intr_desc0),
6037 "%s ststx", device_get_nameunit(sc->bnx_dev));
6038 intr->bnx_intr_desc = intr->bnx_intr_desc0;
6040 intr->bnx_txr->bnx_tx_cpuid = intr->bnx_intr_cpuid;
6045 if (sc->bnx_rx_retcnt == ncpus2) {
6048 offset_def = (sc->bnx_rx_retcnt *
6049 device_get_unit(sc->bnx_dev)) % ncpus2;
6051 offset = device_getenv_int(sc->bnx_dev,
6052 "msix.rxoff", offset_def);
6053 if (offset >= ncpus2 ||
6054 offset % sc->bnx_rx_retcnt != 0) {
6055 device_printf(sc->bnx_dev,
6056 "invalid msix.rxoff %d, use %d\n",
6057 offset, offset_def);
6058 offset = offset_def;
6062 for (i = 1; i < sc->bnx_intr_cnt; ++i) {
6065 intr = &sc->bnx_intr_data[i];
6067 KKASSERT(idx < sc->bnx_rx_retcnt);
6068 intr->bnx_ret = &sc->bnx_rx_ret_ring[idx];
6069 intr->bnx_intr_serialize =
6070 &intr->bnx_ret->bnx_rx_ret_serialize;
6071 intr->bnx_intr_check = bnx_check_intr_rx;
6072 intr->bnx_saved_status_tag =
6073 &intr->bnx_ret->bnx_saved_status_tag;
6075 intr->bnx_intr_func = bnx_msix_rx;
6076 intr->bnx_intr_arg = intr->bnx_ret;
6077 KKASSERT(idx + offset < ncpus2);
6078 intr->bnx_intr_cpuid = idx + offset;
6080 ksnprintf(intr->bnx_intr_desc0,
6081 sizeof(intr->bnx_intr_desc0), "%s rx%d",
6082 device_get_nameunit(sc->bnx_dev), idx);
6083 intr->bnx_intr_desc = intr->bnx_intr_desc0;
6085 intr->bnx_ret->bnx_msix_mbx = intr->bnx_intr_mbx;
6089 if (BNX_IS_5717_PLUS(sc)) {
6090 sc->bnx_msix_mem_rid = PCIR_BAR(4);
6092 if (sc->bnx_res2 == NULL)
6093 sc->bnx_msix_mem_rid = PCIR_BAR(2);
6095 if (sc->bnx_msix_mem_rid != 0) {
6096 sc->bnx_msix_mem_res = bus_alloc_resource_any(sc->bnx_dev,
6097 SYS_RES_MEMORY, &sc->bnx_msix_mem_rid, RF_ACTIVE);
6098 if (sc->bnx_msix_mem_res == NULL) {
6099 device_printf(sc->bnx_dev,
6100 "could not alloc MSI-X table\n");
6105 bnx_enable_msi(sc, TRUE);
6107 error = pci_setup_msix(sc->bnx_dev);
6109 device_printf(sc->bnx_dev, "could not setup MSI-X\n");
6114 for (i = 0; i < sc->bnx_intr_cnt; ++i) {
6115 intr = &sc->bnx_intr_data[i];
6117 error = pci_alloc_msix_vector(sc->bnx_dev, i,
6118 &intr->bnx_intr_rid, intr->bnx_intr_cpuid);
6120 device_printf(sc->bnx_dev,
6121 "could not alloc MSI-X %d on cpu%d\n",
6122 i, intr->bnx_intr_cpuid);
6126 intr->bnx_intr_res = bus_alloc_resource_any(sc->bnx_dev,
6127 SYS_RES_IRQ, &intr->bnx_intr_rid, RF_ACTIVE);
6128 if (intr->bnx_intr_res == NULL) {
6129 device_printf(sc->bnx_dev,
6130 "could not alloc MSI-X %d resource\n", i);
6136 pci_enable_msix(sc->bnx_dev);
6137 sc->bnx_intr_type = PCI_INTR_TYPE_MSIX;
6140 bnx_free_msix(sc, setup);
6145 bnx_free_msix(struct bnx_softc *sc, boolean_t setup)
6149 KKASSERT(sc->bnx_intr_cnt > 1);
6151 for (i = 0; i < sc->bnx_intr_cnt; ++i) {
6152 struct bnx_intr_data *intr = &sc->bnx_intr_data[i];
6154 if (intr->bnx_intr_res != NULL) {
6155 bus_release_resource(sc->bnx_dev, SYS_RES_IRQ,
6156 intr->bnx_intr_rid, intr->bnx_intr_res);
6158 if (intr->bnx_intr_rid >= 0) {
6159 pci_release_msix_vector(sc->bnx_dev,
6160 intr->bnx_intr_rid);
6164 pci_teardown_msix(sc->bnx_dev);
6168 bnx_rx_std_refill_sched_ipi(void *xret)
6170 struct bnx_rx_ret_ring *ret = xret;
6171 struct bnx_rx_std_ring *std = ret->bnx_std;
6172 struct globaldata *gd = mycpu;
6176 atomic_set_int(&std->bnx_rx_std_refill, ret->bnx_rx_mask);
6179 KKASSERT(std->bnx_rx_std_ithread.td_gd == gd);
6180 lwkt_schedule(&std->bnx_rx_std_ithread);
6186 bnx_rx_std_refill_stop(void *xstd)
6188 struct bnx_rx_std_ring *std = xstd;
6189 struct globaldata *gd = mycpu;
6193 std->bnx_rx_std_stop = 1;
6196 KKASSERT(std->bnx_rx_std_ithread.td_gd == gd);
6197 lwkt_schedule(&std->bnx_rx_std_ithread);
6203 bnx_serialize_skipmain(struct bnx_softc *sc)
6205 lwkt_serialize_array_enter(sc->bnx_serialize,
6206 sc->bnx_serialize_cnt, 1);
6210 bnx_deserialize_skipmain(struct bnx_softc *sc)
6212 lwkt_serialize_array_exit(sc->bnx_serialize,
6213 sc->bnx_serialize_cnt, 1);
6217 bnx_rx_std_refill_sched(struct bnx_rx_ret_ring *ret,
6218 struct bnx_rx_std_ring *std)
6220 struct globaldata *gd = mycpu;
6222 ret->bnx_rx_cnt = 0;
6227 atomic_set_int(&std->bnx_rx_std_refill, ret->bnx_rx_mask);
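/*
 * atomic_poll_acquire_int() acts as a try-lock so that only one
 * CPU kicks the refill ithread: on success, schedule it directly
 * if it lives on this CPU, otherwise via an IPI; on failure the
 * already-running ithread will notice the refill bit set above.
 */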
6229 if (atomic_poll_acquire_int(&std->bnx_rx_std_running)) {
6230 if (std->bnx_rx_std_ithread.td_gd == gd) {
6231 lwkt_schedule(&std->bnx_rx_std_ithread);
6234 std->bnx_rx_std_ithread.td_gd,
6235 bnx_rx_std_refill_sched_ipi, ret);
6242 static struct pktinfo *
6243 bnx_rss_info(struct pktinfo *pi, const struct bge_rx_bd *cur_rx)
6245 /* Don't pick up IPv6 packet */
6246 if (cur_rx->bge_flags & BGE_RXBDFLAG_IPV6)
6249 /* Don't pick up IP packet w/o IP checksum */
6250 if ((cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) == 0 ||
6251 (cur_rx->bge_error_flag & BGE_RXERRFLAG_IP_CSUM_NOK))
6254 /* Don't pick up IP packet w/o TCP/UDP checksum */
6255 if ((cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) == 0)
6258 /* May be IP fragment */
6259 if (cur_rx->bge_tcp_udp_csum != 0xffff)
6262 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_IS_TCP)
6263 pi->pi_l3proto = IPPROTO_TCP;
6265 pi->pi_l3proto = IPPROTO_UDP;
6266 pi->pi_netisr = NETISR_IP;
6273 bnx_sig_pre_reset(struct bnx_softc *sc, int type)
6275 if (type == BNX_RESET_START || type == BNX_RESET_SUSPEND)
6276 bnx_ape_driver_state_change(sc, type);
6280 bnx_sig_post_reset(struct bnx_softc *sc, int type)
6282 if (type == BNX_RESET_SHUTDOWN)
6283 bnx_ape_driver_state_change(sc, type);
6287 * Clear all stale locks and select the lock for this driver instance.
6290 bnx_ape_lock_init(struct bnx_softc *sc)
6292 uint32_t bit, regbase;
6295 regbase = BGE_APE_PER_LOCK_GRANT;
6297 /* Clear any stale locks. */
6298 for (i = BGE_APE_LOCK_PHY0; i <= BGE_APE_LOCK_GPIO; i++) {
6300 case BGE_APE_LOCK_PHY0:
6301 case BGE_APE_LOCK_PHY1:
6302 case BGE_APE_LOCK_PHY2:
6303 case BGE_APE_LOCK_PHY3:
6304 bit = BGE_APE_LOCK_GRANT_DRIVER0;
6308 if (sc->bnx_func_addr == 0)
6309 bit = BGE_APE_LOCK_GRANT_DRIVER0;
6311 bit = 1 << sc->bnx_func_addr;
6314 APE_WRITE_4(sc, regbase + 4 * i, bit);
6317 /* Select the PHY lock based on the device's function number. */
6318 switch (sc->bnx_func_addr) {
6320 sc->bnx_phy_ape_lock = BGE_APE_LOCK_PHY0;
6324 sc->bnx_phy_ape_lock = BGE_APE_LOCK_PHY1;
6328 sc->bnx_phy_ape_lock = BGE_APE_LOCK_PHY2;
6332 sc->bnx_phy_ape_lock = BGE_APE_LOCK_PHY3;
6336 device_printf(sc->bnx_dev,
6337 "PHY lock not supported on this function\n");
6343 * Check for APE firmware, set flags, and print version info.
6346 bnx_ape_read_fw_ver(struct bnx_softc *sc)
6349 uint32_t apedata, features;
6351 /* Check for a valid APE signature in shared memory. */
6352 apedata = APE_READ_4(sc, BGE_APE_SEG_SIG);
6353 if (apedata != BGE_APE_SEG_SIG_MAGIC) {
6354 device_printf(sc->bnx_dev, "no APE signature\n");
6355 sc->bnx_mfw_flags &= ~BNX_MFW_ON_APE;
6359 /* Check if APE firmware is running. */
6360 apedata = APE_READ_4(sc, BGE_APE_FW_STATUS);
6361 if ((apedata & BGE_APE_FW_STATUS_READY) == 0) {
6362 device_printf(sc->bnx_dev, "APE signature found "
6363 "but FW status not ready! 0x%08x\n", apedata);
6367 sc->bnx_mfw_flags |= BNX_MFW_ON_APE;
6369 /* Fetch the APE firmware type and version. */
6370 apedata = APE_READ_4(sc, BGE_APE_FW_VERSION);
6371 features = APE_READ_4(sc, BGE_APE_FW_FEATURES);
6372 if (features & BGE_APE_FW_FEATURE_NCSI) {
6373 sc->bnx_mfw_flags |= BNX_MFW_TYPE_NCSI;
6375 } else if (features & BGE_APE_FW_FEATURE_DASH) {
6376 sc->bnx_mfw_flags |= BNX_MFW_TYPE_DASH;
6382 /* Print the APE firmware version. */
6383 device_printf(sc->bnx_dev, "APE FW version: %s v%d.%d.%d.%d\n",
6385 (apedata & BGE_APE_FW_VERSION_MAJMSK) >> BGE_APE_FW_VERSION_MAJSFT,
6386 (apedata & BGE_APE_FW_VERSION_MINMSK) >> BGE_APE_FW_VERSION_MINSFT,
6387 (apedata & BGE_APE_FW_VERSION_REVMSK) >> BGE_APE_FW_VERSION_REVSFT,
6388 (apedata & BGE_APE_FW_VERSION_BLDMSK));
6392 bnx_ape_lock(struct bnx_softc *sc, int locknum)
6394 uint32_t bit, gnt, req, status;
6397 if ((sc->bnx_mfw_flags & BNX_MFW_ON_APE) == 0)
6400 /* Lock request/grant registers have different bases. */
6401 req = BGE_APE_PER_LOCK_REQ;
6402 gnt = BGE_APE_PER_LOCK_GRANT;
6407 case BGE_APE_LOCK_GPIO:
6408 /* Lock required when using GPIO. */
6409 if (sc->bnx_func_addr == 0)
6410 bit = BGE_APE_LOCK_REQ_DRIVER0;
6412 bit = 1 << sc->bnx_func_addr;
6415 case BGE_APE_LOCK_GRC:
6416 /* Lock required to reset the device. */
6417 if (sc->bnx_func_addr == 0)
6418 bit = BGE_APE_LOCK_REQ_DRIVER0;
6420 bit = 1 << sc->bnx_func_addr;
6423 case BGE_APE_LOCK_MEM:
6424 /* Lock required when accessing certain APE memory. */
6425 if (sc->bnx_func_addr == 0)
6426 bit = BGE_APE_LOCK_REQ_DRIVER0;
6428 bit = 1 << sc->bnx_func_addr;
6431 case BGE_APE_LOCK_PHY0:
6432 case BGE_APE_LOCK_PHY1:
6433 case BGE_APE_LOCK_PHY2:
6434 case BGE_APE_LOCK_PHY3:
6435 /* Lock required when accessing PHYs. */
6436 bit = BGE_APE_LOCK_REQ_DRIVER0;
6443 /* Request a lock. */
6444 APE_WRITE_4(sc, req + off, bit);
6446 /* Wait up to 1 second to acquire lock. */
6447 for (i = 0; i < 20000; i++) {
6448 status = APE_READ_4(sc, gnt + off);
6454 /* Handle any errors. */
6455 if (status != bit) {
6456 if_printf(&sc->arpcom.ac_if, "APE lock %d request failed! "
6457 "request = 0x%04x[0x%04x], status = 0x%04x[0x%04x]\n",
6458 locknum, req + off, bit & 0xFFFF, gnt + off,
6460 /* Revoke the lock request. */
6461 APE_WRITE_4(sc, gnt + off, bit);
6469 bnx_ape_unlock(struct bnx_softc *sc, int locknum)
6474 if ((sc->bnx_mfw_flags & BNX_MFW_ON_APE) == 0)
6477 gnt = BGE_APE_PER_LOCK_GRANT;
6482 case BGE_APE_LOCK_GPIO:
6483 if (sc->bnx_func_addr == 0)
6484 bit = BGE_APE_LOCK_GRANT_DRIVER0;
6486 bit = 1 << sc->bnx_func_addr;
6489 case BGE_APE_LOCK_GRC:
6490 if (sc->bnx_func_addr == 0)
6491 bit = BGE_APE_LOCK_GRANT_DRIVER0;
6493 bit = 1 << sc->bnx_func_addr;
6496 case BGE_APE_LOCK_MEM:
6497 if (sc->bnx_func_addr == 0)
6498 bit = BGE_APE_LOCK_GRANT_DRIVER0;
6500 bit = 1 << sc->bnx_func_addr;
6503 case BGE_APE_LOCK_PHY0:
6504 case BGE_APE_LOCK_PHY1:
6505 case BGE_APE_LOCK_PHY2:
6506 case BGE_APE_LOCK_PHY3:
6507 bit = BGE_APE_LOCK_GRANT_DRIVER0;
6514 APE_WRITE_4(sc, gnt + off, bit);
6518 * Send an event to the APE firmware.
6521 bnx_ape_send_event(struct bnx_softc *sc, uint32_t event)
6526 /* NCSI does not support APE events. */
6527 if ((sc->bnx_mfw_flags & BNX_MFW_ON_APE) == 0)
6530 /* Wait up to 1ms for the APE to service the previous event. */
6531 for (i = 10; i > 0; i--) {
6532 if (bnx_ape_lock(sc, BGE_APE_LOCK_MEM) != 0)
6534 apedata = APE_READ_4(sc, BGE_APE_EVENT_STATUS);
6535 if ((apedata & BGE_APE_EVENT_STATUS_EVENT_PENDING) == 0) {
6536 APE_WRITE_4(sc, BGE_APE_EVENT_STATUS, event |
6537 BGE_APE_EVENT_STATUS_EVENT_PENDING);
6538 bnx_ape_unlock(sc, BGE_APE_LOCK_MEM);
6539 APE_WRITE_4(sc, BGE_APE_EVENT, BGE_APE_EVENT_1);
6542 bnx_ape_unlock(sc, BGE_APE_LOCK_MEM);
6546 if_printf(&sc->arpcom.ac_if,
6547 "APE event 0x%08x send timed out\n", event);
6552 bnx_ape_driver_state_change(struct bnx_softc *sc, int kind)
6554 uint32_t apedata, event;
6556 if ((sc->bnx_mfw_flags & BNX_MFW_ON_APE) == 0)
6560 case BNX_RESET_START:
6561 /* If this is the first load, clear the load counter. */
6562 apedata = APE_READ_4(sc, BGE_APE_HOST_SEG_SIG);
6563 if (apedata != BGE_APE_HOST_SEG_SIG_MAGIC) {
6564 APE_WRITE_4(sc, BGE_APE_HOST_INIT_COUNT, 0);
6566 apedata = APE_READ_4(sc, BGE_APE_HOST_INIT_COUNT);
6567 APE_WRITE_4(sc, BGE_APE_HOST_INIT_COUNT, ++apedata);
6569 APE_WRITE_4(sc, BGE_APE_HOST_SEG_SIG,
6570 BGE_APE_HOST_SEG_SIG_MAGIC);
6571 APE_WRITE_4(sc, BGE_APE_HOST_SEG_LEN,
6572 BGE_APE_HOST_SEG_LEN_MAGIC);
6574 /* Add some version info if bnx(4) supports it. */
6575 APE_WRITE_4(sc, BGE_APE_HOST_DRIVER_ID,
6576 BGE_APE_HOST_DRIVER_ID_MAGIC(1, 0));
6577 APE_WRITE_4(sc, BGE_APE_HOST_BEHAVIOR,
6578 BGE_APE_HOST_BEHAV_NO_PHYLOCK);
6579 APE_WRITE_4(sc, BGE_APE_HOST_HEARTBEAT_INT_MS,
6580 BGE_APE_HOST_HEARTBEAT_INT_DISABLE);
6581 APE_WRITE_4(sc, BGE_APE_HOST_DRVR_STATE,
6582 BGE_APE_HOST_DRVR_STATE_START);
6583 event = BGE_APE_EVENT_STATUS_STATE_START;
6586 case BNX_RESET_SHUTDOWN:
6587 APE_WRITE_4(sc, BGE_APE_HOST_DRVR_STATE,
6588 BGE_APE_HOST_DRVR_STATE_UNLOAD);
6589 event = BGE_APE_EVENT_STATUS_STATE_UNLOAD;
6592 case BNX_RESET_SUSPEND:
6593 event = BGE_APE_EVENT_STATUS_STATE_SUSPEND;
6600 bnx_ape_send_event(sc, event | BGE_APE_EVENT_STATUS_DRIVER_EVNT |
6601 BGE_APE_EVENT_STATUS_STATE_CHNGE);