/*
 * Copyright (c) 2001 Wind River Systems
 * Copyright (c) 1997, 1998, 1999, 2001
 *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/bge/if_bge.c,v 1.3.2.39 2005/07/03 03:41:18 silby Exp $
 */
#include "opt_ifpoll.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/interrupt.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/rman.h>
#include <sys/serialize.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <netinet/ip.h>
#include <netinet/tcp.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_poll.h>
#include <net/if_types.h>
#include <net/ifq_var.h>
#include <net/toeplitz.h>
#include <net/toeplitz2.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>

#include <dev/netif/mii_layer/mii.h>
#include <dev/netif/mii_layer/miivar.h>
#include <dev/netif/mii_layer/brgphyreg.h>

#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>
#include <bus/pci/pcidevs.h>

#include <dev/netif/bge/if_bgereg.h>
#include <dev/netif/bnx/if_bnxvar.h>

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"
#define BNX_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)

#define BNX_RESET_SHUTDOWN	0
#define BNX_RESET_START		1
#define BNX_RESET_SUSPEND	2

#define BNX_INTR_CKINTVL	((10 * hz) / 1000)	/* 10ms */
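/*
 * NOTE: hz is the kernel tick rate (ticks per second), so the expression
 * above is just 10 milliseconds converted into callout ticks; it is used
 * to arm the periodic timers that watch for lost interrupts on chips with
 * the status-tag bug (see bnx_check_intr_*()).
 */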
#ifdef BNX_RSS_DEBUG
#define BNX_RSS_DPRINTF(sc, lvl, fmt, ...) \
do { \
	if (sc->bnx_rss_debug >= lvl) \
		if_printf(&sc->arpcom.ac_if, fmt, __VA_ARGS__); \
} while (0)
#else	/* !BNX_RSS_DEBUG */
#define BNX_RSS_DPRINTF(sc, lvl, fmt, ...)	((void)0)
#endif	/* BNX_RSS_DEBUG */
static const struct bnx_type {
	uint16_t	bnx_vid;
	uint16_t	bnx_did;
	char		*bnx_name;
} bnx_devs[] = {
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5717,
		"Broadcom BCM5717 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5717C,
		"Broadcom BCM5717C Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5718,
		"Broadcom BCM5718 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5719,
		"Broadcom BCM5719 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5720_ALT,
		"Broadcom BCM5720 Gigabit Ethernet" },

	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5725,
		"Broadcom BCM5725 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5727,
		"Broadcom BCM5727 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5762,
		"Broadcom BCM5762 Gigabit Ethernet" },

	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57761,
		"Broadcom BCM57761 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57762,
		"Broadcom BCM57762 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57765,
		"Broadcom BCM57765 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57766,
		"Broadcom BCM57766 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57781,
		"Broadcom BCM57781 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57782,
		"Broadcom BCM57782 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57785,
		"Broadcom BCM57785 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57786,
		"Broadcom BCM57786 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57791,
		"Broadcom BCM57791 Fast Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57795,
		"Broadcom BCM57795 Fast Ethernet" },

	{ 0, 0, NULL }
};
static const int bnx_tx_mailbox[BNX_TX_RING_MAX] = {
	BGE_MBX_TX_HOST_PROD0_LO,
	BGE_MBX_TX_HOST_PROD0_HI,
	BGE_MBX_TX_HOST_PROD1_LO,
	BGE_MBX_TX_HOST_PROD1_HI
};

#define BNX_IS_JUMBO_CAPABLE(sc)	((sc)->bnx_flags & BNX_FLAG_JUMBO)
#define BNX_IS_5717_PLUS(sc)		((sc)->bnx_flags & BNX_FLAG_5717_PLUS)
#define BNX_IS_57765_PLUS(sc)		((sc)->bnx_flags & BNX_FLAG_57765_PLUS)
#define BNX_IS_57765_FAMILY(sc)	 \
	((sc)->bnx_flags & BNX_FLAG_57765_FAMILY)

typedef int	(*bnx_eaddr_fcn_t)(struct bnx_softc *, uint8_t[]);
static int	bnx_probe(device_t);
static int	bnx_attach(device_t);
static int	bnx_detach(device_t);
static void	bnx_shutdown(device_t);
static int	bnx_suspend(device_t);
static int	bnx_resume(device_t);
static int	bnx_miibus_readreg(device_t, int, int);
static int	bnx_miibus_writereg(device_t, int, int, int);
static void	bnx_miibus_statchg(device_t);

static int	bnx_handle_status(struct bnx_softc *);

static void	bnx_npoll(struct ifnet *, struct ifpoll_info *);
static void	bnx_npoll_rx(struct ifnet *, void *, int);
static void	bnx_npoll_tx(struct ifnet *, void *, int);
static void	bnx_npoll_tx_notag(struct ifnet *, void *, int);
static void	bnx_npoll_status(struct ifnet *);
static void	bnx_npoll_status_notag(struct ifnet *);

static void	bnx_intr_legacy(void *);
static void	bnx_msi(void *);
static void	bnx_intr(struct bnx_softc *);
static void	bnx_msix_status(void *);
static void	bnx_msix_tx_status(void *);
static void	bnx_msix_rx(void *);
static void	bnx_msix_rxtx(void *);
static void	bnx_enable_intr(struct bnx_softc *);
static void	bnx_disable_intr(struct bnx_softc *);
static void	bnx_txeof(struct bnx_tx_ring *, uint16_t);
static void	bnx_rxeof(struct bnx_rx_ret_ring *, uint16_t, int);
static int	bnx_alloc_intr(struct bnx_softc *);
static int	bnx_setup_intr(struct bnx_softc *);
static void	bnx_free_intr(struct bnx_softc *);
static void	bnx_teardown_intr(struct bnx_softc *, int);
static int	bnx_alloc_msix(struct bnx_softc *);
static void	bnx_free_msix(struct bnx_softc *, boolean_t);
static void	bnx_check_intr_rxtx(void *);
static void	bnx_check_intr_rx(void *);
static void	bnx_check_intr_tx(void *);
static void	bnx_rx_std_refill_ithread(void *);
static void	bnx_rx_std_refill(void *, void *);
static void	bnx_rx_std_refill_sched_ipi(void *);
static void	bnx_rx_std_refill_stop(void *);
static void	bnx_rx_std_refill_sched(struct bnx_rx_ret_ring *,
		    struct bnx_rx_std_ring *);

static void	bnx_start(struct ifnet *, struct ifaltq_subque *);
static int	bnx_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	bnx_init(void *);
static void	bnx_stop(struct bnx_softc *);
static void	bnx_watchdog(struct ifaltq_subque *);
static int	bnx_ifmedia_upd(struct ifnet *);
static void	bnx_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static void	bnx_tick(void *);
static void	bnx_serialize(struct ifnet *, enum ifnet_serialize);
static void	bnx_deserialize(struct ifnet *, enum ifnet_serialize);
static int	bnx_tryserialize(struct ifnet *, enum ifnet_serialize);
#ifdef INVARIANTS
static void	bnx_serialize_assert(struct ifnet *, enum ifnet_serialize,
		    boolean_t);
#endif
static void	bnx_serialize_skipmain(struct bnx_softc *);
static void	bnx_deserialize_skipmain(struct bnx_softc *sc);

static int	bnx_alloc_jumbo_mem(struct bnx_softc *);
static void	bnx_free_jumbo_mem(struct bnx_softc *);
static struct bnx_jslot
		*bnx_jalloc(struct bnx_softc *);
static void	bnx_jfree(void *);
static void	bnx_jref(void *);
static int	bnx_newbuf_std(struct bnx_rx_ret_ring *, int, int);
static int	bnx_newbuf_jumbo(struct bnx_softc *, int, int);
static void	bnx_setup_rxdesc_std(struct bnx_rx_std_ring *, int);
static void	bnx_setup_rxdesc_jumbo(struct bnx_softc *, int);
static int	bnx_init_rx_ring_std(struct bnx_rx_std_ring *);
static void	bnx_free_rx_ring_std(struct bnx_rx_std_ring *);
static int	bnx_init_rx_ring_jumbo(struct bnx_softc *);
static void	bnx_free_rx_ring_jumbo(struct bnx_softc *);
static void	bnx_free_tx_ring(struct bnx_tx_ring *);
static int	bnx_init_tx_ring(struct bnx_tx_ring *);
static int	bnx_create_tx_ring(struct bnx_tx_ring *);
static void	bnx_destroy_tx_ring(struct bnx_tx_ring *);
static int	bnx_create_rx_ret_ring(struct bnx_rx_ret_ring *);
static void	bnx_destroy_rx_ret_ring(struct bnx_rx_ret_ring *);
static int	bnx_dma_alloc(device_t);
static void	bnx_dma_free(struct bnx_softc *);
static int	bnx_dma_block_alloc(struct bnx_softc *, bus_size_t,
		    bus_dma_tag_t *, bus_dmamap_t *, void **, bus_addr_t *);
static void	bnx_dma_block_free(bus_dma_tag_t, bus_dmamap_t, void *);
static struct mbuf *
		bnx_defrag_shortdma(struct mbuf *);
static int	bnx_encap(struct bnx_tx_ring *, struct mbuf **,
		    uint32_t *, int *);
static int	bnx_setup_tso(struct bnx_tx_ring *, struct mbuf **,
		    uint16_t *, uint16_t *);
static void	bnx_setup_serialize(struct bnx_softc *);
static void	bnx_set_tick_cpuid(struct bnx_softc *, boolean_t);
static void	bnx_setup_ring_cnt(struct bnx_softc *);

static struct pktinfo *bnx_rss_info(struct pktinfo *,
		    const struct bge_rx_bd *);
static void	bnx_init_rss(struct bnx_softc *);
static void	bnx_reset(struct bnx_softc *);
static int	bnx_chipinit(struct bnx_softc *);
static int	bnx_blockinit(struct bnx_softc *);
static void	bnx_stop_block(struct bnx_softc *, bus_size_t, uint32_t);
static void	bnx_enable_msi(struct bnx_softc *, boolean_t);
static void	bnx_setmulti(struct bnx_softc *);
static void	bnx_setpromisc(struct bnx_softc *);
static void	bnx_stats_update_regs(struct bnx_softc *);
static uint32_t	bnx_dma_swap_options(struct bnx_softc *);

static uint32_t	bnx_readmem_ind(struct bnx_softc *, uint32_t);
static void	bnx_writemem_ind(struct bnx_softc *, uint32_t, uint32_t);
#ifdef notdef
static uint32_t	bnx_readreg_ind(struct bnx_softc *, uint32_t);
#endif
static void	bnx_writemem_direct(struct bnx_softc *, uint32_t, uint32_t);
static void	bnx_writembx(struct bnx_softc *, int, int);
static int	bnx_read_nvram(struct bnx_softc *, caddr_t, int, int);
static uint8_t	bnx_eeprom_getbyte(struct bnx_softc *, uint32_t, uint8_t *);
static int	bnx_read_eeprom(struct bnx_softc *, caddr_t, uint32_t, size_t);

static void	bnx_tbi_link_upd(struct bnx_softc *, uint32_t);
static void	bnx_copper_link_upd(struct bnx_softc *, uint32_t);
static void	bnx_autopoll_link_upd(struct bnx_softc *, uint32_t);
static void	bnx_link_poll(struct bnx_softc *);

static int	bnx_get_eaddr_mem(struct bnx_softc *, uint8_t[]);
static int	bnx_get_eaddr_nvram(struct bnx_softc *, uint8_t[]);
static int	bnx_get_eaddr_eeprom(struct bnx_softc *, uint8_t[]);
static int	bnx_get_eaddr(struct bnx_softc *, uint8_t[]);

static void	bnx_coal_change(struct bnx_softc *);
static int	bnx_sysctl_force_defrag(SYSCTL_HANDLER_ARGS);
static int	bnx_sysctl_tx_wreg(SYSCTL_HANDLER_ARGS);
static int	bnx_sysctl_rx_coal_ticks(SYSCTL_HANDLER_ARGS);
static int	bnx_sysctl_tx_coal_ticks(SYSCTL_HANDLER_ARGS);
static int	bnx_sysctl_rx_coal_bds(SYSCTL_HANDLER_ARGS);
static int	bnx_sysctl_rx_coal_bds_poll(SYSCTL_HANDLER_ARGS);
static int	bnx_sysctl_tx_coal_bds(SYSCTL_HANDLER_ARGS);
static int	bnx_sysctl_tx_coal_bds_poll(SYSCTL_HANDLER_ARGS);
static int	bnx_sysctl_rx_coal_bds_int(SYSCTL_HANDLER_ARGS);
static int	bnx_sysctl_tx_coal_bds_int(SYSCTL_HANDLER_ARGS);
static int	bnx_sysctl_coal_chg(SYSCTL_HANDLER_ARGS, uint32_t *,
		    int, uint32_t);

static int	bnx_sysctl_npoll_offset(SYSCTL_HANDLER_ARGS);
static int	bnx_sysctl_npoll_rxoff(SYSCTL_HANDLER_ARGS);
static int	bnx_sysctl_npoll_txoff(SYSCTL_HANDLER_ARGS);

static int	bnx_sysctl_std_refill(SYSCTL_HANDLER_ARGS);

static void	bnx_sig_post_reset(struct bnx_softc *, int);
static void	bnx_sig_pre_reset(struct bnx_softc *, int);
static void	bnx_ape_lock_init(struct bnx_softc *);
static void	bnx_ape_read_fw_ver(struct bnx_softc *);
static int	bnx_ape_lock(struct bnx_softc *, int);
static void	bnx_ape_unlock(struct bnx_softc *, int);
static void	bnx_ape_send_event(struct bnx_softc *, uint32_t);
static void	bnx_ape_driver_state_change(struct bnx_softc *, int);
static int	bnx_msi_enable = 1;
static int	bnx_msix_enable = 1;

static int	bnx_rx_rings = 0;	/* auto */
static int	bnx_tx_rings = 0;	/* auto */

TUNABLE_INT("hw.bnx.msi.enable", &bnx_msi_enable);
TUNABLE_INT("hw.bnx.msix.enable", &bnx_msix_enable);
TUNABLE_INT("hw.bnx.rx_rings", &bnx_rx_rings);
TUNABLE_INT("hw.bnx.tx_rings", &bnx_tx_rings);

static device_method_t bnx_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		bnx_probe),
	DEVMETHOD(device_attach,	bnx_attach),
	DEVMETHOD(device_detach,	bnx_detach),
	DEVMETHOD(device_shutdown,	bnx_shutdown),
	DEVMETHOD(device_suspend,	bnx_suspend),
	DEVMETHOD(device_resume,	bnx_resume),

	/* Bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	bnx_miibus_readreg),
	DEVMETHOD(miibus_writereg,	bnx_miibus_writereg),
	DEVMETHOD(miibus_statchg,	bnx_miibus_statchg),

	DEVMETHOD_END
};

static DEFINE_CLASS_0(bnx, bnx_driver, bnx_methods, sizeof(struct bnx_softc));
static devclass_t bnx_devclass;

DECLARE_DUMMY_MODULE(if_bnx);
MODULE_DEPEND(if_bnx, miibus, 1, 1, 1);
DRIVER_MODULE(if_bnx, pci, bnx_driver, bnx_devclass, NULL, NULL);
DRIVER_MODULE(miibus, bnx, miibus_driver, miibus_devclass, NULL, NULL);
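/*
 * The controller exposes its internal memory through a window in PCI
 * configuration space: BGE_PCI_MEMWIN_BASEADDR selects which region of
 * NIC memory is visible and BGE_PCI_MEMWIN_DATA reads or writes through
 * that window.  The helpers below use this mechanism for indirect access
 * and restore the window base to 0 afterwards, so other users always
 * find the window in a known state.
 */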
static uint32_t
bnx_readmem_ind(struct bnx_softc *sc, uint32_t off)
{
	device_t dev = sc->bnx_dev;
	uint32_t val;

	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
	val = pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4);
	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
	return (val);
}
static void
bnx_writemem_ind(struct bnx_softc *sc, uint32_t off, uint32_t val)
{
	device_t dev = sc->bnx_dev;

	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
	pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
}

static void
bnx_writemem_direct(struct bnx_softc *sc, uint32_t off, uint32_t val)
{
	CSR_WRITE_4(sc, off, val);
}

static void
bnx_writembx(struct bnx_softc *sc, int off, int val)
{
	CSR_WRITE_4(sc, off, val);
}

/*
 * Read a sequence of bytes from NVRAM.
 */
static int
bnx_read_nvram(struct bnx_softc *sc, caddr_t dest, int off, int cnt)
{
	return (1);
}
/*
 * Read a byte of data stored in the EEPROM at address 'addr.'  The
 * BCM570x supports both the traditional bitbang interface and an
 * auto access interface for reading the EEPROM.  We use the auto
 * access method.
 */
static uint8_t
bnx_eeprom_getbyte(struct bnx_softc *sc, uint32_t addr, uint8_t *dest)
{
	int i;
	uint32_t byte = 0;

	/*
	 * Enable use of auto EEPROM access so we can avoid
	 * having to use the bitbang method.
	 */
	BNX_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);

	/* Reset the EEPROM, load the clock period. */
	CSR_WRITE_4(sc, BGE_EE_ADDR,
	    BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
	DELAY(20);

	/* Issue the read EEPROM command. */
	CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);

	/* Wait for completion */
	for (i = 0; i < BNX_TIMEOUT * 10; i++) {
		DELAY(10);
		if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
			break;
	}

	if (i == BNX_TIMEOUT * 10) {
		if_printf(&sc->arpcom.ac_if, "eeprom read timed out\n");
		return (1);
	}

	/* Get result. */
	byte = CSR_READ_4(sc, BGE_EE_DATA);
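	/*
	 * The auto access interface always returns a whole 32-bit word
	 * from BGE_EE_DATA; the requested byte is extracted below by
	 * shifting in the byte lane selected by the low two address bits.
	 */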
	*dest = (byte >> ((addr % 4) * 8)) & 0xFF;

	return (0);
}

/*
 * Read a sequence of bytes from the EEPROM.
 */
static int
bnx_read_eeprom(struct bnx_softc *sc, caddr_t dest, uint32_t off, size_t len)
{
	size_t i;
	int err;
	uint8_t byte;

	for (byte = 0, err = 0, i = 0; i < len; i++) {
		err = bnx_eeprom_getbyte(sc, off + i, &byte);
		if (err)
			break;
		*(dest + i) = byte;
	}

	return (err ? 1 : 0);
}
static int
bnx_miibus_readreg(device_t dev, int phy, int reg)
{
	struct bnx_softc *sc = device_get_softc(dev);
	uint32_t val;
	int i;

	KASSERT(phy == sc->bnx_phyno,
	    ("invalid phyno %d, should be %d", phy, sc->bnx_phyno));

	if (bnx_ape_lock(sc, sc->bnx_phy_ape_lock) != 0)
		return 0;

	/* Clear the autopoll bit if set, otherwise may trigger PCI errors. */
	if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) {
		CSR_WRITE_4(sc, BGE_MI_MODE,
		    sc->bnx_mi_mode & ~BGE_MIMODE_AUTOPOLL);
		DELAY(80);
	}

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ | BGE_MICOMM_BUSY |
	    BGE_MIPHY(phy) | BGE_MIREG(reg));

	/* Poll for the PHY register access to complete. */
	for (i = 0; i < BNX_TIMEOUT; i++) {
		DELAY(10);
		val = CSR_READ_4(sc, BGE_MI_COMM);
		if ((val & BGE_MICOMM_BUSY) == 0) {
			DELAY(5);
			val = CSR_READ_4(sc, BGE_MI_COMM);
			break;
		}
	}
	if (i == BNX_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if, "PHY read timed out "
		    "(phy %d, reg %d, val 0x%08x)\n", phy, reg, val);
		val = 0;
	}

	/* Restore the autopoll bit if necessary. */
	if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) {
		CSR_WRITE_4(sc, BGE_MI_MODE, sc->bnx_mi_mode);
		DELAY(80);
	}

	bnx_ape_unlock(sc, sc->bnx_phy_ape_lock);

	if (val & BGE_MICOMM_READFAIL)
		return 0;

	return (val & 0xFFFF);
}
static int
bnx_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct bnx_softc *sc = device_get_softc(dev);
	int i;

	KASSERT(phy == sc->bnx_phyno,
	    ("invalid phyno %d, should be %d", phy, sc->bnx_phyno));

	if (bnx_ape_lock(sc, sc->bnx_phy_ape_lock) != 0)
		return 0;

	/* Clear the autopoll bit if set, otherwise may trigger PCI errors. */
	if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) {
		CSR_WRITE_4(sc, BGE_MI_MODE,
		    sc->bnx_mi_mode & ~BGE_MIMODE_AUTOPOLL);
		DELAY(80);
	}

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE | BGE_MICOMM_BUSY |
	    BGE_MIPHY(phy) | BGE_MIREG(reg) | val);

	for (i = 0; i < BNX_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) {
			DELAY(5);
			CSR_READ_4(sc, BGE_MI_COMM); /* dummy read */
			break;
		}
	}
	if (i == BNX_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if, "PHY write timed out "
		    "(phy %d, reg %d, val %d)\n", phy, reg, val);
	}

	/* Restore the autopoll bit if necessary. */
	if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) {
		CSR_WRITE_4(sc, BGE_MI_MODE, sc->bnx_mi_mode);
		DELAY(80);
	}

	bnx_ape_unlock(sc, sc->bnx_phy_ape_lock);

	return 0;
}
static void
bnx_miibus_statchg(device_t dev)
{
	struct bnx_softc *sc;
	struct mii_data *mii;
	uint32_t mac_mode;

	sc = device_get_softc(dev);
	if ((sc->arpcom.ac_if.if_flags & IFF_RUNNING) == 0)
		return;

	mii = device_get_softc(sc->bnx_miibus);

	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
		case IFM_1000_T:
		case IFM_1000_SX:
			sc->bnx_link = 1;
			break;
		default:
			sc->bnx_link = 0;
			break;
		}
	} else {
		sc->bnx_link = 0;
	}
	if (sc->bnx_link == 0)
		return;

	/*
	 * APE firmware touches these registers to keep the MAC
	 * connected to the outside world.  Try to keep the
	 * accesses atomic.
	 */
	mac_mode = CSR_READ_4(sc, BGE_MAC_MODE) &
	    ~(BGE_MACMODE_PORTMODE | BGE_MACMODE_HALF_DUPLEX);

	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
	    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
		mac_mode |= BGE_PORTMODE_GMII;
	else
		mac_mode |= BGE_PORTMODE_MII;

	if ((mii->mii_media_active & IFM_GMASK) != IFM_FDX)
		mac_mode |= BGE_MACMODE_HALF_DUPLEX;

	CSR_WRITE_4(sc, BGE_MAC_MODE, mac_mode);
}
/*
 * Memory management for jumbo frames.
 */
static int
bnx_alloc_jumbo_mem(struct bnx_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct bnx_jslot *entry;
	uint8_t *ptr;
	bus_addr_t paddr;
	int i, error;

	/*
	 * Create tag for jumbo mbufs.
	 * This is really a bit of a kludge. We allocate a special
	 * jumbo buffer pool which (thanks to the way our DMA
	 * memory allocation works) will consist of contiguous
	 * pages. This means that even though a jumbo buffer might
	 * be larger than a page size, we don't really need to
	 * map it into more than one DMA segment. However, the
	 * default mbuf tag will result in multi-segment mappings,
	 * so we have to create a special jumbo mbuf tag that
	 * lets us get away with mapping the jumbo buffers as
	 * a single segment. I think eventually the driver should
	 * be changed so that it uses ordinary mbufs and cluster
	 * buffers, i.e. jumbo frames can span multiple DMA
	 * descriptors. But that's a project for another day.
	 */

	/*
	 * Create DMA resources for the jumbo RX ring.
	 */
	error = bnx_dma_block_alloc(sc, BGE_JUMBO_RX_RING_SZ,
	    &sc->bnx_cdata.bnx_rx_jumbo_ring_tag,
	    &sc->bnx_cdata.bnx_rx_jumbo_ring_map,
	    (void *)&sc->bnx_ldata.bnx_rx_jumbo_ring,
	    &sc->bnx_ldata.bnx_rx_jumbo_ring_paddr);
	if (error) {
		if_printf(ifp, "could not create jumbo RX ring\n");
		return error;
	}

	/*
	 * Create DMA resources for the jumbo buffer block.
	 */
	error = bnx_dma_block_alloc(sc, BNX_JMEM,
	    &sc->bnx_cdata.bnx_jumbo_tag,
	    &sc->bnx_cdata.bnx_jumbo_map,
	    (void **)&sc->bnx_ldata.bnx_jumbo_buf,
	    &paddr);
	if (error) {
		if_printf(ifp, "could not create jumbo buffer\n");
		bnx_free_jumbo_mem(sc);
		return error;
	}

	SLIST_INIT(&sc->bnx_jfree_listhead);

	/*
	 * Now divide it up into 9K pieces and save the addresses
	 * in an array. Note that we play an evil trick here by using
	 * the first few bytes in the buffer to hold the address
	 * of the softc structure for this interface. This is because
	 * bnx_jfree() needs it, but it is called by the mbuf management
	 * code which will not pass it to us explicitly.
	 */
	for (i = 0, ptr = sc->bnx_ldata.bnx_jumbo_buf; i < BNX_JSLOTS; i++) {
		entry = &sc->bnx_cdata.bnx_jslots[i];
		entry->bnx_sc = sc;
		entry->bnx_buf = ptr;
		entry->bnx_paddr = paddr;
		entry->bnx_inuse = 0;
		entry->bnx_slot = i;
		SLIST_INSERT_HEAD(&sc->bnx_jfree_listhead, entry, jslot_link);
		ptr += BNX_JLEN;
		paddr += BNX_JLEN;
	}
	return 0;
}
static void
bnx_free_jumbo_mem(struct bnx_softc *sc)
{
	/* Destroy jumbo RX ring. */
	bnx_dma_block_free(sc->bnx_cdata.bnx_rx_jumbo_ring_tag,
	    sc->bnx_cdata.bnx_rx_jumbo_ring_map,
	    sc->bnx_ldata.bnx_rx_jumbo_ring);

	/* Destroy jumbo buffer block. */
	bnx_dma_block_free(sc->bnx_cdata.bnx_jumbo_tag,
	    sc->bnx_cdata.bnx_jumbo_map,
	    sc->bnx_ldata.bnx_jumbo_buf);
}
/*
 * Allocate a jumbo buffer.
 */
static struct bnx_jslot *
bnx_jalloc(struct bnx_softc *sc)
{
	struct bnx_jslot *entry;

	lwkt_serialize_enter(&sc->bnx_jslot_serializer);
	entry = SLIST_FIRST(&sc->bnx_jfree_listhead);
	if (entry) {
		SLIST_REMOVE_HEAD(&sc->bnx_jfree_listhead, jslot_link);
		entry->bnx_inuse = 1;
	} else {
		if_printf(&sc->arpcom.ac_if, "no free jumbo buffers\n");
	}
	lwkt_serialize_exit(&sc->bnx_jslot_serializer);
	return (entry);
}
/*
 * Adjust usage count on a jumbo buffer.
 */
static void
bnx_jref(void *arg)
{
	struct bnx_jslot *entry = (struct bnx_jslot *)arg;
	struct bnx_softc *sc = entry->bnx_sc;

	if (sc == NULL)
		panic("bnx_jref: can't find softc pointer!");

	if (&sc->bnx_cdata.bnx_jslots[entry->bnx_slot] != entry) {
		panic("bnx_jref: asked to reference buffer "
		    "that we don't manage!");
	} else if (entry->bnx_inuse == 0) {
		panic("bnx_jref: buffer already free!");
	} else {
		atomic_add_int(&entry->bnx_inuse, 1);
	}
}
/*
 * Release a jumbo buffer.
 */
static void
bnx_jfree(void *arg)
{
	struct bnx_jslot *entry = (struct bnx_jslot *)arg;
	struct bnx_softc *sc = entry->bnx_sc;

	if (sc == NULL)
		panic("bnx_jfree: can't find softc pointer!");

	if (&sc->bnx_cdata.bnx_jslots[entry->bnx_slot] != entry) {
		panic("bnx_jfree: asked to free buffer that we don't manage!");
	} else if (entry->bnx_inuse == 0) {
		panic("bnx_jfree: buffer already free!");
	} else {
		/*
		 * Possible MP race to 0, use the serializer.  The atomic insn
		 * is still needed for races against bnx_jref().
		 */
		lwkt_serialize_enter(&sc->bnx_jslot_serializer);
		atomic_subtract_int(&entry->bnx_inuse, 1);
		if (entry->bnx_inuse == 0) {
			SLIST_INSERT_HEAD(&sc->bnx_jfree_listhead,
			    entry, jslot_link);
		}
		lwkt_serialize_exit(&sc->bnx_jslot_serializer);
	}
}
/*
 * Initialize a standard receive ring descriptor.
 */
static int
bnx_newbuf_std(struct bnx_rx_ret_ring *ret, int i, int init)
{
	struct mbuf *m_new = NULL;
	bus_dma_segment_t seg;
	bus_dmamap_t map;
	int error, nsegs;
	struct bnx_rx_buf *rb;

	rb = &ret->bnx_std->bnx_rx_std_buf[i];
	KASSERT(!rb->bnx_rx_refilled, ("RX buf %dth has been refilled", i));

	m_new = m_getcl(init ? M_WAITOK : M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m_new == NULL)
		return ENOBUFS;
	m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
	m_adj(m_new, ETHER_ALIGN);
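	/*
	 * NOTE: ETHER_ALIGN is 2, so the m_adj() above offsets the start
	 * of the frame such that the IP header following the 14-byte
	 * Ethernet header ends up on a 32-bit boundary.
	 */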
	error = bus_dmamap_load_mbuf_segment(ret->bnx_rx_mtag,
	    ret->bnx_rx_tmpmap, m_new, &seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m_new);
		return error;
	}

	if (!init) {
		bus_dmamap_sync(ret->bnx_rx_mtag, rb->bnx_rx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(ret->bnx_rx_mtag, rb->bnx_rx_dmamap);
	}
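	/*
	 * The new mbuf was loaded into the spare DMA map, so simply swap
	 * the spare with the slot's map below; this way the slot always
	 * owns a loaded map and no extra load/unload cycle is needed.
	 */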
	map = ret->bnx_rx_tmpmap;
	ret->bnx_rx_tmpmap = rb->bnx_rx_dmamap;
	rb->bnx_rx_dmamap = map;

	rb->bnx_rx_mbuf = m_new;
	rb->bnx_rx_paddr = seg.ds_addr;
	rb->bnx_rx_len = m_new->m_len;

	cpu_sfence();
	rb->bnx_rx_refilled = 1;

	return 0;
}
static void
bnx_setup_rxdesc_std(struct bnx_rx_std_ring *std, int i)
{
	struct bnx_rx_buf *rb;
	struct bge_rx_bd *r;
	bus_addr_t paddr;
	int len;

	rb = &std->bnx_rx_std_buf[i];
	KASSERT(rb->bnx_rx_refilled, ("RX buf %dth is not refilled", i));

	paddr = rb->bnx_rx_paddr;
	len = rb->bnx_rx_len;

	cpu_mfence();

	rb->bnx_rx_refilled = 0;

	r = &std->bnx_rx_std_ring[i];
	r->bge_addr.bge_addr_lo = BGE_ADDR_LO(paddr);
	r->bge_addr.bge_addr_hi = BGE_ADDR_HI(paddr);
	r->bge_len = len;
	r->bge_idx = i;
	r->bge_flags = BGE_RXBDFLAG_END;
}
/*
 * Initialize a jumbo receive ring descriptor. This allocates
 * a jumbo buffer from the pool managed internally by the driver.
 */
static int
bnx_newbuf_jumbo(struct bnx_softc *sc, int i, int init)
{
	struct mbuf *m_new = NULL;
	struct bnx_jslot *buf;
	bus_addr_t paddr;

	/* Allocate the mbuf. */
	MGETHDR(m_new, init ? M_WAITOK : M_NOWAIT, MT_DATA);
	if (m_new == NULL)
		return ENOBUFS;

	/* Allocate the jumbo buffer */
	buf = bnx_jalloc(sc);
	if (buf == NULL) {
		m_freem(m_new);
		return ENOBUFS;
	}

	/* Attach the buffer to the mbuf. */
	m_new->m_ext.ext_arg = buf;
	m_new->m_ext.ext_buf = buf->bnx_buf;
	m_new->m_ext.ext_free = bnx_jfree;
	m_new->m_ext.ext_ref = bnx_jref;
	m_new->m_ext.ext_size = BNX_JUMBO_FRAMELEN;

	m_new->m_flags |= M_EXT;

	m_new->m_data = m_new->m_ext.ext_buf;
	m_new->m_len = m_new->m_pkthdr.len = m_new->m_ext.ext_size;

	paddr = buf->bnx_paddr;
	m_adj(m_new, ETHER_ALIGN);
	paddr += ETHER_ALIGN;
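	/*
	 * NOTE: the DMA address handed to the chip must be advanced by the
	 * same ETHER_ALIGN offset that m_adj() applied to the mbuf data
	 * pointer, so the CPU and the device agree on where the frame
	 * starts in the jumbo buffer.
	 */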
	/* Save necessary information */
	sc->bnx_cdata.bnx_rx_jumbo_chain[i].bnx_rx_mbuf = m_new;
	sc->bnx_cdata.bnx_rx_jumbo_chain[i].bnx_rx_paddr = paddr;

	/* Set up the descriptor. */
	bnx_setup_rxdesc_jumbo(sc, i);
	return 0;
}

static void
bnx_setup_rxdesc_jumbo(struct bnx_softc *sc, int i)
{
	struct bge_rx_bd *r;
	struct bnx_rx_buf *rc;

	r = &sc->bnx_ldata.bnx_rx_jumbo_ring[i];
	rc = &sc->bnx_cdata.bnx_rx_jumbo_chain[i];

	r->bge_addr.bge_addr_lo = BGE_ADDR_LO(rc->bnx_rx_paddr);
	r->bge_addr.bge_addr_hi = BGE_ADDR_HI(rc->bnx_rx_paddr);
	r->bge_len = rc->bnx_rx_mbuf->m_len;
	r->bge_idx = i;
	r->bge_flags = BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING;
}
static int
bnx_init_rx_ring_std(struct bnx_rx_std_ring *std)
{
	int i, error;

	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
		/* Use the first RX return ring's tmp RX mbuf DMA map */
		error = bnx_newbuf_std(&std->bnx_sc->bnx_rx_ret_ring[0], i, 1);
		if (error)
			return error;
		bnx_setup_rxdesc_std(std, i);
	}

	std->bnx_rx_std_used = 0;
	std->bnx_rx_std_refill = 0;
	std->bnx_rx_std_running = 0;

	lwkt_serialize_handler_enable(&std->bnx_rx_std_serialize);

	std->bnx_rx_std = BGE_STD_RX_RING_CNT - 1;
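	/*
	 * Hand the fully populated ring to the chip: the producer index
	 * written to the mailbox below tells the hardware how far the
	 * standard ring has valid buffers.
	 */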
	bnx_writembx(std->bnx_sc, BGE_MBX_RX_STD_PROD_LO, std->bnx_rx_std);

	return (0);
}

static void
bnx_free_rx_ring_std(struct bnx_rx_std_ring *std)
{
	int i;

	lwkt_serialize_handler_disable(&std->bnx_rx_std_serialize);

	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
		struct bnx_rx_buf *rb = &std->bnx_rx_std_buf[i];

		rb->bnx_rx_refilled = 0;
		if (rb->bnx_rx_mbuf != NULL) {
			bus_dmamap_unload(std->bnx_rx_mtag, rb->bnx_rx_dmamap);
			m_freem(rb->bnx_rx_mbuf);
			rb->bnx_rx_mbuf = NULL;
		}
		bzero(&std->bnx_rx_std_ring[i], sizeof(struct bge_rx_bd));
	}
}
static int
bnx_init_rx_ring_jumbo(struct bnx_softc *sc)
{
	struct bge_rcb *rcb;
	int i, error;

	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
		error = bnx_newbuf_jumbo(sc, i, 1);
		if (error)
			return error;
	}

	sc->bnx_jumbo = BGE_JUMBO_RX_RING_CNT - 1;

	rcb = &sc->bnx_ldata.bnx_info.bnx_jumbo_rx_rcb;
	rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 0);
	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);

	bnx_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bnx_jumbo);

	return (0);
}

static void
bnx_free_rx_ring_jumbo(struct bnx_softc *sc)
{
	int i;

	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
		struct bnx_rx_buf *rc = &sc->bnx_cdata.bnx_rx_jumbo_chain[i];

		if (rc->bnx_rx_mbuf != NULL) {
			m_freem(rc->bnx_rx_mbuf);
			rc->bnx_rx_mbuf = NULL;
		}
		bzero(&sc->bnx_ldata.bnx_rx_jumbo_ring[i],
		    sizeof(struct bge_rx_bd));
	}
}
static void
bnx_free_tx_ring(struct bnx_tx_ring *txr)
{
	int i;

	for (i = 0; i < BGE_TX_RING_CNT; i++) {
		struct bnx_tx_buf *buf = &txr->bnx_tx_buf[i];

		if (buf->bnx_tx_mbuf != NULL) {
			bus_dmamap_unload(txr->bnx_tx_mtag,
			    buf->bnx_tx_dmamap);
			m_freem(buf->bnx_tx_mbuf);
			buf->bnx_tx_mbuf = NULL;
		}
		bzero(&txr->bnx_tx_ring[i], sizeof(struct bge_tx_bd));
	}
	txr->bnx_tx_saved_considx = BNX_TXCONS_UNSET;
}

static int
bnx_init_tx_ring(struct bnx_tx_ring *txr)
{
	txr->bnx_tx_cnt = 0;
	txr->bnx_tx_saved_considx = 0;
	txr->bnx_tx_prodidx = 0;

	/* Initialize transmit producer index for host-memory send ring. */
	bnx_writembx(txr->bnx_sc, txr->bnx_tx_mbx, txr->bnx_tx_prodidx);

	return (0);
}
static void
bnx_setmulti(struct bnx_softc *sc)
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	uint32_t hashes[4] = { 0, 0, 0, 0 };
	int h, i;

	ifp = &sc->arpcom.ac_if;

	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		for (i = 0; i < 4; i++)
			CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
		return;
	}

	/* First, zot all the existing filters. */
	for (i = 0; i < 4; i++)
		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);

	/* Now program new ones. */
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		h = ether_crc32_le(
		    LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
		    ETHER_ADDR_LEN) & 0x7f;
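		/*
		 * The low 7 bits of the little-endian CRC select one of
		 * 128 filter bits: bits 6-5 pick one of the four 32-bit
		 * BGE_MAR registers and bits 4-0 the bit within it.
		 */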
		hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
	}

	for (i = 0; i < 4; i++)
		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
}
/*
 * Do endian, PCI and DMA initialization.  Also check the on-board ROM
 * self-test results.
 */
static int
bnx_chipinit(struct bnx_softc *sc)
{
	uint32_t dma_rw_ctl, mode_ctl;
	int i;

	/* Set endian type before we access any non-PCI registers. */
	pci_write_config(sc->bnx_dev, BGE_PCI_MISC_CTL,
	    BGE_INIT | BGE_PCIMISCCTL_TAGGED_STATUS, 4);

	/*
	 * Clear the MAC statistics block in the NIC's
	 * internal memory.
	 */
	for (i = BGE_STATS_BLOCK;
	    i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t))
		BNX_MEMWIN_WRITE(sc, i, 0);

	for (i = BGE_STATUS_BLOCK;
	    i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t))
		BNX_MEMWIN_WRITE(sc, i, 0);

	if (BNX_IS_57765_FAMILY(sc)) {
		uint32_t val;

		if (sc->bnx_chipid == BGE_CHIPID_BCM57765_A0) {
			mode_ctl = CSR_READ_4(sc, BGE_MODE_CTL);
			val = mode_ctl & ~BGE_MODECTL_PCIE_PORTS;

			/* Access the lower 1K of PL PCI-E block registers. */
			CSR_WRITE_4(sc, BGE_MODE_CTL,
			    val | BGE_MODECTL_PCIE_PL_SEL);

			val = CSR_READ_4(sc, BGE_PCIE_PL_LO_PHYCTL5);
			val |= BGE_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ;
			CSR_WRITE_4(sc, BGE_PCIE_PL_LO_PHYCTL5, val);

			CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl);
		}
		if (sc->bnx_chiprev != BGE_CHIPREV_57765_AX) {
			/* Fix transmit hangs */
			val = CSR_READ_4(sc, BGE_CPMU_PADRNG_CTL);
			val |= BGE_CPMU_PADRNG_CTL_RDIV2;
			CSR_WRITE_4(sc, BGE_CPMU_PADRNG_CTL, val);

			mode_ctl = CSR_READ_4(sc, BGE_MODE_CTL);
			val = mode_ctl & ~BGE_MODECTL_PCIE_PORTS;

			/* Access the lower 1K of DL PCI-E block registers. */
			CSR_WRITE_4(sc, BGE_MODE_CTL,
			    val | BGE_MODECTL_PCIE_DL_SEL);

			val = CSR_READ_4(sc, BGE_PCIE_DL_LO_FTSMAX);
			val &= ~BGE_PCIE_DL_LO_FTSMAX_MASK;
			val |= BGE_PCIE_DL_LO_FTSMAX_VAL;
			CSR_WRITE_4(sc, BGE_PCIE_DL_LO_FTSMAX, val);

			CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl);
		}

		val = CSR_READ_4(sc, BGE_CPMU_LSPD_10MB_CLK);
		val &= ~BGE_CPMU_LSPD_10MB_MACCLK_MASK;
		val |= BGE_CPMU_LSPD_10MB_MACCLK_6_25;
		CSR_WRITE_4(sc, BGE_CPMU_LSPD_10MB_CLK, val);
	}

	/*
	 * Set up the PCI DMA control register.
	 */
	dma_rw_ctl = pci_read_config(sc->bnx_dev, BGE_PCI_DMA_RW_CTL, 4);
	/*
	 * Disable 32bytes cache alignment for DMA write to host memory
	 *
	 * NOTE:
	 * 64bytes cache alignment for DMA write to host memory is still
	 * enabled.
	 */
	dma_rw_ctl |= BGE_PCIDMARWCTL_DIS_CACHE_ALIGNMENT;
	if (sc->bnx_chipid == BGE_CHIPID_BCM57765_A0)
		dma_rw_ctl &= ~BGE_PCIDMARWCTL_CRDRDR_RDMA_MRRS_MSK;
	/*
	 * Enable HW workaround for controllers that misinterpret
	 * a status tag update and leave interrupts permanently
	 * disabled.
	 */
	if (sc->bnx_asicrev != BGE_ASICREV_BCM5717 &&
	    sc->bnx_asicrev != BGE_ASICREV_BCM5762 &&
	    !BNX_IS_57765_FAMILY(sc))
		dma_rw_ctl |= BGE_PCIDMARWCTL_TAGGED_STATUS_WA;
	if (bootverbose) {
		if_printf(&sc->arpcom.ac_if, "DMA read/write %#x\n",
		    dma_rw_ctl);
	}
	pci_write_config(sc->bnx_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);

	/*
	 * Set up general mode register.
	 */
	mode_ctl = bnx_dma_swap_options(sc);
	if (sc->bnx_asicrev == BGE_ASICREV_BCM5720 ||
	    sc->bnx_asicrev == BGE_ASICREV_BCM5762) {
		/* Retain Host-2-BMC settings written by APE firmware. */
		mode_ctl |= CSR_READ_4(sc, BGE_MODE_CTL) &
		    (BGE_MODECTL_BYTESWAP_B2HRX_DATA |
		     BGE_MODECTL_WORDSWAP_B2HRX_DATA |
		     BGE_MODECTL_B2HRX_ENABLE | BGE_MODECTL_HTX2B_ENABLE);
	}
	mode_ctl |= BGE_MODECTL_MAC_ATTN_INTR |
	    BGE_MODECTL_HOST_SEND_BDS | BGE_MODECTL_TX_NO_PHDR_CSUM;
	CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl);

	/*
	 * Disable memory write invalidate.  Apparently it is not supported
	 * properly by these devices.  Also ensure that INTx isn't disabled,
	 * as these chips need it even when using MSI.
	 */
	PCI_CLRBIT(sc->bnx_dev, BGE_PCI_CMD,
	    (PCIM_CMD_MWRICEN | PCIM_CMD_INTxDIS), 4);

	/* Set the timer prescaler (always 66MHz) */
	CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/);

	return (0);
}
static int
bnx_blockinit(struct bnx_softc *sc)
{
	struct bnx_intr_data *intr;
	struct bge_rcb *rcb;
	bus_size_t vrcb;
	struct bge_hostaddr taddr;
	uint32_t val;
	int i, limit;

	/*
	 * Initialize the memory window pointer register so that
	 * we can access the first 32K of internal NIC RAM. This will
	 * allow us to set up the TX send ring RCBs and the RX return
	 * ring RCBs, plus other things which live in NIC memory.
	 */
	CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);

	/* Configure mbuf pool watermarks */
	if (BNX_IS_57765_PLUS(sc)) {
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
		if (sc->arpcom.ac_if.if_mtu > ETHERMTU) {
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x7e);
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xea);
		} else {
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x2a);
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xa0);
		}
	} else {
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
	}

	/* Configure DMA resource watermarks */
	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);

	/* Enable buffer manager */
	val = BGE_BMANMODE_ENABLE | BGE_BMANMODE_LOMBUF_ATTN;
	/*
	 * Change the arbitration algorithm of TXMBUF read request to
	 * round-robin instead of priority based for BCM5719.  When
	 * TXFIFO is almost empty, RDMA will hold its request until
	 * TXFIFO is not almost empty.
	 */
	if (sc->bnx_asicrev == BGE_ASICREV_BCM5719)
		val |= BGE_BMANMODE_NO_TX_UNDERRUN;
	if (sc->bnx_asicrev == BGE_ASICREV_BCM5717 ||
	    sc->bnx_chipid == BGE_CHIPID_BCM5719_A0 ||
	    sc->bnx_chipid == BGE_CHIPID_BCM5720_A0)
		val |= BGE_BMANMODE_LOMBUF_ATTN;
	CSR_WRITE_4(sc, BGE_BMAN_MODE, val);

	/* Poll for buffer manager start indication */
	for (i = 0; i < BNX_TIMEOUT; i++) {
		if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
			break;
		DELAY(10);
	}

	if (i == BNX_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if,
		    "buffer manager failed to start\n");
		return (ENXIO);
	}

	/* Enable flow-through queues */
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);

	/* Wait until queue initialization is complete */
	for (i = 0; i < BNX_TIMEOUT; i++) {
		if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
			break;
		DELAY(10);
	}

	if (i == BNX_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if,
		    "flow-through queue init failed\n");
		return (ENXIO);
	}

	/*
	 * Summary of rings supported by the controller:
	 *
	 * Standard Receive Producer Ring
	 * - This ring is used to feed receive buffers for "standard"
	 *   sized frames (typically 1536 bytes) to the controller.
	 *
	 * Jumbo Receive Producer Ring
	 * - This ring is used to feed receive buffers for jumbo sized
	 *   frames (i.e. anything bigger than the "standard" frames)
	 *   to the controller.
	 *
	 * Mini Receive Producer Ring
	 * - This ring is used to feed receive buffers for "mini"
	 *   sized frames to the controller.
	 * - This feature required external memory for the controller
	 *   but was never used in a production system.  Should always
	 *   be disabled.
	 *
	 * Receive Return Ring
	 * - After the controller has placed an incoming frame into a
	 *   receive buffer that buffer is moved into a receive return
	 *   ring.  The driver is then responsible for passing the
	 *   buffer up to the stack.  BCM5718/BCM57785 families support
	 *   multiple receive return rings.
	 *
	 * Send Ring
	 * - This ring is used for outgoing frames.  BCM5719/BCM5720
	 *   support multiple send rings.
	 */

	/* Initialize the standard receive producer ring control block. */
	rcb = &sc->bnx_ldata.bnx_info.bnx_std_rx_rcb;
	rcb->bge_hostaddr.bge_addr_lo =
	    BGE_ADDR_LO(sc->bnx_rx_std_ring.bnx_rx_std_ring_paddr);
	rcb->bge_hostaddr.bge_addr_hi =
	    BGE_ADDR_HI(sc->bnx_rx_std_ring.bnx_rx_std_ring_paddr);
	if (BNX_IS_57765_PLUS(sc)) {
		/*
		 * Bits 31-16: Programmable ring size (2048, 1024, 512, .., 32)
		 * Bits 15-2 : Maximum RX frame size
		 * Bit 1     : 1 = Ring Disabled, 0 = Ring Enabled
		 * Bit 0     : Reserved
		 */
		rcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(512, BNX_MAX_FRAMELEN << 2);
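		/*
		 * NOTE: BGE_RCB_MAXLEN_FLAGS() packs its first argument
		 * into bits 31-16 and its second into bits 15-0, which is
		 * why the maximum frame size is pre-shifted left by 2 so
		 * that it lands in bits 15-2 as described above.
		 */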
	} else {
		/*
		 * Bits 31-16: Programmable ring size (512, 256, 128, 64, 32)
		 * Bits 15-2 : Reserved (should be 0)
		 * Bit 1     : 1 = Ring Disabled, 0 = Ring Enabled
		 * Bit 0     : Reserved
		 */
		rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
	}
	if (BNX_IS_5717_PLUS(sc))
		rcb->bge_nicaddr = BGE_STD_RX_RINGS_5717;
	else
		rcb->bge_nicaddr = BGE_STD_RX_RINGS;
	/* Write the standard receive producer ring control block. */
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
	if (!BNX_IS_5717_PLUS(sc))
		CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
	/* Reset the standard receive producer ring producer index. */
	bnx_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0);

	/*
	 * Initialize the jumbo RX producer ring control
	 * block.  We set the 'ring disabled' bit in the
	 * flags field until we're actually ready to start
	 * using this ring (i.e. once we set the MTU
	 * high enough to require it).
	 */
	if (BNX_IS_JUMBO_CAPABLE(sc)) {
		rcb = &sc->bnx_ldata.bnx_info.bnx_jumbo_rx_rcb;
		/* Get the jumbo receive producer ring RCB parameters. */
		rcb->bge_hostaddr.bge_addr_lo =
		    BGE_ADDR_LO(sc->bnx_ldata.bnx_rx_jumbo_ring_paddr);
		rcb->bge_hostaddr.bge_addr_hi =
		    BGE_ADDR_HI(sc->bnx_ldata.bnx_rx_jumbo_ring_paddr);
		rcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(BNX_MAX_FRAMELEN,
		    BGE_RCB_FLAG_RING_DISABLED);
		if (BNX_IS_5717_PLUS(sc))
			rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS_5717;
		else
			rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
		    rcb->bge_hostaddr.bge_addr_hi);
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
		    rcb->bge_hostaddr.bge_addr_lo);
		/* Program the jumbo receive producer ring RCB parameters. */
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
		    rcb->bge_maxlen_flags);
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
		/* Reset the jumbo receive producer ring producer index. */
		bnx_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
	}

	/*
	 * The BD ring replenish thresholds control how often the
	 * hardware fetches new BD's from the producer rings in host
	 * memory.  Setting the value too low on a busy system can
	 * starve the hardware and reduce the throughput.
	 *
	 * Set the BD ring replenish thresholds.  The recommended
	 * values are 1/8th the number of descriptors allocated to
	 * the ring.
	 */
	val = BGE_STD_RX_RING_CNT / 8;
	CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, val);
	if (BNX_IS_JUMBO_CAPABLE(sc)) {
		CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH,
		    BGE_JUMBO_RX_RING_CNT/8);
	}
	if (BNX_IS_57765_PLUS(sc)) {
		CSR_WRITE_4(sc, BGE_STD_REPLENISH_LWM, 32);
		CSR_WRITE_4(sc, BGE_JMB_REPLENISH_LWM, 16);
	}

	/*
	 * Disable all send rings by setting the 'ring disabled' bit
	 * in the flags field of all the TX send ring control blocks,
	 * located in NIC memory.
	 */
	if (BNX_IS_5717_PLUS(sc))
		limit = 4;
	else if (BNX_IS_57765_FAMILY(sc) ||
	    sc->bnx_asicrev == BGE_ASICREV_BCM5762)
		limit = 2;
	else
		limit = 1;
	vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
	for (i = 0; i < limit; i++) {
		RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
		vrcb += sizeof(struct bge_rcb);
	}

	/*
	 * Configure send ring RCBs
	 */
	vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
	for (i = 0; i < sc->bnx_tx_ringcnt; ++i) {
		struct bnx_tx_ring *txr = &sc->bnx_tx_ring[i];

		BGE_HOSTADDR(taddr, txr->bnx_tx_ring_paddr);
		RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi,
		    taddr.bge_addr_hi);
		RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo,
		    taddr.bge_addr_lo);
		RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
		    BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
		vrcb += sizeof(struct bge_rcb);
	}

	/*
	 * Disable all receive return rings by setting the
	 * 'ring disabled' bit in the flags field of all the receive
	 * return ring control blocks, located in NIC memory.
	 */
	if (BNX_IS_5717_PLUS(sc)) {
		/* Should be 17, use 16 until we get an SRAM map. */
		limit = 16;
	} else if (BNX_IS_57765_FAMILY(sc) ||
	    sc->bnx_asicrev == BGE_ASICREV_BCM5762) {
		limit = 4;
	} else {
		limit = 1;
	}

	/* Disable all receive return rings. */
	vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
	for (i = 0; i < limit; i++) {
		RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0);
		RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0);
		RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
		    BGE_RCB_FLAG_RING_DISABLED);
		bnx_writembx(sc, BGE_MBX_RX_CONS0_LO +
		    (i * (sizeof(uint64_t))), 0);
		vrcb += sizeof(struct bge_rcb);
	}

	/*
	 * Set up receive return rings.
	 */
	vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
	for (i = 0; i < sc->bnx_rx_retcnt; ++i) {
		struct bnx_rx_ret_ring *ret = &sc->bnx_rx_ret_ring[i];

		BGE_HOSTADDR(taddr, ret->bnx_rx_ret_ring_paddr);
		RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi,
		    taddr.bge_addr_hi);
		RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo,
		    taddr.bge_addr_lo);
		RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
		    BGE_RCB_MAXLEN_FLAGS(BNX_RETURN_RING_CNT, 0));
		vrcb += sizeof(struct bge_rcb);
	}

	/* Set random backoff seed for TX */
	CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
	    (sc->arpcom.ac_enaddr[0] + sc->arpcom.ac_enaddr[1] +
	     sc->arpcom.ac_enaddr[2] + sc->arpcom.ac_enaddr[3] +
	     sc->arpcom.ac_enaddr[4] + sc->arpcom.ac_enaddr[5]) &
	    BGE_TX_BACKOFF_SEED_MASK);
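	/*
	 * NOTE: summing the MAC address bytes gives each station a cheap,
	 * mostly unique backoff seed, so that stations sharing a collision
	 * domain are unlikely to pick identical retransmit slots.
	 */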
	/* Set inter-packet gap */
	val = 0x2620;
	if (sc->bnx_asicrev == BGE_ASICREV_BCM5720 ||
	    sc->bnx_asicrev == BGE_ASICREV_BCM5762) {
		val |= CSR_READ_4(sc, BGE_TX_LENGTHS) &
		    (BGE_TXLEN_JMB_FRM_LEN_MSK | BGE_TXLEN_CNT_DN_VAL_MSK);
	}
	CSR_WRITE_4(sc, BGE_TX_LENGTHS, val);

	/*
	 * Specify which ring to use for packets that don't match
	 * any RX rules.
	 */
	CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);

	/*
	 * Configure number of RX lists.  One interrupt distribution
	 * list, sixteen active lists, one bad frames class.
	 */
	CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);

	/* Initialize RX list placement stats mask. */
	CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
	CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);

	/* Disable host coalescing until we get it set up */
	CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);

	/* Poll to make sure it's shut down. */
	for (i = 0; i < BNX_TIMEOUT; i++) {
		if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
			break;
		DELAY(10);
	}

	if (i == BNX_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if,
		    "host coalescing engine failed to idle\n");
		return (ENXIO);
	}

	/* Set up host coalescing defaults */
	sc->bnx_coal_chg = BNX_RX_COAL_TICKS_CHG |
	    BNX_TX_COAL_TICKS_CHG |
	    BNX_RX_COAL_BDS_CHG |
	    BNX_TX_COAL_BDS_CHG |
	    BNX_RX_COAL_BDS_INT_CHG |
	    BNX_TX_COAL_BDS_INT_CHG;
	bnx_coal_change(sc);

	/*
	 * Set up addresses of status blocks
	 */
	intr = &sc->bnx_intr_data[0];
	bzero(intr->bnx_status_block, BGE_STATUS_BLK_SZ);
	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI,
	    BGE_ADDR_HI(intr->bnx_status_block_paddr));
	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
	    BGE_ADDR_LO(intr->bnx_status_block_paddr));
	for (i = 1; i < sc->bnx_intr_cnt; ++i) {
		intr = &sc->bnx_intr_data[i];
		bzero(intr->bnx_status_block, BGE_STATUS_BLK_SZ);
		CSR_WRITE_4(sc, BGE_VEC1_STATUSBLK_ADDR_HI + ((i - 1) * 8),
		    BGE_ADDR_HI(intr->bnx_status_block_paddr));
		CSR_WRITE_4(sc, BGE_VEC1_STATUSBLK_ADDR_LO + ((i - 1) * 8),
		    BGE_ADDR_LO(intr->bnx_status_block_paddr));
	}

	/* Set up status block partial update size. */
	val = BGE_STATBLKSZ_32BYTE;
#if 0
	/*
	 * Does not seem to have visible effect in both
	 * bulk data (1472B UDP datagram) and tiny data
	 * (18B UDP datagram) TX tests.
	 */
	val |= BGE_HCCMODE_CLRTICK_TX;
#endif

	/* Turn on host coalescing state machine */
	CSR_WRITE_4(sc, BGE_HCC_MODE, val | BGE_HCCMODE_ENABLE);

	/* Turn on RX BD completion state machine and enable attentions */
	CSR_WRITE_4(sc, BGE_RBDC_MODE,
	    BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);

	/* Turn on RX list placement state machine */
	CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);

	val = BGE_MACMODE_TXDMA_ENB | BGE_MACMODE_RXDMA_ENB |
	    BGE_MACMODE_RX_STATS_CLEAR | BGE_MACMODE_TX_STATS_CLEAR |
	    BGE_MACMODE_RX_STATS_ENB | BGE_MACMODE_TX_STATS_ENB |
	    BGE_MACMODE_FRMHDR_DMA_ENB;

	if (sc->bnx_flags & BNX_FLAG_TBI)
		val |= BGE_PORTMODE_TBI;
	else if (sc->bnx_flags & BNX_FLAG_MII_SERDES)
		val |= BGE_PORTMODE_GMII;
	else
		val |= BGE_PORTMODE_MII;

	/* Allow APE to send/receive frames. */
	if (sc->bnx_mfw_flags & BNX_MFW_ON_APE)
		val |= BGE_MACMODE_APE_RX_EN | BGE_MACMODE_APE_TX_EN;

	/* Turn on DMA, clear stats */
	CSR_WRITE_4(sc, BGE_MAC_MODE, val);
	DELAY(40);

	/* Set misc. local control, enable interrupts on attentions */
	BNX_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);

#ifdef notdef
	/* Assert GPIO pins for PHY reset */
	BNX_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
	    BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
	BNX_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
	    BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
#endif
	if (sc->bnx_intr_type == PCI_INTR_TYPE_MSIX)
		bnx_enable_msi(sc, TRUE);

	/* Turn on write DMA state machine */
	val = BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS;
	/* Enable host coalescing bug fix. */
	val |= BGE_WDMAMODE_STATUS_TAG_FIX;
	if (sc->bnx_asicrev == BGE_ASICREV_BCM5785) {
		/* Request larger DMA burst size to get better performance. */
		val |= BGE_WDMAMODE_BURST_ALL_DATA;
	}
	CSR_WRITE_4(sc, BGE_WDMA_MODE, val);
	DELAY(40);

	if (BNX_IS_57765_PLUS(sc)) {
		uint32_t dmactl, dmactl_reg;

		if (sc->bnx_asicrev == BGE_ASICREV_BCM5762)
			dmactl_reg = BGE_RDMA_RSRVCTRL2;
		else
			dmactl_reg = BGE_RDMA_RSRVCTRL;

		dmactl = CSR_READ_4(sc, dmactl_reg);
		/*
		 * Adjust tx margin to prevent TX data corruption and
		 * fix internal FIFO overflow.
		 */
		if (sc->bnx_asicrev == BGE_ASICREV_BCM5719 ||
		    sc->bnx_asicrev == BGE_ASICREV_BCM5720 ||
		    sc->bnx_asicrev == BGE_ASICREV_BCM5762) {
			dmactl &= ~(BGE_RDMA_RSRVCTRL_FIFO_LWM_MASK |
			    BGE_RDMA_RSRVCTRL_FIFO_HWM_MASK |
			    BGE_RDMA_RSRVCTRL_TXMRGN_MASK);
			dmactl |= BGE_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
			    BGE_RDMA_RSRVCTRL_FIFO_HWM_1_5K |
			    BGE_RDMA_RSRVCTRL_TXMRGN_320B;
		}
		/*
		 * Enable fix for read DMA FIFO overruns.
		 * The fix is to limit the number of RX BDs
		 * the hardware would fetch at a time.
		 */
		CSR_WRITE_4(sc, dmactl_reg,
		    dmactl | BGE_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
	}

	if (sc->bnx_asicrev == BGE_ASICREV_BCM5719) {
		CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL,
		    CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) |
		    BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_4K |
		    BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K);
	} else if (sc->bnx_asicrev == BGE_ASICREV_BCM5720 ||
	    sc->bnx_asicrev == BGE_ASICREV_BCM5762) {
		uint32_t ctrl_reg;

		if (sc->bnx_asicrev == BGE_ASICREV_BCM5762)
			ctrl_reg = BGE_RDMA_LSO_CRPTEN_CTRL2;
		else
			ctrl_reg = BGE_RDMA_LSO_CRPTEN_CTRL;

		/*
		 * Allow 4KB burst length reads for non-LSO frames.
		 * Enable 512B burst length reads for buffer descriptors.
		 */
		CSR_WRITE_4(sc, ctrl_reg,
		    CSR_READ_4(sc, ctrl_reg) |
		    BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_512 |
		    BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K);
	}

	/* Turn on read DMA state machine */
	val = BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS;
	if (sc->bnx_asicrev == BGE_ASICREV_BCM5717)
		val |= BGE_RDMAMODE_MULT_DMA_RD_DIS;
	if (sc->bnx_asicrev == BGE_ASICREV_BCM5784 ||
	    sc->bnx_asicrev == BGE_ASICREV_BCM5785 ||
	    sc->bnx_asicrev == BGE_ASICREV_BCM57780) {
		val |= BGE_RDMAMODE_BD_SBD_CRPT_ATTN |
		    BGE_RDMAMODE_MBUF_RBD_CRPT_ATTN |
		    BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN;
	}
	if (sc->bnx_asicrev == BGE_ASICREV_BCM5720 ||
	    sc->bnx_asicrev == BGE_ASICREV_BCM5762) {
		val |= CSR_READ_4(sc, BGE_RDMA_MODE) &
		    BGE_RDMAMODE_H2BNC_VLAN_DET;
		/*
		 * Allow multiple outstanding read requests from
		 * non-LSO read DMA engine.
		 */
		val &= ~BGE_RDMAMODE_MULT_DMA_RD_DIS;
	}
	if (sc->bnx_asicrev == BGE_ASICREV_BCM57766)
		val |= BGE_RDMAMODE_JMB_2K_MMRR;
	if (sc->bnx_flags & BNX_FLAG_TSO)
		val |= BGE_RDMAMODE_TSO4_ENABLE;
	val |= BGE_RDMAMODE_FIFO_LONG_BURST;
	CSR_WRITE_4(sc, BGE_RDMA_MODE, val);
	DELAY(40);

	/* Turn on RX data completion state machine */
	CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);

	/* Turn on RX BD initiator state machine */
	CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);

	/* Turn on RX data and RX BD initiator state machine */
	CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);

	/* Turn on send BD completion state machine */
	CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);

	/* Turn on send data completion state machine */
	val = BGE_SDCMODE_ENABLE;
	if (sc->bnx_asicrev == BGE_ASICREV_BCM5761)
		val |= BGE_SDCMODE_CDELAY;
	CSR_WRITE_4(sc, BGE_SDC_MODE, val);

	/* Turn on send data initiator state machine */
	if (sc->bnx_flags & BNX_FLAG_TSO) {
		CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE |
		    BGE_SDIMODE_HW_LSO_PRE_DMA);
	} else {
		CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
	}

	/* Turn on send BD initiator state machine */
	val = BGE_SBDIMODE_ENABLE;
	if (sc->bnx_tx_ringcnt > 1)
		val |= BGE_SBDIMODE_MULTI_TXR;
	CSR_WRITE_4(sc, BGE_SBDI_MODE, val);

	/* Turn on send BD selector state machine */
	CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);

	CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
	CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
	    BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);

	/* ack/clear link change events */
	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
	    BGE_MACSTAT_LINK_CHANGED);
	CSR_WRITE_4(sc, BGE_MI_STS, 0);

	/*
	 * Enable attention when the link has changed state for
	 * devices that use auto polling.
	 */
	if (sc->bnx_flags & BNX_FLAG_TBI) {
		CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
	} else {
		if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) {
			CSR_WRITE_4(sc, BGE_MI_MODE, sc->bnx_mi_mode);
			DELAY(80);
		}
	}

	/*
	 * Clear any pending link state attention.
	 * Otherwise some link state change events may be lost until attention
	 * is cleared by bnx_intr() -> bnx_softc.bnx_link_upd() sequence.
	 * It's not necessary on newer BCM chips - perhaps enabling link
	 * state change attentions implies clearing pending attention.
	 */
	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
	    BGE_MACSTAT_LINK_CHANGED);

	/* Enable link state change attentions. */
	BNX_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);

	return (0);
}
/*
 * Probe for a Broadcom chip.  Check the PCI vendor and device IDs
 * against our list and return its name if we find a match.  Note
 * that since the Broadcom controller contains VPD support, we
 * can get the device name string from the controller itself instead
 * of the compiled-in string.  This is a little slow, but it guarantees
 * we'll always announce the right product name.
 */
static int
bnx_probe(device_t dev)
{
	const struct bnx_type *t;
	uint16_t product, vendor;

	if (!pci_is_pcie(dev))
		return ENXIO;

	product = pci_get_device(dev);
	vendor = pci_get_vendor(dev);

	for (t = bnx_devs; t->bnx_name != NULL; t++) {
		if (vendor == t->bnx_vid && product == t->bnx_did)
			break;
	}
	if (t->bnx_name == NULL)
		return ENXIO;

	device_set_desc(dev, t->bnx_name);
	return 0;
}
static int
bnx_attach(device_t dev)
{
	struct ifnet *ifp;
	struct bnx_softc *sc;
	struct bnx_rx_std_ring *std;
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *tree;
	int error = 0, rid, capmask, i, std_cpuid, std_cpuid_def;
	uint8_t ether_addr[ETHER_ADDR_LEN];
	uint16_t product;
	uintptr_t mii_priv = 0;
#if defined(BNX_TSO_DEBUG) || defined(BNX_RSS_DEBUG) || defined(BNX_TSS_DEBUG)
	char desc[32];
#endif
#ifdef IFPOLL_ENABLE
	int offset, offset_def;
#endif

	sc = device_get_softc(dev);
	sc->bnx_dev = dev;
	callout_init_mp(&sc->bnx_tick_timer);
	lwkt_serialize_init(&sc->bnx_jslot_serializer);
	lwkt_serialize_init(&sc->bnx_main_serialize);

	/* Always setup interrupt mailboxes */
	for (i = 0; i < BNX_INTR_MAX; ++i) {
		callout_init_mp(&sc->bnx_intr_data[i].bnx_intr_timer);
		sc->bnx_intr_data[i].bnx_sc = sc;
		sc->bnx_intr_data[i].bnx_intr_mbx = BGE_MBX_IRQ0_LO + (i * 8);
		sc->bnx_intr_data[i].bnx_intr_rid = -1;
		sc->bnx_intr_data[i].bnx_intr_cpuid = -1;
	}

	sc->bnx_func_addr = pci_get_function(dev);
	product = pci_get_device(dev);

#ifndef BURN_BRIDGES
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		uint32_t irq, mem;

		irq = pci_read_config(dev, PCIR_INTLINE, 4);
		mem = pci_read_config(dev, BGE_PCI_BAR0, 4);

		device_printf(dev, "chip is in D%d power mode "
		    "-- setting to D0\n", pci_get_powerstate(dev));

		pci_set_powerstate(dev, PCI_POWERSTATE_D0);

		pci_write_config(dev, PCIR_INTLINE, irq, 4);
		pci_write_config(dev, BGE_PCI_BAR0, mem, 4);
	}
#endif	/* !BURN_BRIDGES */
1890 * Map control/status registers.
1892 pci_enable_busmaster(dev);
1895 sc->bnx_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
1898 if (sc->bnx_res == NULL) {
1899 device_printf(dev, "couldn't map memory\n");
1903 sc->bnx_btag = rman_get_bustag(sc->bnx_res);
1904 sc->bnx_bhandle = rman_get_bushandle(sc->bnx_res);
1906 /* Save various chip information */
1908 pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >>
1909 BGE_PCIMISCCTL_ASICREV_SHIFT;
1910 if (BGE_ASICREV(sc->bnx_chipid) == BGE_ASICREV_USE_PRODID_REG) {
1911 /* All chips having dedicated ASICREV register have CPMU */
1912 sc->bnx_flags |= BNX_FLAG_CPMU;
1915 case PCI_PRODUCT_BROADCOM_BCM5717:
1916 case PCI_PRODUCT_BROADCOM_BCM5717C:
1917 case PCI_PRODUCT_BROADCOM_BCM5718:
1918 case PCI_PRODUCT_BROADCOM_BCM5719:
1919 case PCI_PRODUCT_BROADCOM_BCM5720_ALT:
1920 case PCI_PRODUCT_BROADCOM_BCM5725:
1921 case PCI_PRODUCT_BROADCOM_BCM5727:
1922 case PCI_PRODUCT_BROADCOM_BCM5762:
1923 sc->bnx_chipid = pci_read_config(dev,
1924 BGE_PCI_GEN2_PRODID_ASICREV, 4);
1927 case PCI_PRODUCT_BROADCOM_BCM57761:
1928 case PCI_PRODUCT_BROADCOM_BCM57762:
1929 case PCI_PRODUCT_BROADCOM_BCM57765:
1930 case PCI_PRODUCT_BROADCOM_BCM57766:
1931 case PCI_PRODUCT_BROADCOM_BCM57781:
1932 case PCI_PRODUCT_BROADCOM_BCM57782:
1933 case PCI_PRODUCT_BROADCOM_BCM57785:
1934 case PCI_PRODUCT_BROADCOM_BCM57786:
1935 case PCI_PRODUCT_BROADCOM_BCM57791:
1936 case PCI_PRODUCT_BROADCOM_BCM57795:
1937 sc->bnx_chipid = pci_read_config(dev,
1938 BGE_PCI_GEN15_PRODID_ASICREV, 4);
1942 sc->bnx_chipid = pci_read_config(dev,
1943 BGE_PCI_PRODID_ASICREV, 4);
1947 if (sc->bnx_chipid == BGE_CHIPID_BCM5717_C0)
1948 sc->bnx_chipid = BGE_CHIPID_BCM5720_A0;
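	/*
	 * The ASIC revision and the chip revision are carried in the
	 * upper bits of the chip id.
	 */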
1950 sc->bnx_asicrev = BGE_ASICREV(sc->bnx_chipid);
1951 sc->bnx_chiprev = BGE_CHIPREV(sc->bnx_chipid);
1953 switch (sc->bnx_asicrev) {
1954 case BGE_ASICREV_BCM5717:
1955 case BGE_ASICREV_BCM5719:
1956 case BGE_ASICREV_BCM5720:
1957 sc->bnx_flags |= BNX_FLAG_5717_PLUS | BNX_FLAG_57765_PLUS;
1960 case BGE_ASICREV_BCM5762:
1961 sc->bnx_flags |= BNX_FLAG_57765_PLUS;
1964 case BGE_ASICREV_BCM57765:
1965 case BGE_ASICREV_BCM57766:
1966 sc->bnx_flags |= BNX_FLAG_57765_FAMILY | BNX_FLAG_57765_PLUS;
1970 if (sc->bnx_asicrev == BGE_ASICREV_BCM5717 ||
1971 sc->bnx_asicrev == BGE_ASICREV_BCM5719 ||
1972 sc->bnx_asicrev == BGE_ASICREV_BCM5720 ||
1973 sc->bnx_asicrev == BGE_ASICREV_BCM5762)
1974 sc->bnx_flags |= BNX_FLAG_APE;
1976 sc->bnx_flags |= BNX_FLAG_TSO;
1977 if (sc->bnx_asicrev == BGE_ASICREV_BCM5719 &&
1978 sc->bnx_chipid == BGE_CHIPID_BCM5719_A0)
1979 sc->bnx_flags &= ~BNX_FLAG_TSO;
1981 if (sc->bnx_asicrev == BGE_ASICREV_BCM5717 ||
1982 BNX_IS_57765_FAMILY(sc)) {
		 * All chips of the BCM57785 and BCM5718 families have a
		 * bug: under certain conditions the interrupt will not
		 * be enabled even though the status tag has been
		 * written to the interrupt mailbox.
		 * BCM5719 and BCM5720 have a hardware workaround
		 * which fixes the above bug.
1990 * See the comment near BGE_PCIDMARWCTL_TAGGED_STATUS_WA in
		 * For the rest of the chips in these two families, we
		 * have to poll the status block at a high rate
		 * (currently every 10ms) to check whether the interrupt
		 * is hosed.
1996 * See bnx_check_intr_*() for details.
1998 sc->bnx_flags |= BNX_FLAG_STATUSTAG_BUG;
2001 sc->bnx_pciecap = pci_get_pciecap_ptr(sc->bnx_dev);
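	/*
	 * Program the PCIe max read request size; BCM5719/BCM5720 are
	 * limited to 2048 bytes, the other chips can use 4096 bytes.
	 */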
2002 if (sc->bnx_asicrev == BGE_ASICREV_BCM5719 ||
2003 sc->bnx_asicrev == BGE_ASICREV_BCM5720)
2004 pcie_set_max_readrq(dev, PCIEM_DEVCTL_MAX_READRQ_2048);
2006 pcie_set_max_readrq(dev, PCIEM_DEVCTL_MAX_READRQ_4096);
2007 device_printf(dev, "CHIP ID 0x%08x; "
2008 "ASIC REV 0x%02x; CHIP REV 0x%02x\n",
2009 sc->bnx_chipid, sc->bnx_asicrev, sc->bnx_chiprev);
2012 * Set various PHY quirk flags.
2015 capmask = MII_CAPMASK_DEFAULT;
2016 if (product == PCI_PRODUCT_BROADCOM_BCM57791 ||
2017 product == PCI_PRODUCT_BROADCOM_BCM57795) {
2019 capmask &= ~BMSR_EXTSTAT;
2022 mii_priv |= BRGPHY_FLAG_WIRESPEED;
2023 if (sc->bnx_chipid == BGE_CHIPID_BCM5762_A0)
2024 mii_priv |= BRGPHY_FLAG_5762_A0;
2027 * Chips with APE need BAR2 access for APE registers/memory.
2029 if (sc->bnx_flags & BNX_FLAG_APE) {
2033 sc->bnx_res2 = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2035 if (sc->bnx_res2 == NULL) {
2036 device_printf(dev, "couldn't map BAR2 memory\n");
2041 /* Enable APE register/memory access by host driver. */
2042 pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);
2043 pcistate |= BGE_PCISTATE_ALLOW_APE_CTLSPC_WR |
2044 BGE_PCISTATE_ALLOW_APE_SHMEM_WR |
2045 BGE_PCISTATE_ALLOW_APE_PSPACE_WR;
2046 pci_write_config(dev, BGE_PCI_PCISTATE, pcistate, 4);
2048 bnx_ape_lock_init(sc);
2049 bnx_ape_read_fw_ver(sc);
	/* Initialize if_name early, so that if_printf() can be used */
2053 ifp = &sc->arpcom.ac_if;
2054 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2057 * Try to reset the chip.
2059 bnx_sig_pre_reset(sc, BNX_RESET_SHUTDOWN);
2061 bnx_sig_post_reset(sc, BNX_RESET_SHUTDOWN);
2063 if (bnx_chipinit(sc)) {
2064 device_printf(dev, "chip initialization failed\n");
2070 * Get station address
2072 error = bnx_get_eaddr(sc, ether_addr);
2074 device_printf(dev, "failed to read station address\n");
	/* Set up the RX/TX ring counts and the interrupt count */
2079 bnx_setup_ring_cnt(sc);
2081 if ((sc->bnx_rx_retcnt == 1 && sc->bnx_tx_ringcnt == 1) ||
2082 (sc->bnx_rx_retcnt > 1 && sc->bnx_tx_ringcnt > 1)) {
2084 * The RX ring and the corresponding TX ring processing
2085 * should be on the same CPU, since they share the same
2088 sc->bnx_flags |= BNX_FLAG_RXTX_BUNDLE;
2090 device_printf(dev, "RX/TX bundle\n");
2091 if (sc->bnx_tx_ringcnt > 1) {
			 * Multiple TX rings do not share the status block
			 * with the link status, so the link status has
			 * to save its own status_tag.
2097 sc->bnx_flags |= BNX_FLAG_STATUS_HASTAG;
2099 device_printf(dev, "status needs tag\n");
2102 KKASSERT(sc->bnx_rx_retcnt > 1 && sc->bnx_tx_ringcnt == 1);
2104 device_printf(dev, "RX/TX not bundled\n");
2107 error = bnx_dma_alloc(dev);
2111 #ifdef IFPOLL_ENABLE
2112 if (sc->bnx_flags & BNX_FLAG_RXTX_BUNDLE) {
2114 * NPOLLING RX/TX CPU offset
2116 if (sc->bnx_rx_retcnt == ncpus2) {
2120 (sc->bnx_rx_retcnt * device_get_unit(dev)) % ncpus2;
2121 offset = device_getenv_int(dev, "npoll.offset",
2123 if (offset >= ncpus2 ||
2124 offset % sc->bnx_rx_retcnt != 0) {
2125 device_printf(dev, "invalid npoll.offset %d, "
2126 "use %d\n", offset, offset_def);
2127 offset = offset_def;
2130 sc->bnx_npoll_rxoff = offset;
2131 sc->bnx_npoll_txoff = offset;
2134 * NPOLLING RX CPU offset
2136 if (sc->bnx_rx_retcnt == ncpus2) {
2140 (sc->bnx_rx_retcnt * device_get_unit(dev)) % ncpus2;
2141 offset = device_getenv_int(dev, "npoll.rxoff",
2143 if (offset >= ncpus2 ||
2144 offset % sc->bnx_rx_retcnt != 0) {
2145 device_printf(dev, "invalid npoll.rxoff %d, "
2146 "use %d\n", offset, offset_def);
2147 offset = offset_def;
2150 sc->bnx_npoll_rxoff = offset;
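		/*
		 * Example (hypothetical numbers): with 4 RX return
		 * rings on device unit 1 and ncpus2 == 8, offset_def
		 * is (4 * 1) % 8 = 4, so the RX rings poll on CPUs
		 * 4-7.  A tunable offset is rejected unless it is a
		 * multiple of the ring count, which keeps the rings
		 * on consecutive CPUs.
		 */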
2153 * NPOLLING TX CPU offset
2155 offset_def = device_get_unit(dev) % ncpus2;
2156 offset = device_getenv_int(dev, "npoll.txoff", offset_def);
2157 if (offset >= ncpus2) {
2158 device_printf(dev, "invalid npoll.txoff %d, use %d\n",
2159 offset, offset_def);
2160 offset = offset_def;
2162 sc->bnx_npoll_txoff = offset;
2164 #endif /* IFPOLL_ENABLE */
2167 * Allocate interrupt
2169 error = bnx_alloc_intr(sc);
2173 /* Setup serializers */
2174 bnx_setup_serialize(sc);
2176 /* Set default tuneable values. */
2177 sc->bnx_rx_coal_ticks = BNX_RX_COAL_TICKS_DEF;
2178 sc->bnx_tx_coal_ticks = BNX_TX_COAL_TICKS_DEF;
2179 sc->bnx_rx_coal_bds = BNX_RX_COAL_BDS_DEF;
2180 sc->bnx_rx_coal_bds_poll = sc->bnx_rx_ret_ring[0].bnx_rx_cntmax;
2181 sc->bnx_tx_coal_bds = BNX_TX_COAL_BDS_DEF;
2182 sc->bnx_tx_coal_bds_poll = BNX_TX_COAL_BDS_POLL_DEF;
2183 sc->bnx_rx_coal_bds_int = BNX_RX_COAL_BDS_INT_DEF;
2184 sc->bnx_tx_coal_bds_int = BNX_TX_COAL_BDS_INT_DEF;
2186 /* Set up ifnet structure */
2188 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2189 ifp->if_ioctl = bnx_ioctl;
2190 ifp->if_start = bnx_start;
2191 #ifdef IFPOLL_ENABLE
2192 ifp->if_npoll = bnx_npoll;
2194 ifp->if_init = bnx_init;
2195 ifp->if_serialize = bnx_serialize;
2196 ifp->if_deserialize = bnx_deserialize;
2197 ifp->if_tryserialize = bnx_tryserialize;
2199 ifp->if_serialize_assert = bnx_serialize_assert;
2201 ifp->if_mtu = ETHERMTU;
2202 ifp->if_capabilities = IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
2204 ifp->if_capabilities |= IFCAP_HWCSUM;
2205 ifp->if_hwassist = BNX_CSUM_FEATURES;
2206 if (sc->bnx_flags & BNX_FLAG_TSO) {
2207 ifp->if_capabilities |= IFCAP_TSO;
2208 ifp->if_hwassist |= CSUM_TSO;
2210 if (BNX_RSS_ENABLED(sc))
2211 ifp->if_capabilities |= IFCAP_RSS;
2212 ifp->if_capenable = ifp->if_capabilities;
2214 ifp->if_nmbclusters = BGE_STD_RX_RING_CNT;
2216 ifq_set_maxlen(&ifp->if_snd, BGE_TX_RING_CNT - 1);
2217 ifq_set_ready(&ifp->if_snd);
2218 ifq_set_subq_cnt(&ifp->if_snd, sc->bnx_tx_ringcnt);
2220 if (sc->bnx_tx_ringcnt > 1) {
2221 ifp->if_mapsubq = ifq_mapsubq_mask;
2222 ifq_set_subq_mask(&ifp->if_snd, sc->bnx_tx_ringcnt - 1);
2226 * Figure out what sort of media we have by checking the
2227 * hardware config word in the first 32k of NIC internal memory,
2228 * or fall back to examining the EEPROM if necessary.
2229 * Note: on some BCM5700 cards, this value appears to be unset.
2230 * If that's the case, we have to rely on identifying the NIC
2231 * by its PCI subsystem ID, as we do below for the SysKonnect
2234 if (bnx_readmem_ind(sc, BGE_SRAM_DATA_SIG) == BGE_SRAM_DATA_SIG_MAGIC) {
2235 hwcfg = bnx_readmem_ind(sc, BGE_SRAM_DATA_CFG);
2237 if (bnx_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
2239 device_printf(dev, "failed to read EEPROM\n");
2243 hwcfg = ntohl(hwcfg);
2246 /* The SysKonnect SK-9D41 is a 1000baseSX card. */
2247 if (pci_get_subvendor(dev) == PCI_PRODUCT_SCHNEIDERKOCH_SK_9D41 ||
2248 (hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER)
2249 sc->bnx_flags |= BNX_FLAG_TBI;
2252 if (sc->bnx_flags & BNX_FLAG_CPMU)
2253 sc->bnx_mi_mode = BGE_MIMODE_500KHZ_CONST;
2255 sc->bnx_mi_mode = BGE_MIMODE_BASE;
2257 /* Setup link status update stuffs */
2258 if (sc->bnx_flags & BNX_FLAG_TBI) {
2259 sc->bnx_link_upd = bnx_tbi_link_upd;
2260 sc->bnx_link_chg = BGE_MACSTAT_LINK_CHANGED;
2261 } else if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) {
2262 sc->bnx_link_upd = bnx_autopoll_link_upd;
2263 sc->bnx_link_chg = BGE_MACSTAT_LINK_CHANGED;
2265 sc->bnx_link_upd = bnx_copper_link_upd;
2266 sc->bnx_link_chg = BGE_MACSTAT_LINK_CHANGED;
2269 /* Set default PHY address */
2273 * PHY address mapping for various devices.
2275 * | F0 Cu | F0 Sr | F1 Cu | F1 Sr |
2276 * ---------+-------+-------+-------+-------+
2277 * BCM57XX | 1 | X | X | X |
2278 * BCM5717 | 1 | 8 | 2 | 9 |
2279 * BCM5719 | 1 | 8 | 2 | 9 |
2280 * BCM5720 | 1 | 8 | 2 | 9 |
2282 * | F2 Cu | F2 Sr | F3 Cu | F3 Sr |
2283 * ---------+-------+-------+-------+-------+
2284 * BCM57XX | X | X | X | X |
2285 * BCM5717 | X | X | X | X |
2286 * BCM5719 | 3 | 10 | 4 | 11 |
2287 * BCM5720 | X | X | X | X |
2289 * Other addresses may respond but they are not
2290 * IEEE compliant PHYs and should be ignored.
2292 if (BNX_IS_5717_PLUS(sc)) {
2293 if (sc->bnx_chipid == BGE_CHIPID_BCM5717_A0) {
2294 if (CSR_READ_4(sc, BGE_SGDIG_STS) &
2295 BGE_SGDIGSTS_IS_SERDES)
2296 sc->bnx_phyno = sc->bnx_func_addr + 8;
2298 sc->bnx_phyno = sc->bnx_func_addr + 1;
2300 if (CSR_READ_4(sc, BGE_CPMU_PHY_STRAP) &
2301 BGE_CPMU_PHY_STRAP_IS_SERDES)
2302 sc->bnx_phyno = sc->bnx_func_addr + 8;
2304 sc->bnx_phyno = sc->bnx_func_addr + 1;
2308 if (sc->bnx_flags & BNX_FLAG_TBI) {
2309 ifmedia_init(&sc->bnx_ifmedia, IFM_IMASK,
2310 bnx_ifmedia_upd, bnx_ifmedia_sts);
2311 ifmedia_add(&sc->bnx_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
2312 ifmedia_add(&sc->bnx_ifmedia,
2313 IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL);
2314 ifmedia_add(&sc->bnx_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
2315 ifmedia_set(&sc->bnx_ifmedia, IFM_ETHER|IFM_AUTO);
2316 sc->bnx_ifmedia.ifm_media = sc->bnx_ifmedia.ifm_cur->ifm_media;
2318 struct mii_probe_args mii_args;
2320 mii_probe_args_init(&mii_args, bnx_ifmedia_upd, bnx_ifmedia_sts);
2321 mii_args.mii_probemask = 1 << sc->bnx_phyno;
2322 mii_args.mii_capmask = capmask;
2323 mii_args.mii_privtag = MII_PRIVTAG_BRGPHY;
2324 mii_args.mii_priv = mii_priv;
2326 error = mii_probe(dev, &sc->bnx_miibus, &mii_args);
2328 device_printf(dev, "MII without any PHY!\n");
2333 ctx = device_get_sysctl_ctx(sc->bnx_dev);
2334 tree = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->bnx_dev));
2336 SYSCTL_ADD_INT(ctx, tree, OID_AUTO,
2337 "rx_rings", CTLFLAG_RD, &sc->bnx_rx_retcnt, 0, "# of RX rings");
2338 SYSCTL_ADD_INT(ctx, tree, OID_AUTO,
2339 "tx_rings", CTLFLAG_RD, &sc->bnx_tx_ringcnt, 0, "# of TX rings");
2341 SYSCTL_ADD_PROC(ctx, tree, OID_AUTO, "rx_coal_ticks",
2342 CTLTYPE_INT | CTLFLAG_RW,
2343 sc, 0, bnx_sysctl_rx_coal_ticks, "I",
2344 "Receive coalescing ticks (usec).");
2345 SYSCTL_ADD_PROC(ctx, tree, OID_AUTO, "tx_coal_ticks",
2346 CTLTYPE_INT | CTLFLAG_RW,
2347 sc, 0, bnx_sysctl_tx_coal_ticks, "I",
2348 "Transmit coalescing ticks (usec).");
2349 SYSCTL_ADD_PROC(ctx, tree, OID_AUTO, "rx_coal_bds",
2350 CTLTYPE_INT | CTLFLAG_RW,
2351 sc, 0, bnx_sysctl_rx_coal_bds, "I",
2352 "Receive max coalesced BD count.");
2353 SYSCTL_ADD_PROC(ctx, tree, OID_AUTO, "rx_coal_bds_poll",
2354 CTLTYPE_INT | CTLFLAG_RW,
2355 sc, 0, bnx_sysctl_rx_coal_bds_poll, "I",
2356 "Receive max coalesced BD count in polling.");
2357 SYSCTL_ADD_PROC(ctx, tree, OID_AUTO, "tx_coal_bds",
2358 CTLTYPE_INT | CTLFLAG_RW,
2359 sc, 0, bnx_sysctl_tx_coal_bds, "I",
2360 "Transmit max coalesced BD count.");
2361 SYSCTL_ADD_PROC(ctx, tree, OID_AUTO, "tx_coal_bds_poll",
2362 CTLTYPE_INT | CTLFLAG_RW,
2363 sc, 0, bnx_sysctl_tx_coal_bds_poll, "I",
2364 "Transmit max coalesced BD count in polling.");
	 * A common design characteristic of many Broadcom
	 * client controllers is that they support only a
	 * single outstanding DMA read operation on the PCIe
	 * bus.  This means that it will take twice as long to
	 * fetch a TX frame that is split into header and
	 * payload buffers as it does to fetch a single,
	 * contiguous TX frame (2 reads vs. 1 read).  For these
	 * controllers, coalescing buffers to reduce the number
	 * of memory reads is an effective way to get maximum
	 * performance (about 940Mbps).  Without collapsing TX
	 * buffers the maximum TCP bulk transfer performance
	 * is about 850Mbps.  However, forcing mbuf coalescing
	 * consumes a lot of CPU cycles, so leave it off by
2381 SYSCTL_ADD_PROC(ctx, tree, OID_AUTO,
2382 "force_defrag", CTLTYPE_INT | CTLFLAG_RW,
2383 sc, 0, bnx_sysctl_force_defrag, "I",
2384 "Force defragment on TX path");
2386 SYSCTL_ADD_PROC(ctx, tree, OID_AUTO,
2387 "tx_wreg", CTLTYPE_INT | CTLFLAG_RW,
2388 sc, 0, bnx_sysctl_tx_wreg, "I",
2389 "# of segments before writing to hardware register");
2391 SYSCTL_ADD_PROC(ctx, tree, OID_AUTO,
2392 "std_refill", CTLTYPE_INT | CTLFLAG_RW,
2393 sc, 0, bnx_sysctl_std_refill, "I",
2394 "# of packets received before scheduling standard refilling");
2396 SYSCTL_ADD_PROC(ctx, tree, OID_AUTO,
2397 "rx_coal_bds_int", CTLTYPE_INT | CTLFLAG_RW,
2398 sc, 0, bnx_sysctl_rx_coal_bds_int, "I",
2399 "Receive max coalesced BD count during interrupt.");
2400 SYSCTL_ADD_PROC(ctx, tree, OID_AUTO,
2401 "tx_coal_bds_int", CTLTYPE_INT | CTLFLAG_RW,
2402 sc, 0, bnx_sysctl_tx_coal_bds_int, "I",
2403 "Transmit max coalesced BD count during interrupt.");
2405 #ifdef IFPOLL_ENABLE
2406 if (sc->bnx_flags & BNX_FLAG_RXTX_BUNDLE) {
2407 SYSCTL_ADD_PROC(ctx, tree, OID_AUTO,
2408 "npoll_offset", CTLTYPE_INT | CTLFLAG_RW,
2409 sc, 0, bnx_sysctl_npoll_offset, "I",
2410 "NPOLLING cpu offset");
2412 SYSCTL_ADD_PROC(ctx, tree, OID_AUTO,
2413 "npoll_rxoff", CTLTYPE_INT | CTLFLAG_RW,
2414 sc, 0, bnx_sysctl_npoll_rxoff, "I",
2415 "NPOLLING RX cpu offset");
2416 SYSCTL_ADD_PROC(ctx, tree, OID_AUTO,
2417 "npoll_txoff", CTLTYPE_INT | CTLFLAG_RW,
2418 sc, 0, bnx_sysctl_npoll_txoff, "I",
2419 "NPOLLING TX cpu offset");
2423 #ifdef BNX_RSS_DEBUG
2424 SYSCTL_ADD_INT(ctx, tree, OID_AUTO,
2425 "std_refill_mask", CTLFLAG_RD,
2426 &sc->bnx_rx_std_ring.bnx_rx_std_refill, 0, "");
2427 SYSCTL_ADD_INT(ctx, tree, OID_AUTO,
2428 "std_used", CTLFLAG_RD,
2429 &sc->bnx_rx_std_ring.bnx_rx_std_used, 0, "");
2430 SYSCTL_ADD_INT(ctx, tree, OID_AUTO,
2431 "rss_debug", CTLFLAG_RW, &sc->bnx_rss_debug, 0, "");
2432 for (i = 0; i < sc->bnx_rx_retcnt; ++i) {
2433 ksnprintf(desc, sizeof(desc), "rx_pkt%d", i);
2434 SYSCTL_ADD_ULONG(ctx, tree, OID_AUTO,
2435 desc, CTLFLAG_RW, &sc->bnx_rx_ret_ring[i].bnx_rx_pkt, "");
2437 ksnprintf(desc, sizeof(desc), "rx_force_sched%d", i);
2438 SYSCTL_ADD_ULONG(ctx, tree, OID_AUTO,
2440 &sc->bnx_rx_ret_ring[i].bnx_rx_force_sched, "");
2443 #ifdef BNX_TSS_DEBUG
2444 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) {
2445 ksnprintf(desc, sizeof(desc), "tx_pkt%d", i);
2446 SYSCTL_ADD_ULONG(ctx, tree, OID_AUTO,
2447 desc, CTLFLAG_RW, &sc->bnx_tx_ring[i].bnx_tx_pkt, "");
2451 SYSCTL_ADD_ULONG(ctx, tree, OID_AUTO,
2452 "norxbds", CTLFLAG_RW, &sc->bnx_norxbds, "");
2454 SYSCTL_ADD_ULONG(ctx, tree, OID_AUTO,
2455 "errors", CTLFLAG_RW, &sc->bnx_errors, "");
2457 #ifdef BNX_TSO_DEBUG
2458 for (i = 0; i < BNX_TSO_NSTATS; ++i) {
2459 ksnprintf(desc, sizeof(desc), "tso%d", i + 1);
2460 SYSCTL_ADD_ULONG(ctx, tree, OID_AUTO,
2461 desc, CTLFLAG_RW, &sc->bnx_tsosegs[i], "");
2466 * Call MI attach routine.
2468 ether_ifattach(ifp, ether_addr, NULL);
2470 /* Setup TX rings and subqueues */
2471 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) {
2472 struct ifaltq_subque *ifsq = ifq_get_subq(&ifp->if_snd, i);
2473 struct bnx_tx_ring *txr = &sc->bnx_tx_ring[i];
2475 ifsq_set_cpuid(ifsq, txr->bnx_tx_cpuid);
2476 ifsq_set_hw_serialize(ifsq, &txr->bnx_tx_serialize);
2477 ifsq_set_priv(ifsq, txr);
2478 txr->bnx_ifsq = ifsq;
2480 ifsq_watchdog_init(&txr->bnx_tx_watchdog, ifsq, bnx_watchdog);
2483 device_printf(dev, "txr %d -> cpu%d\n", i,
2488 error = bnx_setup_intr(sc);
2490 ether_ifdetach(ifp);
2493 bnx_set_tick_cpuid(sc, FALSE);
2496 * Create RX standard ring refilling thread
2498 std_cpuid_def = device_get_unit(dev) % ncpus;
2499 std_cpuid = device_getenv_int(dev, "std.cpuid", std_cpuid_def);
2500 if (std_cpuid < 0 || std_cpuid >= ncpus) {
2501 device_printf(dev, "invalid std.cpuid %d, use %d\n",
2502 std_cpuid, std_cpuid_def);
2503 std_cpuid = std_cpuid_def;
2506 std = &sc->bnx_rx_std_ring;
2507 lwkt_create(bnx_rx_std_refill_ithread, std, &std->bnx_rx_std_ithread,
2508 NULL, TDF_NOSTART | TDF_INTTHREAD, std_cpuid,
2509 "%s std", device_get_nameunit(dev));
2510 lwkt_setpri(std->bnx_rx_std_ithread, TDPRI_INT_MED);
2511 std->bnx_rx_std_ithread->td_preemptable = lwkt_preempt;
2520 bnx_detach(device_t dev)
2522 struct bnx_softc *sc = device_get_softc(dev);
2523 struct bnx_rx_std_ring *std = &sc->bnx_rx_std_ring;
2525 if (device_is_attached(dev)) {
2526 struct ifnet *ifp = &sc->arpcom.ac_if;
2528 ifnet_serialize_all(ifp);
2530 bnx_teardown_intr(sc, sc->bnx_intr_cnt);
2531 ifnet_deserialize_all(ifp);
2533 ether_ifdetach(ifp);
2536 if (std->bnx_rx_std_ithread != NULL) {
2537 tsleep_interlock(std, 0);
2539 if (std->bnx_rx_std_ithread->td_gd == mycpu) {
2540 bnx_rx_std_refill_stop(std);
2542 lwkt_send_ipiq(std->bnx_rx_std_ithread->td_gd,
2543 bnx_rx_std_refill_stop, std);
2546 tsleep(std, PINTERLOCKED, "bnx_detach", 0);
2548 device_printf(dev, "RX std ithread exited\n");
2550 lwkt_synchronize_ipiqs("bnx_detach_ipiq");
2553 if (sc->bnx_flags & BNX_FLAG_TBI)
2554 ifmedia_removeall(&sc->bnx_ifmedia);
2556 device_delete_child(dev, sc->bnx_miibus);
2557 bus_generic_detach(dev);
2561 if (sc->bnx_msix_mem_res != NULL) {
2562 bus_release_resource(dev, SYS_RES_MEMORY, sc->bnx_msix_mem_rid,
2563 sc->bnx_msix_mem_res);
2565 if (sc->bnx_res != NULL) {
2566 bus_release_resource(dev, SYS_RES_MEMORY,
2567 BGE_PCI_BAR0, sc->bnx_res);
2569 if (sc->bnx_res2 != NULL) {
2570 bus_release_resource(dev, SYS_RES_MEMORY,
2571 PCIR_BAR(2), sc->bnx_res2);
2576 if (sc->bnx_serialize != NULL)
2577 kfree(sc->bnx_serialize, M_DEVBUF);
2583 bnx_reset(struct bnx_softc *sc)
2585 device_t dev = sc->bnx_dev;
2586 uint32_t cachesize, command, reset, mac_mode, mac_mode_mask;
2587 void (*write_op)(struct bnx_softc *, uint32_t, uint32_t);
2591 mac_mode_mask = BGE_MACMODE_HALF_DUPLEX | BGE_MACMODE_PORTMODE;
2592 if (sc->bnx_mfw_flags & BNX_MFW_ON_APE)
2593 mac_mode_mask |= BGE_MACMODE_APE_RX_EN | BGE_MACMODE_APE_TX_EN;
2594 mac_mode = CSR_READ_4(sc, BGE_MAC_MODE) & mac_mode_mask;
2596 write_op = bnx_writemem_direct;
2598 CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
2599 for (i = 0; i < 8000; i++) {
2600 if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1)
		if_printf(&sc->arpcom.ac_if, "NVRAM lock timed out!\n");
2607 /* Take APE lock when performing reset. */
2608 bnx_ape_lock(sc, BGE_APE_LOCK_GRC);
2610 /* Save some important PCI state. */
2611 cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
2612 command = pci_read_config(dev, BGE_PCI_CMD, 4);
2614 pci_write_config(dev, BGE_PCI_MISC_CTL,
2615 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2616 BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW|
2617 BGE_PCIMISCCTL_TAGGED_STATUS, 4);
2619 /* Disable fastboot on controllers that support it. */
2621 if_printf(&sc->arpcom.ac_if, "Disabling fastboot\n");
2622 CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0x0);
2625 * Write the magic number to SRAM at offset 0xB50.
2626 * When firmware finishes its initialization it will
2627 * write ~BGE_SRAM_FW_MB_MAGIC to the same location.
2629 bnx_writemem_ind(sc, BGE_SRAM_FW_MB, BGE_SRAM_FW_MB_MAGIC);
2631 reset = BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1);
2633 /* XXX: Broadcom Linux driver. */
2634 /* Force PCI-E 1.0a mode */
2635 if (!BNX_IS_57765_PLUS(sc) &&
2636 CSR_READ_4(sc, BGE_PCIE_PHY_TSTCTL) ==
2637 (BGE_PCIE_PHY_TSTCTL_PSCRAM |
2638 BGE_PCIE_PHY_TSTCTL_PCIE10)) {
2639 CSR_WRITE_4(sc, BGE_PCIE_PHY_TSTCTL,
2640 BGE_PCIE_PHY_TSTCTL_PSCRAM);
2642 if (sc->bnx_chipid != BGE_CHIPID_BCM5750_A0) {
2643 /* Prevent PCIE link training during global reset */
2644 CSR_WRITE_4(sc, BGE_MISC_CFG, (1<<29));
2649 * Set GPHY Power Down Override to leave GPHY
2650 * powered up in D0 uninitialized.
2652 if ((sc->bnx_flags & BNX_FLAG_CPMU) == 0)
2653 reset |= BGE_MISCCFG_GPHY_PD_OVERRIDE;
2655 /* Issue global reset */
2656 write_op(sc, BGE_MISC_CFG, reset);
2660 /* XXX: Broadcom Linux driver. */
2661 if (sc->bnx_chipid == BGE_CHIPID_BCM5750_A0) {
2664 DELAY(500000); /* wait for link training to complete */
2665 v = pci_read_config(dev, 0xc4, 4);
2666 pci_write_config(dev, 0xc4, v | (1<<15), 4);
2669 devctl = pci_read_config(dev, sc->bnx_pciecap + PCIER_DEVCTRL, 2);
2671 /* Disable no snoop and disable relaxed ordering. */
2672 devctl &= ~(PCIEM_DEVCTL_RELAX_ORDER | PCIEM_DEVCTL_NOSNOOP);
	/* Old PCI-E chips only support a 128 byte Max Payload Size. */
2675 if ((sc->bnx_flags & BNX_FLAG_CPMU) == 0) {
2676 devctl &= ~PCIEM_DEVCTL_MAX_PAYLOAD_MASK;
2677 devctl |= PCIEM_DEVCTL_MAX_PAYLOAD_128;
2680 pci_write_config(dev, sc->bnx_pciecap + PCIER_DEVCTRL,
2683 /* Clear error status. */
2684 pci_write_config(dev, sc->bnx_pciecap + PCIER_DEVSTS,
2685 PCIEM_DEVSTS_CORR_ERR |
2686 PCIEM_DEVSTS_NFATAL_ERR |
2687 PCIEM_DEVSTS_FATAL_ERR |
2688 PCIEM_DEVSTS_UNSUPP_REQ, 2);
2690 /* Reset some of the PCI state that got zapped by reset */
2691 pci_write_config(dev, BGE_PCI_MISC_CTL,
2692 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2693 BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW|
2694 BGE_PCIMISCCTL_TAGGED_STATUS, 4);
2695 val = BGE_PCISTATE_ROM_ENABLE | BGE_PCISTATE_ROM_RETRY_ENABLE;
2696 if (sc->bnx_mfw_flags & BNX_MFW_ON_APE) {
2697 val |= BGE_PCISTATE_ALLOW_APE_CTLSPC_WR |
2698 BGE_PCISTATE_ALLOW_APE_SHMEM_WR |
2699 BGE_PCISTATE_ALLOW_APE_PSPACE_WR;
2701 pci_write_config(dev, BGE_PCI_PCISTATE, val, 4);
2702 pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
2703 pci_write_config(dev, BGE_PCI_CMD, command, 4);
2705 /* Enable memory arbiter */
2706 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
2708 /* Fix up byte swapping */
2709 CSR_WRITE_4(sc, BGE_MODE_CTL, bnx_dma_swap_options(sc));
2711 val = CSR_READ_4(sc, BGE_MAC_MODE);
2712 val = (val & ~mac_mode_mask) | mac_mode;
2713 CSR_WRITE_4(sc, BGE_MAC_MODE, val);
2716 bnx_ape_unlock(sc, BGE_APE_LOCK_GRC);
2719 * Poll until we see the 1's complement of the magic number.
2720 * This indicates that the firmware initialization is complete.
2722 for (i = 0; i < BNX_FIRMWARE_TIMEOUT; i++) {
2723 val = bnx_readmem_ind(sc, BGE_SRAM_FW_MB);
2724 if (val == ~BGE_SRAM_FW_MB_MAGIC)
2728 if (i == BNX_FIRMWARE_TIMEOUT) {
2729 if_printf(&sc->arpcom.ac_if, "firmware handshake "
2730 "timed out, found 0x%08x\n", val);
2733 /* BCM57765 A0 needs additional time before accessing. */
2734 if (sc->bnx_chipid == BGE_CHIPID_BCM57765_A0)
2738 * The 5704 in TBI mode apparently needs some special
	 * adjustment to ensure the SERDES drive level is set
2742 if (sc->bnx_asicrev == BGE_ASICREV_BCM5704 &&
2743 (sc->bnx_flags & BNX_FLAG_TBI)) {
2746 serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG);
2747 serdescfg = (serdescfg & ~0xFFF) | 0x880;
2748 CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg);
2751 CSR_WRITE_4(sc, BGE_MI_MODE,
2752 sc->bnx_mi_mode & ~BGE_MIMODE_AUTOPOLL);
2755 /* XXX: Broadcom Linux driver. */
2756 if (!BNX_IS_57765_PLUS(sc)) {
2759 /* Enable Data FIFO protection. */
2760 v = CSR_READ_4(sc, BGE_PCIE_TLDLPL_PORT);
2761 CSR_WRITE_4(sc, BGE_PCIE_TLDLPL_PORT, v | (1 << 25));
2766 if (sc->bnx_asicrev == BGE_ASICREV_BCM5720) {
2767 BNX_CLRBIT(sc, BGE_CPMU_CLCK_ORIDE,
2768 CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
2773 * Frame reception handling. This is called if there's a frame
2774 * on the receive return list.
2776 * Note: we have to be able to handle two possibilities here:
 * 1) the frame is from the jumbo receive ring
2778 * 2) the frame is from the standard receive ring
2782 bnx_rxeof(struct bnx_rx_ret_ring *ret, uint16_t rx_prod, int count)
2784 struct bnx_softc *sc = ret->bnx_sc;
2785 struct bnx_rx_std_ring *std = ret->bnx_std;
2786 struct ifnet *ifp = &sc->arpcom.ac_if;
2787 int std_used = 0, cpuid = mycpuid;
2789 while (ret->bnx_rx_saved_considx != rx_prod && count != 0) {
2790 struct pktinfo pi0, *pi = NULL;
2791 struct bge_rx_bd *cur_rx;
2792 struct bnx_rx_buf *rb;
2794 struct mbuf *m = NULL;
2795 uint16_t vlan_tag = 0;
2800 cur_rx = &ret->bnx_rx_ret_ring[ret->bnx_rx_saved_considx];
2802 rxidx = cur_rx->bge_idx;
2803 KKASSERT(rxidx < BGE_STD_RX_RING_CNT);
2805 BNX_INC(ret->bnx_rx_saved_considx, BNX_RETURN_RING_CNT);
2806 #ifdef BNX_RSS_DEBUG
2810 if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
2812 vlan_tag = cur_rx->bge_vlan_tag;
2815 if (ret->bnx_rx_cnt >= ret->bnx_rx_cntmax) {
2816 atomic_add_int(&std->bnx_rx_std_used, std_used);
2819 bnx_rx_std_refill_sched(ret, std);
2824 rb = &std->bnx_rx_std_buf[rxidx];
2825 m = rb->bnx_rx_mbuf;
2826 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2827 IFNET_STAT_INC(ifp, ierrors, 1);
2829 rb->bnx_rx_refilled = 1;
2832 if (bnx_newbuf_std(ret, rxidx, 0)) {
2833 IFNET_STAT_INC(ifp, ierrors, 1);
2837 IFNET_STAT_INC(ifp, ipackets, 1);
2838 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
2839 m->m_pkthdr.rcvif = ifp;
2841 if ((ifp->if_capenable & IFCAP_RXCSUM) &&
2842 (cur_rx->bge_flags & BGE_RXBDFLAG_IPV6) == 0) {
2843 if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
2844 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2845 if ((cur_rx->bge_error_flag &
2846 BGE_RXERRFLAG_IP_CSUM_NOK) == 0)
2847 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2849 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) {
2850 m->m_pkthdr.csum_data =
2851 cur_rx->bge_tcp_udp_csum;
2852 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
2856 if (ifp->if_capenable & IFCAP_RSS) {
2857 pi = bnx_rss_info(&pi0, cur_rx);
2859 (cur_rx->bge_flags & BGE_RXBDFLAG_RSS_HASH)) {
2860 m->m_flags |= M_HASH;
2862 toeplitz_hash(cur_rx->bge_hash);
2867 * If we received a packet with a vlan tag, pass it
2868 * to vlan_input() instead of ether_input().
2871 m->m_flags |= M_VLANTAG;
2872 m->m_pkthdr.ether_vlantag = vlan_tag;
2874 ifp->if_input(ifp, m, pi, cpuid);
2876 bnx_writembx(sc, ret->bnx_rx_mbx, ret->bnx_rx_saved_considx);
2881 cur_std_used = atomic_fetchadd_int(&std->bnx_rx_std_used,
2883 if (cur_std_used + std_used >= (BGE_STD_RX_RING_CNT / 2)) {
2884 #ifdef BNX_RSS_DEBUG
2885 ret->bnx_rx_force_sched++;
2887 bnx_rx_std_refill_sched(ret, std);
2893 bnx_txeof(struct bnx_tx_ring *txr, uint16_t tx_cons)
2895 struct ifnet *ifp = &txr->bnx_sc->arpcom.ac_if;
2898 * Go through our tx ring and free mbufs for those
2899 * frames that have been sent.
2901 while (txr->bnx_tx_saved_considx != tx_cons) {
2902 struct bnx_tx_buf *buf;
2905 idx = txr->bnx_tx_saved_considx;
2906 buf = &txr->bnx_tx_buf[idx];
2907 if (buf->bnx_tx_mbuf != NULL) {
2908 IFNET_STAT_INC(ifp, opackets, 1);
2909 #ifdef BNX_TSS_DEBUG
2912 bus_dmamap_unload(txr->bnx_tx_mtag,
2913 buf->bnx_tx_dmamap);
2914 m_freem(buf->bnx_tx_mbuf);
2915 buf->bnx_tx_mbuf = NULL;
2918 BNX_INC(txr->bnx_tx_saved_considx, BGE_TX_RING_CNT);
2921 if ((BGE_TX_RING_CNT - txr->bnx_tx_cnt) >=
2922 (BNX_NSEG_RSVD + BNX_NSEG_SPARE))
2923 ifsq_clr_oactive(txr->bnx_ifsq);
2925 if (txr->bnx_tx_cnt == 0)
2926 txr->bnx_tx_watchdog.wd_timer = 0;
2928 if (!ifsq_is_empty(txr->bnx_ifsq))
2929 ifsq_devstart(txr->bnx_ifsq);
2933 bnx_handle_status(struct bnx_softc *sc)
2938 status = *sc->bnx_hw_status;
2940 if (status & BGE_STATFLAG_ERROR) {
2946 val = CSR_READ_4(sc, BGE_FLOW_ATTN);
2947 if (val & ~BGE_FLOWATTN_MB_LOWAT) {
2948 if_printf(&sc->arpcom.ac_if,
2949 "flow attn 0x%08x\n", val);
2953 val = CSR_READ_4(sc, BGE_MSI_STATUS);
2954 if (val & ~BGE_MSISTAT_MSI_PCI_REQ) {
2955 if_printf(&sc->arpcom.ac_if,
2956 "msi status 0x%08x\n", val);
2960 val = CSR_READ_4(sc, BGE_RDMA_STATUS);
2962 if_printf(&sc->arpcom.ac_if,
2963 "rmda status 0x%08x\n", val);
2967 val = CSR_READ_4(sc, BGE_WDMA_STATUS);
2969 if_printf(&sc->arpcom.ac_if,
2970 "wdma status 0x%08x\n", val);
2975 bnx_serialize_skipmain(sc);
2977 bnx_deserialize_skipmain(sc);
2982 if ((status & BGE_STATFLAG_LINKSTATE_CHANGED) || sc->bnx_link_evt) {
2984 if_printf(&sc->arpcom.ac_if, "link change, "
2985 "link_evt %d\n", sc->bnx_link_evt);
2994 #ifdef IFPOLL_ENABLE
2997 bnx_npoll_rx(struct ifnet *ifp __unused, void *xret, int cycle)
2999 struct bnx_rx_ret_ring *ret = xret;
3002 ASSERT_SERIALIZED(&ret->bnx_rx_ret_serialize);
3004 ret->bnx_saved_status_tag = *ret->bnx_hw_status_tag;
3007 rx_prod = *ret->bnx_rx_considx;
3008 if (ret->bnx_rx_saved_considx != rx_prod)
3009 bnx_rxeof(ret, rx_prod, cycle);
3013 bnx_npoll_tx_notag(struct ifnet *ifp __unused, void *xtxr, int cycle __unused)
3015 struct bnx_tx_ring *txr = xtxr;
3018 ASSERT_SERIALIZED(&txr->bnx_tx_serialize);
3020 tx_cons = *txr->bnx_tx_considx;
3021 if (txr->bnx_tx_saved_considx != tx_cons)
3022 bnx_txeof(txr, tx_cons);
3026 bnx_npoll_tx(struct ifnet *ifp, void *xtxr, int cycle)
3028 struct bnx_tx_ring *txr = xtxr;
3030 ASSERT_SERIALIZED(&txr->bnx_tx_serialize);
3032 txr->bnx_saved_status_tag = *txr->bnx_hw_status_tag;
3034 bnx_npoll_tx_notag(ifp, txr, cycle);
3038 bnx_npoll_status_notag(struct ifnet *ifp)
3040 struct bnx_softc *sc = ifp->if_softc;
3042 ASSERT_SERIALIZED(&sc->bnx_main_serialize);
3044 if (bnx_handle_status(sc)) {
3046 * Status changes are handled; force the chip to
3047 * update the status block to reflect whether there
		 * are more status changes or not; otherwise stale
		 * status changes would be seen over and over again.
3051 BNX_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
3056 bnx_npoll_status(struct ifnet *ifp)
3058 struct bnx_softc *sc = ifp->if_softc;
3060 ASSERT_SERIALIZED(&sc->bnx_main_serialize);
3062 sc->bnx_saved_status_tag = *sc->bnx_hw_status_tag;
3064 bnx_npoll_status_notag(ifp);
3068 bnx_npoll(struct ifnet *ifp, struct ifpoll_info *info)
3070 struct bnx_softc *sc = ifp->if_softc;
3073 ASSERT_IFNET_SERIALIZED_ALL(ifp);
3076 if (sc->bnx_flags & BNX_FLAG_STATUS_HASTAG)
3077 info->ifpi_status.status_func = bnx_npoll_status;
3079 info->ifpi_status.status_func = bnx_npoll_status_notag;
3080 info->ifpi_status.serializer = &sc->bnx_main_serialize;
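	/* Map each TX ring to the CPU at (ring index + npoll TX offset). */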
3082 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) {
3083 struct bnx_tx_ring *txr = &sc->bnx_tx_ring[i];
3084 int idx = i + sc->bnx_npoll_txoff;
3086 KKASSERT(idx < ncpus2);
3087 if (sc->bnx_flags & BNX_FLAG_RXTX_BUNDLE) {
3088 info->ifpi_tx[idx].poll_func =
3091 info->ifpi_tx[idx].poll_func = bnx_npoll_tx;
3093 info->ifpi_tx[idx].arg = txr;
3094 info->ifpi_tx[idx].serializer = &txr->bnx_tx_serialize;
3095 ifsq_set_cpuid(txr->bnx_ifsq, idx);
3098 for (i = 0; i < sc->bnx_rx_retcnt; ++i) {
3099 struct bnx_rx_ret_ring *ret = &sc->bnx_rx_ret_ring[i];
3100 int idx = i + sc->bnx_npoll_rxoff;
3102 KKASSERT(idx < ncpus2);
3103 info->ifpi_rx[idx].poll_func = bnx_npoll_rx;
3104 info->ifpi_rx[idx].arg = ret;
3105 info->ifpi_rx[idx].serializer =
3106 &ret->bnx_rx_ret_serialize;
3109 if (ifp->if_flags & IFF_RUNNING) {
3110 bnx_disable_intr(sc);
3111 bnx_set_tick_cpuid(sc, TRUE);
3113 sc->bnx_coal_chg = BNX_TX_COAL_BDS_CHG |
3114 BNX_RX_COAL_BDS_CHG;
3115 bnx_coal_change(sc);
3118 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) {
3119 ifsq_set_cpuid(sc->bnx_tx_ring[i].bnx_ifsq,
3120 sc->bnx_tx_ring[i].bnx_tx_cpuid);
3122 if (ifp->if_flags & IFF_RUNNING) {
3123 sc->bnx_coal_chg = BNX_TX_COAL_BDS_CHG |
3124 BNX_RX_COAL_BDS_CHG;
3125 bnx_coal_change(sc);
3127 bnx_enable_intr(sc);
3128 bnx_set_tick_cpuid(sc, FALSE);
3133 #endif /* IFPOLL_ENABLE */
3136 bnx_intr_legacy(void *xsc)
3138 struct bnx_softc *sc = xsc;
3139 struct bnx_rx_ret_ring *ret = &sc->bnx_rx_ret_ring[0];
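	/*
	 * If the status tag has not changed, check whether the
	 * interrupt line is actually asserted before doing any
	 * work; the INTx line may be shared with other devices.
	 */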
3141 if (ret->bnx_saved_status_tag == *ret->bnx_hw_status_tag) {
3144 val = pci_read_config(sc->bnx_dev, BGE_PCI_PCISTATE, 4);
3145 if (val & BGE_PCISTAT_INTR_NOTACT)
	 * The interrupt will have to be disabled if tagged status
	 * is used; otherwise the interrupt is always asserted on
	 * certain chips (at least on BCM5750 AX/BX).
3155 bnx_writembx(sc, BGE_MBX_IRQ0_LO, 1);
3167 bnx_intr(struct bnx_softc *sc)
3169 struct ifnet *ifp = &sc->arpcom.ac_if;
3170 struct bnx_rx_ret_ring *ret = &sc->bnx_rx_ret_ring[0];
3172 ASSERT_SERIALIZED(&sc->bnx_main_serialize);
3174 ret->bnx_saved_status_tag = *ret->bnx_hw_status_tag;
3176 * Use a load fence to ensure that status_tag is saved
3177 * before rx_prod, tx_cons and status.
3181 bnx_handle_status(sc);
3183 if (ifp->if_flags & IFF_RUNNING) {
3184 struct bnx_tx_ring *txr = &sc->bnx_tx_ring[0];
3185 uint16_t rx_prod, tx_cons;
3187 lwkt_serialize_enter(&ret->bnx_rx_ret_serialize);
3188 rx_prod = *ret->bnx_rx_considx;
3189 if (ret->bnx_rx_saved_considx != rx_prod)
3190 bnx_rxeof(ret, rx_prod, -1);
3191 lwkt_serialize_exit(&ret->bnx_rx_ret_serialize);
3193 lwkt_serialize_enter(&txr->bnx_tx_serialize);
3194 tx_cons = *txr->bnx_tx_considx;
3195 if (txr->bnx_tx_saved_considx != tx_cons)
3196 bnx_txeof(txr, tx_cons);
3197 lwkt_serialize_exit(&txr->bnx_tx_serialize);
3200 bnx_writembx(sc, BGE_MBX_IRQ0_LO, ret->bnx_saved_status_tag << 24);
3204 bnx_msix_tx_status(void *xtxr)
3206 struct bnx_tx_ring *txr = xtxr;
3207 struct bnx_softc *sc = txr->bnx_sc;
3208 struct ifnet *ifp = &sc->arpcom.ac_if;
3210 ASSERT_SERIALIZED(&sc->bnx_main_serialize);
3212 txr->bnx_saved_status_tag = *txr->bnx_hw_status_tag;
3214 * Use a load fence to ensure that status_tag is saved
3215 * before tx_cons and status.
3219 bnx_handle_status(sc);
3221 if (ifp->if_flags & IFF_RUNNING) {
3224 lwkt_serialize_enter(&txr->bnx_tx_serialize);
3225 tx_cons = *txr->bnx_tx_considx;
3226 if (txr->bnx_tx_saved_considx != tx_cons)
3227 bnx_txeof(txr, tx_cons);
3228 lwkt_serialize_exit(&txr->bnx_tx_serialize);
3231 bnx_writembx(sc, BGE_MBX_IRQ0_LO, txr->bnx_saved_status_tag << 24);
3235 bnx_msix_rx(void *xret)
3237 struct bnx_rx_ret_ring *ret = xret;
3240 ASSERT_SERIALIZED(&ret->bnx_rx_ret_serialize);
3242 ret->bnx_saved_status_tag = *ret->bnx_hw_status_tag;
3244 * Use a load fence to ensure that status_tag is saved
3249 rx_prod = *ret->bnx_rx_considx;
3250 if (ret->bnx_rx_saved_considx != rx_prod)
3251 bnx_rxeof(ret, rx_prod, -1);
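	/*
	 * Write the saved status tag (in bits 24-31) back to the
	 * mailbox to acknowledge the processed status block and
	 * re-arm this vector.
	 */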
3253 bnx_writembx(ret->bnx_sc, ret->bnx_msix_mbx,
3254 ret->bnx_saved_status_tag << 24);
3258 bnx_msix_rxtx(void *xret)
3260 struct bnx_rx_ret_ring *ret = xret;
3261 struct bnx_tx_ring *txr = ret->bnx_txr;
3262 uint16_t rx_prod, tx_cons;
3264 ASSERT_SERIALIZED(&ret->bnx_rx_ret_serialize);
3266 ret->bnx_saved_status_tag = *ret->bnx_hw_status_tag;
3268 * Use a load fence to ensure that status_tag is saved
3269 * before rx_prod and tx_cons.
3273 rx_prod = *ret->bnx_rx_considx;
3274 if (ret->bnx_rx_saved_considx != rx_prod)
3275 bnx_rxeof(ret, rx_prod, -1);
3277 lwkt_serialize_enter(&txr->bnx_tx_serialize);
3278 tx_cons = *txr->bnx_tx_considx;
3279 if (txr->bnx_tx_saved_considx != tx_cons)
3280 bnx_txeof(txr, tx_cons);
3281 lwkt_serialize_exit(&txr->bnx_tx_serialize);
3283 bnx_writembx(ret->bnx_sc, ret->bnx_msix_mbx,
3284 ret->bnx_saved_status_tag << 24);
3288 bnx_msix_status(void *xsc)
3290 struct bnx_softc *sc = xsc;
3292 ASSERT_SERIALIZED(&sc->bnx_main_serialize);
3294 sc->bnx_saved_status_tag = *sc->bnx_hw_status_tag;
3296 * Use a load fence to ensure that status_tag is saved
3301 bnx_handle_status(sc);
3303 bnx_writembx(sc, BGE_MBX_IRQ0_LO, sc->bnx_saved_status_tag << 24);
3309 struct bnx_softc *sc = xsc;
3311 lwkt_serialize_enter(&sc->bnx_main_serialize);
3313 bnx_stats_update_regs(sc);
3315 if (sc->bnx_flags & BNX_FLAG_TBI) {
		 * Since auto-polling can't be used in TBI mode, we poll
		 * the link status manually.  Here we register a pending
		 * link event and trigger an interrupt.
3322 BNX_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
3323 } else if (!sc->bnx_link) {
3324 mii_tick(device_get_softc(sc->bnx_miibus));
3327 callout_reset_bycpu(&sc->bnx_tick_timer, hz, bnx_tick, sc,
3328 sc->bnx_tick_cpuid);
3330 lwkt_serialize_exit(&sc->bnx_main_serialize);
3334 bnx_stats_update_regs(struct bnx_softc *sc)
3336 struct ifnet *ifp = &sc->arpcom.ac_if;
3337 struct bge_mac_stats_regs stats;
3341 s = (uint32_t *)&stats;
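	/*
	 * Copy the chip's MAC statistics registers into the local
	 * structure, one 32-bit word at a time.
	 */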
3342 for (i = 0; i < sizeof(struct bge_mac_stats_regs); i += 4) {
3343 *s = CSR_READ_4(sc, BGE_RX_STATS + i);
3347 IFNET_STAT_SET(ifp, collisions,
3348 (stats.dot3StatsSingleCollisionFrames +
3349 stats.dot3StatsMultipleCollisionFrames +
3350 stats.dot3StatsExcessiveCollisions +
3351 stats.dot3StatsLateCollisions));
3353 val = CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS);
3354 sc->bnx_norxbds += val;
3358 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
3359 * pointers to descriptors.
3362 bnx_encap(struct bnx_tx_ring *txr, struct mbuf **m_head0, uint32_t *txidx,
3365 struct bge_tx_bd *d = NULL;
3366 uint16_t csum_flags = 0, vlan_tag = 0, mss = 0;
3367 bus_dma_segment_t segs[BNX_NSEG_NEW];
3369 int error, maxsegs, nsegs, idx, i;
3370 struct mbuf *m_head = *m_head0, *m_new;
3372 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
3373 #ifdef BNX_TSO_DEBUG
3377 error = bnx_setup_tso(txr, m_head0, &mss, &csum_flags);
3382 #ifdef BNX_TSO_DEBUG
3383 tso_nsegs = (m_head->m_pkthdr.len /
3384 m_head->m_pkthdr.tso_segsz) - 1;
3385 if (tso_nsegs > (BNX_TSO_NSTATS - 1))
3386 tso_nsegs = BNX_TSO_NSTATS - 1;
3387 else if (tso_nsegs < 0)
3389 txr->bnx_sc->bnx_tsosegs[tso_nsegs]++;
3391 } else if (m_head->m_pkthdr.csum_flags & BNX_CSUM_FEATURES) {
3392 if (m_head->m_pkthdr.csum_flags & CSUM_IP)
3393 csum_flags |= BGE_TXBDFLAG_IP_CSUM;
3394 if (m_head->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
3395 csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
3396 if (m_head->m_flags & M_LASTFRAG)
3397 csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
3398 else if (m_head->m_flags & M_FRAG)
3399 csum_flags |= BGE_TXBDFLAG_IP_FRAG;
3401 if (m_head->m_flags & M_VLANTAG) {
3402 csum_flags |= BGE_TXBDFLAG_VLAN_TAG;
3403 vlan_tag = m_head->m_pkthdr.ether_vlantag;
3407 map = txr->bnx_tx_buf[idx].bnx_tx_dmamap;
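	/*
	 * Leave BNX_NSEG_RSVD descriptors unused so the producer can
	 * never completely fill the ring; BNX_NSEG_SPARE extra
	 * segments cover possible defragmentation (see bnx_start()).
	 */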
3409 maxsegs = (BGE_TX_RING_CNT - txr->bnx_tx_cnt) - BNX_NSEG_RSVD;
3410 KASSERT(maxsegs >= BNX_NSEG_SPARE,
3411 ("not enough segments %d", maxsegs));
3413 if (maxsegs > BNX_NSEG_NEW)
3414 maxsegs = BNX_NSEG_NEW;
	 * Pad outbound frames to BNX_MIN_FRAMELEN for an unusual reason.
	 * The hardware will pad out TX runts to BNX_MIN_FRAMELEN on its
	 * own, but when such padded frames employ the IP/TCP checksum
	 * offload, the hardware checksum assist gives incorrect results
	 * (possibly from incorporating its own padding into the UDP/TCP
	 * checksum; who knows).  If we pad such runts with zeros
	 * ourselves, the onboard checksum comes out correct.
3425 if ((csum_flags & BGE_TXBDFLAG_TCP_UDP_CSUM) &&
3426 m_head->m_pkthdr.len < BNX_MIN_FRAMELEN) {
3427 error = m_devpad(m_head, BNX_MIN_FRAMELEN);
3432 if ((txr->bnx_tx_flags & BNX_TX_FLAG_SHORTDMA) &&
3433 m_head->m_next != NULL) {
3434 m_new = bnx_defrag_shortdma(m_head);
3435 if (m_new == NULL) {
3439 *m_head0 = m_head = m_new;
3441 if ((m_head->m_pkthdr.csum_flags & CSUM_TSO) == 0 &&
3442 (txr->bnx_tx_flags & BNX_TX_FLAG_FORCE_DEFRAG) &&
3443 m_head->m_next != NULL) {
		 * Forcefully defragment the mbuf chain to overcome the
		 * hardware limitation of a single outstanding DMA read
		 * operation.  If that fails, keep going with the
		 * original mbuf chain.
3450 m_new = m_defrag(m_head, M_NOWAIT);
3452 *m_head0 = m_head = m_new;
3455 error = bus_dmamap_load_mbuf_defrag(txr->bnx_tx_mtag, map,
3456 m_head0, segs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
3459 *segs_used += nsegs;
3462 bus_dmamap_sync(txr->bnx_tx_mtag, map, BUS_DMASYNC_PREWRITE);
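	/* Fill in one TX buffer descriptor per DMA segment. */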
3464 for (i = 0; ; i++) {
3465 d = &txr->bnx_tx_ring[idx];
3467 d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr);
3468 d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr);
3469 d->bge_len = segs[i].ds_len;
3470 d->bge_flags = csum_flags;
3471 d->bge_vlan_tag = vlan_tag;
3476 BNX_INC(idx, BGE_TX_RING_CNT);
3478 /* Mark the last segment as end of packet... */
3479 d->bge_flags |= BGE_TXBDFLAG_END;
	 * Ensure that the map for this transmission is placed at
	 * the array index of the last descriptor in this chain;
	 * that is where bnx_txeof() looks for it when freeing the
	 * mbuf.
	txr->bnx_tx_buf[*txidx].bnx_tx_dmamap =
	    txr->bnx_tx_buf[idx].bnx_tx_dmamap;
3486 txr->bnx_tx_buf[idx].bnx_tx_dmamap = map;
3487 txr->bnx_tx_buf[idx].bnx_tx_mbuf = m_head;
3488 txr->bnx_tx_cnt += nsegs;
3490 BNX_INC(idx, BGE_TX_RING_CNT);
3501 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
3502 * to the mbuf data regions directly in the transmit descriptors.
3505 bnx_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
3507 struct bnx_tx_ring *txr = ifsq_get_priv(ifsq);
3508 struct mbuf *m_head = NULL;
3512 KKASSERT(txr->bnx_ifsq == ifsq);
3513 ASSERT_SERIALIZED(&txr->bnx_tx_serialize);
3515 if ((ifp->if_flags & IFF_RUNNING) == 0 || ifsq_is_oactive(ifsq))
3518 prodidx = txr->bnx_tx_prodidx;
3520 while (txr->bnx_tx_buf[prodidx].bnx_tx_mbuf == NULL) {
		 * Sanity check: avoid coming within BNX_NSEG_RSVD
		 * descriptors of the end of the ring. Also make
		 * sure there are BNX_NSEG_SPARE descriptors for
		 * jumbo buffers' or TSO segments' defragmentation.
3527 if ((BGE_TX_RING_CNT - txr->bnx_tx_cnt) <
3528 (BNX_NSEG_RSVD + BNX_NSEG_SPARE)) {
3529 ifsq_set_oactive(ifsq);
3533 m_head = ifsq_dequeue(ifsq);
3538 * Pack the data into the transmit ring. If we
3539 * don't have room, set the OACTIVE flag and wait
3540 * for the NIC to drain the ring.
3542 if (bnx_encap(txr, &m_head, &prodidx, &nsegs)) {
3543 ifsq_set_oactive(ifsq);
3544 IFNET_STAT_INC(ifp, oerrors, 1);
3548 if (nsegs >= txr->bnx_tx_wreg) {
3550 bnx_writembx(txr->bnx_sc, txr->bnx_tx_mbx, prodidx);
3554 ETHER_BPF_MTAP(ifp, m_head);
3557 * Set a timeout in case the chip goes out to lunch.
3559 txr->bnx_tx_watchdog.wd_timer = 5;
3564 bnx_writembx(txr->bnx_sc, txr->bnx_tx_mbx, prodidx);
3566 txr->bnx_tx_prodidx = prodidx;
3572 struct bnx_softc *sc = xsc;
3573 struct ifnet *ifp = &sc->arpcom.ac_if;
3579 ASSERT_IFNET_SERIALIZED_ALL(ifp);
3581 /* Cancel pending I/O and flush buffers. */
3584 bnx_sig_pre_reset(sc, BNX_RESET_START);
3586 bnx_sig_post_reset(sc, BNX_RESET_START);
3591 * Init the various state machines, ring
3592 * control blocks and firmware.
3594 if (bnx_blockinit(sc)) {
3595 if_printf(ifp, "initialization failure\n");
3601 CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
3602 ETHER_HDR_LEN + ETHER_CRC_LEN + EVL_ENCAPLEN);
3604 /* Load our MAC address. */
3605 m = (uint16_t *)&sc->arpcom.ac_enaddr[0];
3606 CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
3607 CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
3609 /* Enable or disable promiscuous mode as needed. */
3612 /* Program multicast filter. */
3616 if (bnx_init_rx_ring_std(&sc->bnx_rx_std_ring)) {
3617 if_printf(ifp, "RX ring initialization failed\n");
3622 /* Init jumbo RX ring. */
3623 if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN)) {
3624 if (bnx_init_rx_ring_jumbo(sc)) {
3625 if_printf(ifp, "Jumbo RX ring initialization failed\n");
3631 /* Init our RX return ring index */
3632 for (i = 0; i < sc->bnx_rx_retcnt; ++i) {
3633 struct bnx_rx_ret_ring *ret = &sc->bnx_rx_ret_ring[i];
3635 ret->bnx_rx_saved_considx = 0;
3636 ret->bnx_rx_cnt = 0;
3640 for (i = 0; i < sc->bnx_tx_ringcnt; ++i)
3641 bnx_init_tx_ring(&sc->bnx_tx_ring[i]);
3643 /* Enable TX MAC state machine lockup fix. */
3644 mode = CSR_READ_4(sc, BGE_TX_MODE);
3645 mode |= BGE_TXMODE_MBUF_LOCKUP_FIX;
3646 if (sc->bnx_asicrev == BGE_ASICREV_BCM5720 ||
3647 sc->bnx_asicrev == BGE_ASICREV_BCM5762) {
3648 mode &= ~(BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE);
3649 mode |= CSR_READ_4(sc, BGE_TX_MODE) &
3650 (BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE);
3652 /* Turn on transmitter */
3653 CSR_WRITE_4(sc, BGE_TX_MODE, mode | BGE_TXMODE_ENABLE);
3656 /* Initialize RSS */
3657 mode = BGE_RXMODE_ENABLE | BGE_RXMODE_IPV6_ENABLE;
3658 if (BNX_RSS_ENABLED(sc)) {
3660 mode |= BGE_RXMODE_RSS_ENABLE |
3661 BGE_RXMODE_RSS_HASH_MASK_BITS |
3662 BGE_RXMODE_RSS_IPV4_HASH |
3663 BGE_RXMODE_RSS_TCP_IPV4_HASH;
3665 /* Turn on receiver */
3666 BNX_SETBIT(sc, BGE_RX_MODE, mode);
3670 * Set the number of good frames to receive after RX MBUF
3671 * Low Watermark has been reached. After the RX MAC receives
3672 * this number of frames, it will drop subsequent incoming
3673 * frames until the MBUF High Watermark is reached.
3675 if (BNX_IS_57765_FAMILY(sc))
3676 CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 1);
3678 CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 2);
3680 if (sc->bnx_intr_type == PCI_INTR_TYPE_MSI ||
3681 sc->bnx_intr_type == PCI_INTR_TYPE_MSIX) {
3683 if_printf(ifp, "MSI_MODE: %#x\n",
3684 CSR_READ_4(sc, BGE_MSI_MODE));
3688 /* Tell firmware we're alive. */
3689 BNX_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3691 /* Enable host interrupts if polling(4) is not enabled. */
3692 PCI_SETBIT(sc->bnx_dev, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA, 4);
3695 #ifdef IFPOLL_ENABLE
3696 if (ifp->if_flags & IFF_NPOLLING)
3700 bnx_disable_intr(sc);
3702 bnx_enable_intr(sc);
3703 bnx_set_tick_cpuid(sc, polling);
3705 ifp->if_flags |= IFF_RUNNING;
3706 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) {
3707 struct bnx_tx_ring *txr = &sc->bnx_tx_ring[i];
3709 ifsq_clr_oactive(txr->bnx_ifsq);
3710 ifsq_watchdog_start(&txr->bnx_tx_watchdog);
3713 bnx_ifmedia_upd(ifp);
3715 callout_reset_bycpu(&sc->bnx_tick_timer, hz, bnx_tick, sc,
3716 sc->bnx_tick_cpuid);
3720 * Set media options.
3723 bnx_ifmedia_upd(struct ifnet *ifp)
3725 struct bnx_softc *sc = ifp->if_softc;
3727 /* If this is a 1000baseX NIC, enable the TBI port. */
3728 if (sc->bnx_flags & BNX_FLAG_TBI) {
3729 struct ifmedia *ifm = &sc->bnx_ifmedia;
3731 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
3734 switch(IFM_SUBTYPE(ifm->ifm_media)) {
3739 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
3740 BNX_CLRBIT(sc, BGE_MAC_MODE,
3741 BGE_MACMODE_HALF_DUPLEX);
3743 BNX_SETBIT(sc, BGE_MAC_MODE,
3744 BGE_MACMODE_HALF_DUPLEX);
3752 struct mii_data *mii = device_get_softc(sc->bnx_miibus);
3756 if (mii->mii_instance) {
3757 struct mii_softc *miisc;
3759 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
3760 mii_phy_reset(miisc);
	 * Force an interrupt so that bnx_link_upd will be called
	 * if needed and any pending link state attention is
	 * cleared.  Without this we would not get any further
	 * interrupts for link state changes and thus would never
	 * bring the link UP or be able to send in bnx_start.  The
	 * only way to get things working was to receive a packet
	 * and get an RX
	 * bnx_tick() should help for fiber cards and we might not
	 * need to do this here if BNX_FLAG_TBI is set, but as we
	 * poll for fiber anyway it should not harm.
3777 BNX_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
3783 * Report current media status.
3786 bnx_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
3788 struct bnx_softc *sc = ifp->if_softc;
3790 if ((ifp->if_flags & IFF_RUNNING) == 0)
3793 if (sc->bnx_flags & BNX_FLAG_TBI) {
3794 ifmr->ifm_status = IFM_AVALID;
3795 ifmr->ifm_active = IFM_ETHER;
3796 if (CSR_READ_4(sc, BGE_MAC_STS) &
3797 BGE_MACSTAT_TBI_PCS_SYNCHED) {
3798 ifmr->ifm_status |= IFM_ACTIVE;
3800 ifmr->ifm_active |= IFM_NONE;
3804 ifmr->ifm_active |= IFM_1000_SX;
3805 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
3806 ifmr->ifm_active |= IFM_HDX;
3808 ifmr->ifm_active |= IFM_FDX;
3810 struct mii_data *mii = device_get_softc(sc->bnx_miibus);
3813 ifmr->ifm_active = mii->mii_media_active;
3814 ifmr->ifm_status = mii->mii_media_status;
3819 bnx_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
3821 struct bnx_softc *sc = ifp->if_softc;
3822 struct ifreq *ifr = (struct ifreq *)data;
3823 int mask, error = 0;
3825 ASSERT_IFNET_SERIALIZED_ALL(ifp);
3829 if ((!BNX_IS_JUMBO_CAPABLE(sc) && ifr->ifr_mtu > ETHERMTU) ||
3830 (BNX_IS_JUMBO_CAPABLE(sc) &&
3831 ifr->ifr_mtu > BNX_JUMBO_MTU)) {
3833 } else if (ifp->if_mtu != ifr->ifr_mtu) {
3834 ifp->if_mtu = ifr->ifr_mtu;
3835 if (ifp->if_flags & IFF_RUNNING)
3840 if (ifp->if_flags & IFF_UP) {
3841 if (ifp->if_flags & IFF_RUNNING) {
3842 mask = ifp->if_flags ^ sc->bnx_if_flags;
3845 * If only the state of the PROMISC flag
3846 * changed, then just use the 'set promisc
3847 * mode' command instead of reinitializing
3848 * the entire NIC. Doing a full re-init
3849 * means reloading the firmware and waiting
3850 * for it to start up, which may take a
3851 * second or two. Similarly for ALLMULTI.
3853 if (mask & IFF_PROMISC)
3855 if (mask & IFF_ALLMULTI)
3860 } else if (ifp->if_flags & IFF_RUNNING) {
3863 sc->bnx_if_flags = ifp->if_flags;
3867 if (ifp->if_flags & IFF_RUNNING)
3872 if (sc->bnx_flags & BNX_FLAG_TBI) {
3873 error = ifmedia_ioctl(ifp, ifr,
3874 &sc->bnx_ifmedia, command);
3876 struct mii_data *mii;
3878 mii = device_get_softc(sc->bnx_miibus);
3879 error = ifmedia_ioctl(ifp, ifr,
3880 &mii->mii_media, command);
3884 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
3885 if (mask & IFCAP_HWCSUM) {
3886 ifp->if_capenable ^= (mask & IFCAP_HWCSUM);
3887 if (ifp->if_capenable & IFCAP_TXCSUM)
3888 ifp->if_hwassist |= BNX_CSUM_FEATURES;
3890 ifp->if_hwassist &= ~BNX_CSUM_FEATURES;
3892 if (mask & IFCAP_TSO) {
3893 ifp->if_capenable ^= (mask & IFCAP_TSO);
3894 if (ifp->if_capenable & IFCAP_TSO)
3895 ifp->if_hwassist |= CSUM_TSO;
3897 ifp->if_hwassist &= ~CSUM_TSO;
3899 if (mask & IFCAP_RSS)
3900 ifp->if_capenable ^= IFCAP_RSS;
3903 error = ether_ioctl(ifp, command, data);
3910 bnx_watchdog(struct ifaltq_subque *ifsq)
3912 struct ifnet *ifp = ifsq_get_ifp(ifsq);
3913 struct bnx_softc *sc = ifp->if_softc;
3916 ASSERT_IFNET_SERIALIZED_ALL(ifp);
3918 if_printf(ifp, "watchdog timeout -- resetting\n");
3922 IFNET_STAT_INC(ifp, oerrors, 1);
3924 for (i = 0; i < sc->bnx_tx_ringcnt; ++i)
3925 ifsq_devstart_sched(sc->bnx_tx_ring[i].bnx_ifsq);
3929 * Stop the adapter and free any mbufs allocated to the
3933 bnx_stop(struct bnx_softc *sc)
3935 struct ifnet *ifp = &sc->arpcom.ac_if;
3938 ASSERT_IFNET_SERIALIZED_ALL(ifp);
3940 callout_stop(&sc->bnx_tick_timer);
3942 /* Disable host interrupts. */
3943 bnx_disable_intr(sc);
3946 * Tell firmware we're shutting down.
3948 bnx_sig_pre_reset(sc, BNX_RESET_SHUTDOWN);
3951 * Disable all of the receiver blocks
3953 bnx_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
3954 bnx_stop_block(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
3955 bnx_stop_block(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
3956 bnx_stop_block(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
3957 bnx_stop_block(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
3958 bnx_stop_block(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
3961 * Disable all of the transmit blocks
3963 bnx_stop_block(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
3964 bnx_stop_block(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
3965 bnx_stop_block(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
3966 bnx_stop_block(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
3967 bnx_stop_block(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
3968 bnx_stop_block(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
3971 * Shut down all of the memory managers and related
3974 bnx_stop_block(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
3975 bnx_stop_block(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
3976 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
3977 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
3980 bnx_sig_post_reset(sc, BNX_RESET_SHUTDOWN);
3983 * Tell firmware we're shutting down.
3985 BNX_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3987 /* Free the RX lists. */
3988 bnx_free_rx_ring_std(&sc->bnx_rx_std_ring);
3990 /* Free jumbo RX list. */
3991 if (BNX_IS_JUMBO_CAPABLE(sc))
3992 bnx_free_rx_ring_jumbo(sc);
3994 /* Free TX buffers. */
3995 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) {
3996 struct bnx_tx_ring *txr = &sc->bnx_tx_ring[i];
3998 txr->bnx_saved_status_tag = 0;
3999 bnx_free_tx_ring(txr);
4002 /* Clear saved status tag */
4003 for (i = 0; i < sc->bnx_rx_retcnt; ++i)
4004 sc->bnx_rx_ret_ring[i].bnx_saved_status_tag = 0;
4007 sc->bnx_coal_chg = 0;
4009 ifp->if_flags &= ~IFF_RUNNING;
4010 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) {
4011 struct bnx_tx_ring *txr = &sc->bnx_tx_ring[i];
4013 ifsq_clr_oactive(txr->bnx_ifsq);
4014 ifsq_watchdog_stop(&txr->bnx_tx_watchdog);
4019 * Stop all chip I/O so that the kernel's probe routines don't
4020 * get confused by errant DMAs when rebooting.
4023 bnx_shutdown(device_t dev)
4025 struct bnx_softc *sc = device_get_softc(dev);
4026 struct ifnet *ifp = &sc->arpcom.ac_if;
4028 ifnet_serialize_all(ifp);
4030 ifnet_deserialize_all(ifp);
4034 bnx_suspend(device_t dev)
4036 struct bnx_softc *sc = device_get_softc(dev);
4037 struct ifnet *ifp = &sc->arpcom.ac_if;
4039 ifnet_serialize_all(ifp);
4041 ifnet_deserialize_all(ifp);
4047 bnx_resume(device_t dev)
4049 struct bnx_softc *sc = device_get_softc(dev);
4050 struct ifnet *ifp = &sc->arpcom.ac_if;
4052 ifnet_serialize_all(ifp);
4054 if (ifp->if_flags & IFF_UP) {
4058 for (i = 0; i < sc->bnx_tx_ringcnt; ++i)
4059 ifsq_devstart_sched(sc->bnx_tx_ring[i].bnx_ifsq);
4062 ifnet_deserialize_all(ifp);
4068 bnx_setpromisc(struct bnx_softc *sc)
4070 struct ifnet *ifp = &sc->arpcom.ac_if;
4072 if (ifp->if_flags & IFF_PROMISC)
4073 BNX_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
4075 BNX_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
4079 bnx_dma_free(struct bnx_softc *sc)
4081 struct bnx_rx_std_ring *std = &sc->bnx_rx_std_ring;
4084 /* Destroy RX return rings */
4085 if (sc->bnx_rx_ret_ring != NULL) {
4086 for (i = 0; i < sc->bnx_rx_retcnt; ++i)
4087 bnx_destroy_rx_ret_ring(&sc->bnx_rx_ret_ring[i]);
4088 kfree(sc->bnx_rx_ret_ring, M_DEVBUF);
4091 /* Destroy RX mbuf DMA stuffs. */
4092 if (std->bnx_rx_mtag != NULL) {
4093 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
4094 KKASSERT(std->bnx_rx_std_buf[i].bnx_rx_mbuf == NULL);
4095 bus_dmamap_destroy(std->bnx_rx_mtag,
4096 std->bnx_rx_std_buf[i].bnx_rx_dmamap);
4098 bus_dma_tag_destroy(std->bnx_rx_mtag);
4101 /* Destroy standard RX ring */
4102 bnx_dma_block_free(std->bnx_rx_std_ring_tag,
4103 std->bnx_rx_std_ring_map, std->bnx_rx_std_ring);
4105 /* Destroy TX rings */
4106 if (sc->bnx_tx_ring != NULL) {
4107 for (i = 0; i < sc->bnx_tx_ringcnt; ++i)
4108 bnx_destroy_tx_ring(&sc->bnx_tx_ring[i]);
4109 kfree(sc->bnx_tx_ring, M_DEVBUF);
4112 if (BNX_IS_JUMBO_CAPABLE(sc))
4113 bnx_free_jumbo_mem(sc);
4115 /* Destroy status blocks */
4116 for (i = 0; i < sc->bnx_intr_cnt; ++i) {
4117 struct bnx_intr_data *intr = &sc->bnx_intr_data[i];
4119 bnx_dma_block_free(intr->bnx_status_tag,
4120 intr->bnx_status_map, intr->bnx_status_block);
4123 /* Destroy the parent tag */
4124 if (sc->bnx_cdata.bnx_parent_tag != NULL)
4125 bus_dma_tag_destroy(sc->bnx_cdata.bnx_parent_tag);
4129 bnx_dma_alloc(device_t dev)
4131 struct bnx_softc *sc = device_get_softc(dev);
4132 struct bnx_rx_std_ring *std = &sc->bnx_rx_std_ring;
4136 * Allocate the parent bus DMA tag appropriate for PCI.
	 * All of the NetXtreme/NetLink controllers have a 4GB boundary
	 * Whenever an address crosses a multiple of the 4GB boundary
	 * (including 4GB, 8GB, 12GB, etc.) and makes the transition
	 * from 0xX_FFFF_FFFF to 0x(X+1)_0000_0000, an internal DMA
	 * state machine will lock up and cause the device to hang.
	 * For example (illustrative addresses), an 8KB buffer at
	 * 0xFFFFF000 would cross into 0x1_0000_0000 and trip the bug,
	 * so BGE_DMA_BOUNDARY_4G below keeps every DMA segment inside
	 * a single 4GB window.
4145 error = bus_dma_tag_create(NULL, 1, BGE_DMA_BOUNDARY_4G,
4146 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
4147 BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT,
4148 0, &sc->bnx_cdata.bnx_parent_tag);
4150 device_printf(dev, "could not create parent DMA tag\n");
4155 * Create DMA stuffs for status blocks.
4157 for (i = 0; i < sc->bnx_intr_cnt; ++i) {
4158 struct bnx_intr_data *intr = &sc->bnx_intr_data[i];
4160 error = bnx_dma_block_alloc(sc,
4161 __VM_CACHELINE_ALIGN(BGE_STATUS_BLK_SZ),
4162 &intr->bnx_status_tag, &intr->bnx_status_map,
4163 (void *)&intr->bnx_status_block,
4164 &intr->bnx_status_block_paddr);
4167 "could not create %dth status block\n", i);
4171 sc->bnx_hw_status = &sc->bnx_intr_data[0].bnx_status_block->bge_status;
4172 if (sc->bnx_flags & BNX_FLAG_STATUS_HASTAG) {
4173 sc->bnx_hw_status_tag =
4174 &sc->bnx_intr_data[0].bnx_status_block->bge_status_tag;
4178 * Create DMA tag and maps for RX mbufs.
4181 lwkt_serialize_init(&std->bnx_rx_std_serialize);
4182 error = bus_dma_tag_create(sc->bnx_cdata.bnx_parent_tag, 1, 0,
4183 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
4184 NULL, NULL, MCLBYTES, 1, MCLBYTES,
4185 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK, &std->bnx_rx_mtag);
4187 device_printf(dev, "could not create RX mbuf DMA tag\n");
4191 for (i = 0; i < BGE_STD_RX_RING_CNT; ++i) {
4192 error = bus_dmamap_create(std->bnx_rx_mtag, BUS_DMA_WAITOK,
4193 &std->bnx_rx_std_buf[i].bnx_rx_dmamap);
4197 for (j = 0; j < i; ++j) {
4198 bus_dmamap_destroy(std->bnx_rx_mtag,
4199 std->bnx_rx_std_buf[j].bnx_rx_dmamap);
4201 bus_dma_tag_destroy(std->bnx_rx_mtag);
4202 std->bnx_rx_mtag = NULL;
4205 "could not create %dth RX mbuf DMA map\n", i);
4211 * Create DMA resources for the standard RX ring.
4213 error = bnx_dma_block_alloc(sc, BGE_STD_RX_RING_SZ,
4214 &std->bnx_rx_std_ring_tag,
4215 &std->bnx_rx_std_ring_map,
4216 (void *)&std->bnx_rx_std_ring,
4217 &std->bnx_rx_std_ring_paddr);
4219 device_printf(dev, "could not create std RX ring\n");
4224 * Create RX return rings
4226 mbx = BGE_MBX_RX_CONS0_LO;
4227 sc->bnx_rx_ret_ring = kmalloc_cachealign(
4228 sizeof(struct bnx_rx_ret_ring) * sc->bnx_rx_retcnt, M_DEVBUF,
4230 for (i = 0; i < sc->bnx_rx_retcnt; ++i) {
4231 struct bnx_rx_ret_ring *ret = &sc->bnx_rx_ret_ring[i];
4232 struct bnx_intr_data *intr;
4236 ret->bnx_rx_mbx = mbx;
4237 ret->bnx_rx_cntmax = (BGE_STD_RX_RING_CNT / 4) /
4238 sc->bnx_rx_retcnt;
4239 ret->bnx_rx_mask = 1 << i;
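/*
 * Example (assuming BGE_STD_RX_RING_CNT is 512): with 4 RX return
 * rings, each ring triggers a standard-ring refill after
 * (512 / 4) / 4 = 32 consumed buffers, so the refill path runs
 * roughly once per 32 frames per ring instead of once per frame.
 */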
4241 if (!BNX_RSS_ENABLED(sc)) {
4242 intr = &sc->bnx_intr_data[0];
4244 KKASSERT(i + 1 < sc->bnx_intr_cnt);
4245 intr = &sc->bnx_intr_data[i + 1];
4249 ret->bnx_rx_considx =
4250 &intr->bnx_status_block->bge_idx[0].bge_rx_prod_idx;
4251 } else if (i == 1) {
4252 ret->bnx_rx_considx =
4253 &intr->bnx_status_block->bge_rx_jumbo_cons_idx;
4254 } else if (i == 2) {
4255 ret->bnx_rx_considx =
4256 &intr->bnx_status_block->bge_rsvd1;
4257 } else if (i == 3) {
4258 ret->bnx_rx_considx =
4259 &intr->bnx_status_block->bge_rx_mini_cons_idx;
4261 panic("unknown RX return ring %d\n", i);
4263 ret->bnx_hw_status_tag =
4264 &intr->bnx_status_block->bge_status_tag;
4266 error = bnx_create_rx_ret_ring(ret);
4269 "could not create %dth RX ret ring\n", i);
4278 sc->bnx_tx_ring = kmalloc_cachealign(
4279 sizeof(struct bnx_tx_ring) * sc->bnx_tx_ringcnt, M_DEVBUF,
4281 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) {
4282 struct bnx_tx_ring *txr = &sc->bnx_tx_ring[i];
4283 struct bnx_intr_data *intr;
4286 txr->bnx_tx_mbx = bnx_tx_mailbox[i];
4288 if (sc->bnx_tx_ringcnt == 1) {
4289 intr = &sc->bnx_intr_data[0];
4291 KKASSERT(i + 1 < sc->bnx_intr_cnt);
4292 intr = &sc->bnx_intr_data[i + 1];
4295 if ((sc->bnx_flags & BNX_FLAG_RXTX_BUNDLE) == 0) {
4296 txr->bnx_hw_status_tag =
4297 &intr->bnx_status_block->bge_status_tag;
4299 txr->bnx_tx_considx =
4300 &intr->bnx_status_block->bge_idx[0].bge_tx_cons_idx;
4302 error = bnx_create_tx_ring(txr);
4305 "could not create %dth TX ring\n", i);
4311 * Create jumbo buffer pool.
4313 if (BNX_IS_JUMBO_CAPABLE(sc)) {
4314 error = bnx_alloc_jumbo_mem(sc);
4317 "could not create jumbo buffer pool\n");
4326 bnx_dma_block_alloc(struct bnx_softc *sc, bus_size_t size, bus_dma_tag_t *tag,
4327 bus_dmamap_t *map, void **addr, bus_addr_t *paddr)
4332 error = bus_dmamem_coherent(sc->bnx_cdata.bnx_parent_tag, PAGE_SIZE, 0,
4333 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
4334 size, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
4338 *tag = dmem.dmem_tag;
4339 *map = dmem.dmem_map;
4340 *addr = dmem.dmem_addr;
4341 *paddr = dmem.dmem_busaddr;
4347 bnx_dma_block_free(bus_dma_tag_t tag, bus_dmamap_t map, void *addr)
4350 bus_dmamap_unload(tag, map);
4351 bus_dmamem_free(tag, addr, map);
4352 bus_dma_tag_destroy(tag);
4357 bnx_tbi_link_upd(struct bnx_softc *sc, uint32_t status)
4359 struct ifnet *ifp = &sc->arpcom.ac_if;
4361 #define PCS_ENCODE_ERR (BGE_MACSTAT_PORT_DECODE_ERROR|BGE_MACSTAT_MI_COMPLETE)
4364 * Sometimes PCS encoding errors are detected in
4365 * TBI mode (on fiber NICs), and for some reason
4366 * the chip will signal them as link changes.
4367 * If we get a link change event, but the 'PCS
4368 * encoding error' bit in the MAC status register
4369 * is set, don't bother doing a link check.
4370 * This avoids spurious "gigabit link up" messages
4371 * that sometimes appear on fiber NICs during
4372 * periods of heavy traffic.
4374 if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
4375 if (!sc->bnx_link) {
4377 if (sc->bnx_asicrev == BGE_ASICREV_BCM5704) {
4378 BNX_CLRBIT(sc, BGE_MAC_MODE,
4379 BGE_MACMODE_TBI_SEND_CFGS);
4382 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
4385 if_printf(ifp, "link UP\n");
4387 ifp->if_link_state = LINK_STATE_UP;
4388 if_link_state_change(ifp);
4390 } else if ((status & PCS_ENCODE_ERR) != PCS_ENCODE_ERR) {
4395 if_printf(ifp, "link DOWN\n");
4397 ifp->if_link_state = LINK_STATE_DOWN;
4398 if_link_state_change(ifp);
4402 #undef PCS_ENCODE_ERR
4404 /* Clear the attention. */
4405 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
4406 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
4407 BGE_MACSTAT_LINK_CHANGED);
4411 bnx_copper_link_upd(struct bnx_softc *sc, uint32_t status __unused)
4413 struct ifnet *ifp = &sc->arpcom.ac_if;
4414 struct mii_data *mii = device_get_softc(sc->bnx_miibus);
4417 bnx_miibus_statchg(sc->bnx_dev);
4421 if_printf(ifp, "link UP\n");
4423 if_printf(ifp, "link DOWN\n");
4426 /* Clear the attention. */
4427 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
4428 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
4429 BGE_MACSTAT_LINK_CHANGED);
4433 bnx_autopoll_link_upd(struct bnx_softc *sc, uint32_t status __unused)
4435 struct ifnet *ifp = &sc->arpcom.ac_if;
4436 struct mii_data *mii = device_get_softc(sc->bnx_miibus);
4440 if (!sc->bnx_link &&
4441 (mii->mii_media_status & IFM_ACTIVE) &&
4442 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
4445 if_printf(ifp, "link UP\n");
4446 } else if (sc->bnx_link &&
4447 (!(mii->mii_media_status & IFM_ACTIVE) ||
4448 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
4451 if_printf(ifp, "link DOWN\n");
4454 /* Clear the attention. */
4455 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
4456 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
4457 BGE_MACSTAT_LINK_CHANGED);
4461 bnx_sysctl_rx_coal_ticks(SYSCTL_HANDLER_ARGS)
4463 struct bnx_softc *sc = arg1;
4465 return bnx_sysctl_coal_chg(oidp, arg1, arg2, req,
4466 &sc->bnx_rx_coal_ticks,
4467 BNX_RX_COAL_TICKS_MIN, BNX_RX_COAL_TICKS_MAX,
4468 BNX_RX_COAL_TICKS_CHG);
4472 bnx_sysctl_tx_coal_ticks(SYSCTL_HANDLER_ARGS)
4474 struct bnx_softc *sc = arg1;
4476 return bnx_sysctl_coal_chg(oidp, arg1, arg2, req,
4477 &sc->bnx_tx_coal_ticks,
4478 BNX_TX_COAL_TICKS_MIN, BNX_TX_COAL_TICKS_MAX,
4479 BNX_TX_COAL_TICKS_CHG);
4483 bnx_sysctl_rx_coal_bds(SYSCTL_HANDLER_ARGS)
4485 struct bnx_softc *sc = arg1;
4487 return bnx_sysctl_coal_chg(oidp, arg1, arg2, req,
4488 &sc->bnx_rx_coal_bds,
4489 BNX_RX_COAL_BDS_MIN, BNX_RX_COAL_BDS_MAX,
4490 BNX_RX_COAL_BDS_CHG);
4494 bnx_sysctl_rx_coal_bds_poll(SYSCTL_HANDLER_ARGS)
4496 struct bnx_softc *sc = arg1;
4498 return bnx_sysctl_coal_chg(oidp, arg1, arg2, req,
4499 &sc->bnx_rx_coal_bds_poll,
4500 BNX_RX_COAL_BDS_MIN, BNX_RX_COAL_BDS_MAX,
4501 BNX_RX_COAL_BDS_CHG);
4505 bnx_sysctl_tx_coal_bds(SYSCTL_HANDLER_ARGS)
4507 struct bnx_softc *sc = arg1;
4509 return bnx_sysctl_coal_chg(oidp, arg1, arg2, req,
4510 &sc->bnx_tx_coal_bds,
4511 BNX_TX_COAL_BDS_MIN, BNX_TX_COAL_BDS_MAX,
4512 BNX_TX_COAL_BDS_CHG);
4516 bnx_sysctl_tx_coal_bds_poll(SYSCTL_HANDLER_ARGS)
4518 struct bnx_softc *sc = arg1;
4520 return bnx_sysctl_coal_chg(oidp, arg1, arg2, req,
4521 &sc->bnx_tx_coal_bds_poll,
4522 BNX_TX_COAL_BDS_MIN, BNX_TX_COAL_BDS_MAX,
4523 BNX_TX_COAL_BDS_CHG);
4527 bnx_sysctl_rx_coal_bds_int(SYSCTL_HANDLER_ARGS)
4529 struct bnx_softc *sc = arg1;
4531 return bnx_sysctl_coal_chg(oidp, arg1, arg2, req,
4532 &sc->bnx_rx_coal_bds_int,
4533 BNX_RX_COAL_BDS_MIN, BNX_RX_COAL_BDS_MAX,
4534 BNX_RX_COAL_BDS_INT_CHG);
4538 bnx_sysctl_tx_coal_bds_int(SYSCTL_HANDLER_ARGS)
4540 struct bnx_softc *sc = arg1;
4542 return bnx_sysctl_coal_chg(oidp, arg1, arg2, req,
4543 &sc->bnx_tx_coal_bds_int,
4544 BNX_TX_COAL_BDS_MIN, BNX_TX_COAL_BDS_MAX,
4545 BNX_TX_COAL_BDS_INT_CHG);
4549 bnx_sysctl_coal_chg(SYSCTL_HANDLER_ARGS, uint32_t *coal,
4550 int coal_min, int coal_max, uint32_t coal_chg_mask)
4552 struct bnx_softc *sc = arg1;
4553 struct ifnet *ifp = &sc->arpcom.ac_if;
4556 ifnet_serialize_all(ifp);
4559 error = sysctl_handle_int(oidp, &v, 0, req);
4560 if (!error && req->newptr != NULL) {
4561 if (v < coal_min || v > coal_max) {
4565 sc->bnx_coal_chg |= coal_chg_mask;
4567 /* Commit changes */
4568 bnx_coal_change(sc);
4572 ifnet_deserialize_all(ifp);
4577 bnx_coal_change(struct bnx_softc *sc)
4579 struct ifnet *ifp = &sc->arpcom.ac_if;
4582 ASSERT_IFNET_SERIALIZED_ALL(ifp);
4584 if (sc->bnx_coal_chg & BNX_RX_COAL_TICKS_CHG) {
4585 if (sc->bnx_rx_retcnt == 1) {
4586 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS,
4587 sc->bnx_rx_coal_ticks);
4590 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, 0);
4591 for (i = 0; i < sc->bnx_rx_retcnt; ++i) {
4592 CSR_WRITE_4(sc, BGE_VEC1_RX_COAL_TICKS +
4593 (i * BGE_VEC_COALSET_SIZE),
4594 sc->bnx_rx_coal_ticks);
4597 for (; i < BNX_INTR_MAX - 1; ++i) {
4598 CSR_WRITE_4(sc, BGE_VEC1_RX_COAL_TICKS +
4599 (i * BGE_VEC_COALSET_SIZE), 0);
4602 if_printf(ifp, "rx_coal_ticks -> %u\n",
4603 sc->bnx_rx_coal_ticks);
4607 if (sc->bnx_coal_chg & BNX_TX_COAL_TICKS_CHG) {
4608 if (sc->bnx_tx_ringcnt == 1) {
4609 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS,
4610 sc->bnx_tx_coal_ticks);
4613 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, 0);
4614 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) {
4615 CSR_WRITE_4(sc, BGE_VEC1_TX_COAL_TICKS +
4616 (i * BGE_VEC_COALSET_SIZE),
4617 sc->bnx_tx_coal_ticks);
4620 for (; i < BNX_INTR_MAX - 1; ++i) {
4621 CSR_WRITE_4(sc, BGE_VEC1_TX_COAL_TICKS +
4622 (i * BGE_VEC_COALSET_SIZE), 0);
4625 if_printf(ifp, "tx_coal_ticks -> %u\n",
4626 sc->bnx_tx_coal_ticks);
4630 if (sc->bnx_coal_chg & BNX_RX_COAL_BDS_CHG) {
4631 uint32_t rx_coal_bds;
4633 if (ifp->if_flags & IFF_NPOLLING)
4634 rx_coal_bds = sc->bnx_rx_coal_bds_poll;
4636 rx_coal_bds = sc->bnx_rx_coal_bds;
4638 if (sc->bnx_rx_retcnt == 1) {
4639 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, rx_coal_bds);
4642 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, 0);
4643 for (i = 0; i < sc->bnx_rx_retcnt; ++i) {
4644 CSR_WRITE_4(sc, BGE_VEC1_RX_MAX_COAL_BDS +
4645 (i * BGE_VEC_COALSET_SIZE), rx_coal_bds);
4648 for (; i < BNX_INTR_MAX - 1; ++i) {
4649 CSR_WRITE_4(sc, BGE_VEC1_RX_MAX_COAL_BDS +
4650 (i * BGE_VEC_COALSET_SIZE), 0);
4653 if_printf(ifp, "%srx_coal_bds -> %u\n",
4654 (ifp->if_flags & IFF_NPOLLING) ? "polling " : "",
4659 if (sc->bnx_coal_chg & BNX_TX_COAL_BDS_CHG) {
4660 uint32_t tx_coal_bds;
4662 if (ifp->if_flags & IFF_NPOLLING)
4663 tx_coal_bds = sc->bnx_tx_coal_bds_poll;
4665 tx_coal_bds = sc->bnx_tx_coal_bds;
4667 if (sc->bnx_tx_ringcnt == 1) {
4668 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, tx_coal_bds);
4671 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, 0);
4672 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) {
4673 CSR_WRITE_4(sc, BGE_VEC1_TX_MAX_COAL_BDS +
4674 (i * BGE_VEC_COALSET_SIZE), tx_coal_bds);
4677 for (; i < BNX_INTR_MAX - 1; ++i) {
4678 CSR_WRITE_4(sc, BGE_VEC1_TX_MAX_COAL_BDS +
4679 (i * BGE_VEC_COALSET_SIZE), 0);
4682 if_printf(ifp, "%stx_coal_bds -> %u\n",
4683 (ifp->if_flags & IFF_NPOLLING) ? "polling " : "",
4688 if (sc->bnx_coal_chg & BNX_RX_COAL_BDS_INT_CHG) {
4689 if (sc->bnx_rx_retcnt == 1) {
4690 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT,
4691 sc->bnx_rx_coal_bds_int);
4694 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0);
4695 for (i = 0; i < sc->bnx_rx_retcnt; ++i) {
4696 CSR_WRITE_4(sc, BGE_VEC1_RX_MAX_COAL_BDS_INT +
4697 (i * BGE_VEC_COALSET_SIZE),
4698 sc->bnx_rx_coal_bds_int);
4701 for (; i < BNX_INTR_MAX - 1; ++i) {
4702 CSR_WRITE_4(sc, BGE_VEC1_RX_MAX_COAL_BDS_INT +
4703 (i * BGE_VEC_COALSET_SIZE), 0);
4706 if_printf(ifp, "rx_coal_bds_int -> %u\n",
4707 sc->bnx_rx_coal_bds_int);
4711 if (sc->bnx_coal_chg & BNX_TX_COAL_BDS_INT_CHG) {
4712 if (sc->bnx_tx_ringcnt == 1) {
4713 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT,
4714 sc->bnx_tx_coal_bds_int);
4717 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0);
4718 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) {
4719 CSR_WRITE_4(sc, BGE_VEC1_TX_MAX_COAL_BDS_INT +
4720 (i * BGE_VEC_COALSET_SIZE),
4721 sc->bnx_tx_coal_bds_int);
4724 for (; i < BNX_INTR_MAX - 1; ++i) {
4725 CSR_WRITE_4(sc, BGE_VEC1_TX_MAX_COAL_BDS_INT +
4726 (i * BGE_VEC_COALSET_SIZE), 0);
4729 if_printf(ifp, "tx_coal_bds_int -> %u\n",
4730 sc->bnx_tx_coal_bds_int);
4734 sc->bnx_coal_chg = 0;
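/*
 * Lost-interrupt workaround used by the bnx_check_intr_* callouts below
 * (a sketch of the heuristic): every BNX_INTR_CKINTVL the callout
 * compares the hardware producer indices against the driver's saved
 * consumer indices.  If work is pending but the saved indices have not
 * advanced since the previous check, the interrupt was probably lost;
 * after two consecutive such observations the interrupt handler is
 * invoked manually.
 */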
4738 bnx_check_intr_rxtx(void *xintr)
4740 struct bnx_intr_data *intr = xintr;
4741 struct bnx_rx_ret_ring *ret;
4742 struct bnx_tx_ring *txr;
4745 lwkt_serialize_enter(intr->bnx_intr_serialize);
4747 KKASSERT(mycpuid == intr->bnx_intr_cpuid);
4749 ifp = &intr->bnx_sc->arpcom.ac_if;
4750 if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) != IFF_RUNNING) {
4751 lwkt_serialize_exit(intr->bnx_intr_serialize);
4755 txr = intr->bnx_txr;
4756 ret = intr->bnx_ret;
4758 if (*ret->bnx_rx_considx != ret->bnx_rx_saved_considx ||
4759 *txr->bnx_tx_considx != txr->bnx_tx_saved_considx) {
4760 if (intr->bnx_rx_check_considx == ret->bnx_rx_saved_considx &&
4761 intr->bnx_tx_check_considx == txr->bnx_tx_saved_considx) {
4762 if (!intr->bnx_intr_maylose) {
4763 intr->bnx_intr_maylose = TRUE;
4767 if_printf(ifp, "lost interrupt\n");
4768 intr->bnx_intr_func(intr->bnx_intr_arg);
4771 intr->bnx_intr_maylose = FALSE;
4772 intr->bnx_rx_check_considx = ret->bnx_rx_saved_considx;
4773 intr->bnx_tx_check_considx = txr->bnx_tx_saved_considx;
4776 callout_reset(&intr->bnx_intr_timer, BNX_INTR_CKINTVL,
4777 intr->bnx_intr_check, intr);
4778 lwkt_serialize_exit(intr->bnx_intr_serialize);
4782 bnx_check_intr_tx(void *xintr)
4784 struct bnx_intr_data *intr = xintr;
4785 struct bnx_tx_ring *txr;
4788 lwkt_serialize_enter(intr->bnx_intr_serialize);
4790 KKASSERT(mycpuid == intr->bnx_intr_cpuid);
4792 ifp = &intr->bnx_sc->arpcom.ac_if;
4793 if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) != IFF_RUNNING) {
4794 lwkt_serialize_exit(intr->bnx_intr_serialize);
4798 txr = intr->bnx_txr;
4800 if (*txr->bnx_tx_considx != txr->bnx_tx_saved_considx) {
4801 if (intr->bnx_tx_check_considx == txr->bnx_tx_saved_considx) {
4802 if (!intr->bnx_intr_maylose) {
4803 intr->bnx_intr_maylose = TRUE;
4807 if_printf(ifp, "lost interrupt\n");
4808 intr->bnx_intr_func(intr->bnx_intr_arg);
4811 intr->bnx_intr_maylose = FALSE;
4812 intr->bnx_tx_check_considx = txr->bnx_tx_saved_considx;
4815 callout_reset(&intr->bnx_intr_timer, BNX_INTR_CKINTVL,
4816 intr->bnx_intr_check, intr);
4817 lwkt_serialize_exit(intr->bnx_intr_serialize);
4821 bnx_check_intr_rx(void *xintr)
4823 struct bnx_intr_data *intr = xintr;
4824 struct bnx_rx_ret_ring *ret;
4827 lwkt_serialize_enter(intr->bnx_intr_serialize);
4829 KKASSERT(mycpuid == intr->bnx_intr_cpuid);
4831 ifp = &intr->bnx_sc->arpcom.ac_if;
4832 if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) != IFF_RUNNING) {
4833 lwkt_serialize_exit(intr->bnx_intr_serialize);
4837 ret = intr->bnx_ret;
4839 if (*ret->bnx_rx_considx != ret->bnx_rx_saved_considx) {
4840 if (intr->bnx_rx_check_considx == ret->bnx_rx_saved_considx) {
4841 if (!intr->bnx_intr_maylose) {
4842 intr->bnx_intr_maylose = TRUE;
4846 if_printf(ifp, "lost interrupt\n");
4847 intr->bnx_intr_func(intr->bnx_intr_arg);
4850 intr->bnx_intr_maylose = FALSE;
4851 intr->bnx_rx_check_considx = ret->bnx_rx_saved_considx;
4854 callout_reset(&intr->bnx_intr_timer, BNX_INTR_CKINTVL,
4855 intr->bnx_intr_check, intr);
4856 lwkt_serialize_exit(intr->bnx_intr_serialize);
4860 bnx_enable_intr(struct bnx_softc *sc)
4862 struct ifnet *ifp = &sc->arpcom.ac_if;
4865 for (i = 0; i < sc->bnx_intr_cnt; ++i) {
4866 lwkt_serialize_handler_enable(
4867 sc->bnx_intr_data[i].bnx_intr_serialize);
4873 for (i = 0; i < sc->bnx_intr_cnt; ++i) {
4874 struct bnx_intr_data *intr = &sc->bnx_intr_data[i];
4876 bnx_writembx(sc, intr->bnx_intr_mbx,
4877 (*intr->bnx_saved_status_tag) << 24);
4878 /* XXX Linux driver */
4879 bnx_writembx(sc, intr->bnx_intr_mbx,
4880 (*intr->bnx_saved_status_tag) << 24);
4884 * Unmask the interrupt when we stop polling.
4886 PCI_CLRBIT(sc->bnx_dev, BGE_PCI_MISC_CTL,
4887 BGE_PCIMISCCTL_MASK_PCI_INTR, 4);
4890 * Trigger another interrupt, since the above writes
4891 * to interrupt mailbox0 may acknowledge a pending
4892 * interrupt.
4894 BNX_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
4896 if (sc->bnx_flags & BNX_FLAG_STATUSTAG_BUG) {
4898 if_printf(ifp, "status tag bug workaround\n");
4900 for (i = 0; i < sc->bnx_intr_cnt; ++i) {
4901 struct bnx_intr_data *intr = &sc->bnx_intr_data[i];
4903 if (intr->bnx_intr_check == NULL)
4905 intr->bnx_intr_maylose = FALSE;
4906 intr->bnx_rx_check_considx = 0;
4907 intr->bnx_tx_check_considx = 0;
4908 callout_reset_bycpu(&intr->bnx_intr_timer,
4909 BNX_INTR_CKINTVL, intr->bnx_intr_check, intr,
4910 intr->bnx_intr_cpuid);
4916 bnx_disable_intr(struct bnx_softc *sc)
4920 for (i = 0; i < sc->bnx_intr_cnt; ++i) {
4921 struct bnx_intr_data *intr = &sc->bnx_intr_data[i];
4923 callout_stop(&intr->bnx_intr_timer);
4924 intr->bnx_intr_maylose = FALSE;
4925 intr->bnx_rx_check_considx = 0;
4926 intr->bnx_tx_check_considx = 0;
4930 * Mask the interrupt when we start polling.
4932 PCI_SETBIT(sc->bnx_dev, BGE_PCI_MISC_CTL,
4933 BGE_PCIMISCCTL_MASK_PCI_INTR, 4);
4936 * Acknowledge a possibly asserted interrupt.
4938 for (i = 0; i < BNX_INTR_MAX; ++i)
4939 bnx_writembx(sc, sc->bnx_intr_data[i].bnx_intr_mbx, 1);
4941 for (i = 0; i < sc->bnx_intr_cnt; ++i) {
4942 lwkt_serialize_handler_disable(
4943 sc->bnx_intr_data[i].bnx_intr_serialize);
4948 bnx_get_eaddr_mem(struct bnx_softc *sc, uint8_t ether_addr[])
4953 mac_addr = bnx_readmem_ind(sc, 0x0c14);
4954 if ((mac_addr >> 16) == 0x484b) {
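/*
 * 0x484b is ASCII "HK"; the bootcode apparently uses it as a validity
 * signature in front of the MAC address in NIC SRAM (inferred from
 * this check, not from documentation).
 */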
4955 ether_addr[0] = (uint8_t)(mac_addr >> 8);
4956 ether_addr[1] = (uint8_t)mac_addr;
4957 mac_addr = bnx_readmem_ind(sc, 0x0c18);
4958 ether_addr[2] = (uint8_t)(mac_addr >> 24);
4959 ether_addr[3] = (uint8_t)(mac_addr >> 16);
4960 ether_addr[4] = (uint8_t)(mac_addr >> 8);
4961 ether_addr[5] = (uint8_t)mac_addr;
4968 bnx_get_eaddr_nvram(struct bnx_softc *sc, uint8_t ether_addr[])
4970 int mac_offset = BGE_EE_MAC_OFFSET;
4972 if (BNX_IS_5717_PLUS(sc)) {
4975 f = pci_get_function(sc->bnx_dev);
4977 mac_offset = BGE_EE_MAC_OFFSET_5717;
4979 mac_offset += BGE_EE_MAC_OFFSET_5717_OFF;
4982 return bnx_read_nvram(sc, ether_addr, mac_offset + 2, ETHER_ADDR_LEN);
4986 bnx_get_eaddr_eeprom(struct bnx_softc *sc, uint8_t ether_addr[])
4988 if (sc->bnx_flags & BNX_FLAG_NO_EEPROM)
4991 return bnx_read_eeprom(sc, ether_addr, BGE_EE_MAC_OFFSET + 2,
4996 bnx_get_eaddr(struct bnx_softc *sc, uint8_t eaddr[])
4998 static const bnx_eaddr_fcn_t bnx_eaddr_funcs[] = {
4999 /* NOTE: Order is critical */
5001 bnx_get_eaddr_nvram,
5002 bnx_get_eaddr_eeprom,
5005 const bnx_eaddr_fcn_t *func;
5007 for (func = bnx_eaddr_funcs; *func != NULL; ++func) {
5008 if ((*func)(sc, eaddr) == 0)
5011 return (*func == NULL ? ENXIO : 0);
5015 * NOTE: 'm' is not freed upon failure
5018 bnx_defrag_shortdma(struct mbuf *m)
5024 * If the device receives two back-to-back send BDs with less than
5025 * or equal to 8 total bytes, the device may hang.  The two
5026 * back-to-back send BDs must be in the same frame for this failure
5027 * to occur.  Scan the mbuf chains to see whether two back-to-back
5028 * send BDs are there.  If this is the case, allocate a new mbuf
5029 * and copy the frame to work around the silicon bug.
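 *
 * Example: a chain like (4-byte mbuf -> 4-byte mbuf -> rest of the
 * frame) would map to two consecutive short send BDs within one
 * frame; m_defrag() below linearizes the chain so each send BD
 * describes a longer run of data.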
5031 for (n = m, found = 0; n != NULL; n = n->m_next) {
5042 n = m_defrag(m, M_NOWAIT);
5049 bnx_stop_block(struct bnx_softc *sc, bus_size_t reg, uint32_t bit)
5053 BNX_CLRBIT(sc, reg, bit);
5054 for (i = 0; i < BNX_TIMEOUT; i++) {
5055 if ((CSR_READ_4(sc, reg) & bit) == 0)
5062 bnx_link_poll(struct bnx_softc *sc)
5066 status = CSR_READ_4(sc, BGE_MAC_STS);
5067 if ((status & sc->bnx_link_chg) || sc->bnx_link_evt) {
5068 sc->bnx_link_evt = 0;
5069 sc->bnx_link_upd(sc, status);
5074 bnx_enable_msi(struct bnx_softc *sc, boolean_t is_msix)
5078 msi_mode = CSR_READ_4(sc, BGE_MSI_MODE);
5079 msi_mode |= BGE_MSIMODE_ENABLE;
5082 * 5718-PG105-R says that "one shot" mode does not work
5083 * if MSI is used; however, it obviously works.
5085 msi_mode &= ~BGE_MSIMODE_ONESHOT_DISABLE;
5087 msi_mode |= BGE_MSIMODE_MSIX_MULTIMODE;
5089 msi_mode &= ~BGE_MSIMODE_MSIX_MULTIMODE;
5090 CSR_WRITE_4(sc, BGE_MSI_MODE, msi_mode);
5094 bnx_dma_swap_options(struct bnx_softc *sc)
5096 uint32_t dma_options;
5098 dma_options = BGE_MODECTL_WORDSWAP_NONFRAME |
5099 BGE_MODECTL_BYTESWAP_DATA | BGE_MODECTL_WORDSWAP_DATA;
5100 #if BYTE_ORDER == BIG_ENDIAN
5101 dma_options |= BGE_MODECTL_BYTESWAP_NONFRAME;
5107 bnx_setup_tso(struct bnx_tx_ring *txr, struct mbuf **mp,
5108 uint16_t *mss0, uint16_t *flags0)
5113 int thoff, iphlen, hoff, hlen;
5114 uint16_t flags, mss;
5117 KASSERT(M_WRITABLE(m), ("TSO mbuf not writable"));
5119 hoff = m->m_pkthdr.csum_lhlen;
5120 iphlen = m->m_pkthdr.csum_iphlen;
5121 thoff = m->m_pkthdr.csum_thlen;
5123 KASSERT(hoff > 0, ("invalid ether header len"));
5124 KASSERT(iphlen > 0, ("invalid ip header len"));
5125 KASSERT(thoff > 0, ("invalid tcp header len"));
5127 if (__predict_false(m->m_len < hoff + iphlen + thoff)) {
5128 m = m_pullup(m, hoff + iphlen + thoff);
5135 ip = mtodoff(m, struct ip *, hoff);
5136 th = mtodoff(m, struct tcphdr *, hoff + iphlen);
5138 mss = m->m_pkthdr.tso_segsz;
5139 flags = BGE_TXBDFLAG_CPU_PRE_DMA | BGE_TXBDFLAG_CPU_POST_DMA;
5141 ip->ip_len = htons(mss + iphlen + thoff);
5144 hlen = (iphlen + thoff) >> 2;
5145 mss |= ((hlen & 0x3) << 14);
5146 flags |= ((hlen & 0xf8) << 7) | ((hlen & 0x4) << 2);
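/*
 * hlen (the IP + TCP header length in 32-bit words) is split across
 * the mss and flags fields by the shifts above; e.g. with 20-byte IP
 * and TCP headers hlen = 10, so (hlen & 0x3) << 14 adds 0x8000 to mss
 * and (hlen & 0xf8) << 7 adds 0x400 to flags.
 */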
5155 bnx_create_tx_ring(struct bnx_tx_ring *txr)
5157 bus_size_t txmaxsz, txmaxsegsz;
5160 lwkt_serialize_init(&txr->bnx_tx_serialize);
5163 * Create DMA tag and maps for TX mbufs.
5165 if (txr->bnx_sc->bnx_flags & BNX_FLAG_TSO)
5166 txmaxsz = IP_MAXPACKET + sizeof(struct ether_vlan_header);
5168 txmaxsz = BNX_JUMBO_FRAMELEN;
5169 if (txr->bnx_sc->bnx_asicrev == BGE_ASICREV_BCM57766)
5170 txmaxsegsz = MCLBYTES;
5172 txmaxsegsz = PAGE_SIZE;
5173 error = bus_dma_tag_create(txr->bnx_sc->bnx_cdata.bnx_parent_tag,
5174 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
5175 txmaxsz, BNX_NSEG_NEW, txmaxsegsz,
5176 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
5179 device_printf(txr->bnx_sc->bnx_dev,
5180 "could not create TX mbuf DMA tag\n");
5184 for (i = 0; i < BGE_TX_RING_CNT; i++) {
5185 error = bus_dmamap_create(txr->bnx_tx_mtag,
5186 BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
5187 &txr->bnx_tx_buf[i].bnx_tx_dmamap);
5191 for (j = 0; j < i; ++j) {
5192 bus_dmamap_destroy(txr->bnx_tx_mtag,
5193 txr->bnx_tx_buf[j].bnx_tx_dmamap);
5195 bus_dma_tag_destroy(txr->bnx_tx_mtag);
5196 txr->bnx_tx_mtag = NULL;
5198 device_printf(txr->bnx_sc->bnx_dev,
5199 "could not create TX mbuf DMA map\n");
5205 * Create DMA resources for the TX ring.
5207 error = bnx_dma_block_alloc(txr->bnx_sc, BGE_TX_RING_SZ,
5208 &txr->bnx_tx_ring_tag,
5209 &txr->bnx_tx_ring_map,
5210 (void *)&txr->bnx_tx_ring,
5211 &txr->bnx_tx_ring_paddr);
5213 device_printf(txr->bnx_sc->bnx_dev,
5214 "could not create TX ring\n");
5218 txr->bnx_tx_flags |= BNX_TX_FLAG_SHORTDMA;
5219 txr->bnx_tx_wreg = BNX_TX_WREG_NSEGS;
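/*
 * bnx_tx_wreg batches doorbell writes: the transmit path defers the
 * TX producer-mailbox write until at least this many segments have
 * been queued, trading a little latency for far fewer register writes
 * under load (tunable via bnx_sysctl_tx_wreg() below).
 */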
5225 bnx_destroy_tx_ring(struct bnx_tx_ring *txr)
5227 /* Destroy TX mbuf DMA resources. */
5228 if (txr->bnx_tx_mtag != NULL) {
5231 for (i = 0; i < BGE_TX_RING_CNT; i++) {
5232 KKASSERT(txr->bnx_tx_buf[i].bnx_tx_mbuf == NULL);
5233 bus_dmamap_destroy(txr->bnx_tx_mtag,
5234 txr->bnx_tx_buf[i].bnx_tx_dmamap);
5236 bus_dma_tag_destroy(txr->bnx_tx_mtag);
5239 /* Destroy TX ring */
5240 bnx_dma_block_free(txr->bnx_tx_ring_tag,
5241 txr->bnx_tx_ring_map, txr->bnx_tx_ring);
5245 bnx_sysctl_force_defrag(SYSCTL_HANDLER_ARGS)
5247 struct bnx_softc *sc = (void *)arg1;
5248 struct ifnet *ifp = &sc->arpcom.ac_if;
5249 struct bnx_tx_ring *txr = &sc->bnx_tx_ring[0];
5250 int error, defrag, i;
5252 if (txr->bnx_tx_flags & BNX_TX_FLAG_FORCE_DEFRAG)
5257 error = sysctl_handle_int(oidp, &defrag, 0, req);
5258 if (error || req->newptr == NULL)
5261 ifnet_serialize_all(ifp);
5262 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) {
5263 txr = &sc->bnx_tx_ring[i];
5265 txr->bnx_tx_flags |= BNX_TX_FLAG_FORCE_DEFRAG;
5267 txr->bnx_tx_flags &= ~BNX_TX_FLAG_FORCE_DEFRAG;
5269 ifnet_deserialize_all(ifp);
5275 bnx_sysctl_tx_wreg(SYSCTL_HANDLER_ARGS)
5277 struct bnx_softc *sc = (void *)arg1;
5278 struct ifnet *ifp = &sc->arpcom.ac_if;
5279 struct bnx_tx_ring *txr = &sc->bnx_tx_ring[0];
5280 int error, tx_wreg, i;
5282 tx_wreg = txr->bnx_tx_wreg;
5283 error = sysctl_handle_int(oidp, &tx_wreg, 0, req);
5284 if (error || req->newptr == NULL)
5287 ifnet_serialize_all(ifp);
5288 for (i = 0; i < sc->bnx_tx_ringcnt; ++i)
5289 sc->bnx_tx_ring[i].bnx_tx_wreg = tx_wreg;
5290 ifnet_deserialize_all(ifp);
5296 bnx_create_rx_ret_ring(struct bnx_rx_ret_ring *ret)
5300 lwkt_serialize_init(&ret->bnx_rx_ret_serialize);
5303 * Create DMA resources for the RX return ring.
5305 error = bnx_dma_block_alloc(ret->bnx_sc,
5306 BGE_RX_RTN_RING_SZ(BNX_RETURN_RING_CNT),
5307 &ret->bnx_rx_ret_ring_tag,
5308 &ret->bnx_rx_ret_ring_map,
5309 (void *)&ret->bnx_rx_ret_ring,
5310 &ret->bnx_rx_ret_ring_paddr);
5312 device_printf(ret->bnx_sc->bnx_dev,
5313 "could not create RX ret ring\n");
5317 /* Shadow standard ring's RX mbuf DMA tag */
5318 ret->bnx_rx_mtag = ret->bnx_std->bnx_rx_mtag;
5321 * Create tmp DMA map for RX mbufs.
5323 error = bus_dmamap_create(ret->bnx_rx_mtag, BUS_DMA_WAITOK,
5324 &ret->bnx_rx_tmpmap);
5326 device_printf(ret->bnx_sc->bnx_dev,
5327 "could not create tmp RX mbuf DMA map\n");
5328 ret->bnx_rx_mtag = NULL;
5335 bnx_destroy_rx_ret_ring(struct bnx_rx_ret_ring *ret)
5337 /* Destroy tmp RX mbuf DMA map */
5338 if (ret->bnx_rx_mtag != NULL)
5339 bus_dmamap_destroy(ret->bnx_rx_mtag, ret->bnx_rx_tmpmap);
5341 /* Destroy RX return ring */
5342 bnx_dma_block_free(ret->bnx_rx_ret_ring_tag,
5343 ret->bnx_rx_ret_ring_map, ret->bnx_rx_ret_ring);
5347 bnx_alloc_intr(struct bnx_softc *sc)
5349 struct bnx_intr_data *intr;
5353 if (sc->bnx_intr_cnt > 1) {
5354 error = bnx_alloc_msix(sc);
5357 KKASSERT(sc->bnx_intr_type == PCI_INTR_TYPE_MSIX);
5361 KKASSERT(sc->bnx_intr_cnt == 1);
5363 intr = &sc->bnx_intr_data[0];
5364 intr->bnx_ret = &sc->bnx_rx_ret_ring[0];
5365 intr->bnx_txr = &sc->bnx_tx_ring[0];
5366 intr->bnx_intr_serialize = &sc->bnx_main_serialize;
5367 intr->bnx_intr_check = bnx_check_intr_rxtx;
5368 intr->bnx_saved_status_tag = &intr->bnx_ret->bnx_saved_status_tag;
5370 sc->bnx_intr_type = pci_alloc_1intr(sc->bnx_dev, bnx_msi_enable,
5371 &intr->bnx_intr_rid, &intr_flags);
5373 intr->bnx_intr_res = bus_alloc_resource_any(sc->bnx_dev, SYS_RES_IRQ,
5374 &intr->bnx_intr_rid, intr_flags);
5375 if (intr->bnx_intr_res == NULL) {
5376 device_printf(sc->bnx_dev, "could not alloc interrupt\n");
5380 if (sc->bnx_intr_type == PCI_INTR_TYPE_MSI) {
5381 bnx_enable_msi(sc, FALSE);
5382 intr->bnx_intr_func = bnx_msi;
5384 device_printf(sc->bnx_dev, "oneshot MSI\n");
5386 intr->bnx_intr_func = bnx_intr_legacy;
5388 intr->bnx_intr_arg = sc;
5389 intr->bnx_intr_cpuid = rman_get_cpuid(intr->bnx_intr_res);
5391 intr->bnx_txr->bnx_tx_cpuid = intr->bnx_intr_cpuid;
5397 bnx_setup_intr(struct bnx_softc *sc)
5401 for (i = 0; i < sc->bnx_intr_cnt; ++i) {
5402 struct bnx_intr_data *intr = &sc->bnx_intr_data[i];
5404 error = bus_setup_intr_descr(sc->bnx_dev, intr->bnx_intr_res,
5405 INTR_MPSAFE, intr->bnx_intr_func, intr->bnx_intr_arg,
5406 &intr->bnx_intr_hand, intr->bnx_intr_serialize,
5407 intr->bnx_intr_desc);
5409 device_printf(sc->bnx_dev,
5410 "could not set up %dth intr\n", i);
5411 bnx_teardown_intr(sc, i);
5419 bnx_teardown_intr(struct bnx_softc *sc, int cnt)
5423 for (i = 0; i < cnt; ++i) {
5424 struct bnx_intr_data *intr = &sc->bnx_intr_data[i];
5426 bus_teardown_intr(sc->bnx_dev, intr->bnx_intr_res,
5427 intr->bnx_intr_hand);
5432 bnx_free_intr(struct bnx_softc *sc)
5434 if (sc->bnx_intr_type != PCI_INTR_TYPE_MSIX) {
5435 struct bnx_intr_data *intr;
5437 KKASSERT(sc->bnx_intr_cnt <= 1);
5438 intr = &sc->bnx_intr_data[0];
5440 if (intr->bnx_intr_res != NULL) {
5441 bus_release_resource(sc->bnx_dev, SYS_RES_IRQ,
5442 intr->bnx_intr_rid, intr->bnx_intr_res);
5444 if (sc->bnx_intr_type == PCI_INTR_TYPE_MSI)
5445 pci_release_msi(sc->bnx_dev);
5447 bnx_free_msix(sc, TRUE);
5452 bnx_setup_serialize(struct bnx_softc *sc)
5457 * Allocate serializer array
5460 /* Main + RX STD + TX + RX RET */
5461 sc->bnx_serialize_cnt = 1 + 1 + sc->bnx_tx_ringcnt + sc->bnx_rx_retcnt;
5464 kmalloc(sc->bnx_serialize_cnt * sizeof(struct lwkt_serialize *),
5465 M_DEVBUF, M_WAITOK | M_ZERO);
5470 * NOTE: Order is critical
5475 KKASSERT(i < sc->bnx_serialize_cnt);
5476 sc->bnx_serialize[i++] = &sc->bnx_main_serialize;
5478 KKASSERT(i < sc->bnx_serialize_cnt);
5479 sc->bnx_serialize[i++] = &sc->bnx_rx_std_ring.bnx_rx_std_serialize;
5481 for (j = 0; j < sc->bnx_rx_retcnt; ++j) {
5482 KKASSERT(i < sc->bnx_serialize_cnt);
5483 sc->bnx_serialize[i++] =
5484 &sc->bnx_rx_ret_ring[j].bnx_rx_ret_serialize;
5487 for (j = 0; j < sc->bnx_tx_ringcnt; ++j) {
5488 KKASSERT(i < sc->bnx_serialize_cnt);
5489 sc->bnx_serialize[i++] =
5490 &sc->bnx_tx_ring[j].bnx_tx_serialize;
5493 KKASSERT(i == sc->bnx_serialize_cnt);
5497 bnx_serialize(struct ifnet *ifp, enum ifnet_serialize slz)
5499 struct bnx_softc *sc = ifp->if_softc;
5501 ifnet_serialize_array_enter(sc->bnx_serialize,
5502 sc->bnx_serialize_cnt, slz);
5506 bnx_deserialize(struct ifnet *ifp, enum ifnet_serialize slz)
5508 struct bnx_softc *sc = ifp->if_softc;
5510 ifnet_serialize_array_exit(sc->bnx_serialize,
5511 sc->bnx_serialize_cnt, slz);
5515 bnx_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz)
5517 struct bnx_softc *sc = ifp->if_softc;
5519 return ifnet_serialize_array_try(sc->bnx_serialize,
5520 sc->bnx_serialize_cnt, slz);
5526 bnx_serialize_assert(struct ifnet *ifp, enum ifnet_serialize slz,
5527 boolean_t serialized)
5529 struct bnx_softc *sc = ifp->if_softc;
5531 ifnet_serialize_array_assert(sc->bnx_serialize, sc->bnx_serialize_cnt,
5535 #endif /* INVARIANTS */
5537 #ifdef IFPOLL_ENABLE
5540 bnx_sysctl_npoll_offset(SYSCTL_HANDLER_ARGS)
5542 struct bnx_softc *sc = (void *)arg1;
5543 struct ifnet *ifp = &sc->arpcom.ac_if;
5546 off = sc->bnx_npoll_rxoff;
5547 error = sysctl_handle_int(oidp, &off, 0, req);
5548 if (error || req->newptr == NULL)
5553 ifnet_serialize_all(ifp);
5554 if (off >= ncpus2 || off % sc->bnx_rx_retcnt != 0) {
5558 sc->bnx_npoll_txoff = off;
5559 sc->bnx_npoll_rxoff = off;
5561 ifnet_deserialize_all(ifp);
5567 bnx_sysctl_npoll_rxoff(SYSCTL_HANDLER_ARGS)
5569 struct bnx_softc *sc = (void *)arg1;
5570 struct ifnet *ifp = &sc->arpcom.ac_if;
5573 off = sc->bnx_npoll_rxoff;
5574 error = sysctl_handle_int(oidp, &off, 0, req);
5575 if (error || req->newptr == NULL)
5580 ifnet_serialize_all(ifp);
5581 if (off >= ncpus2 || off % sc->bnx_rx_retcnt != 0) {
5585 sc->bnx_npoll_rxoff = off;
5587 ifnet_deserialize_all(ifp);
5593 bnx_sysctl_npoll_txoff(SYSCTL_HANDLER_ARGS)
5595 struct bnx_softc *sc = (void *)arg1;
5596 struct ifnet *ifp = &sc->arpcom.ac_if;
5599 off = sc->bnx_npoll_txoff;
5600 error = sysctl_handle_int(oidp, &off, 0, req);
5601 if (error || req->newptr == NULL)
5606 ifnet_serialize_all(ifp);
5607 if (off >= ncpus2) {
5611 sc->bnx_npoll_txoff = off;
5613 ifnet_deserialize_all(ifp);
5618 #endif /* IFPOLL_ENABLE */
5621 bnx_set_tick_cpuid(struct bnx_softc *sc, boolean_t polling)
5624 sc->bnx_tick_cpuid = 0; /* XXX */
5626 sc->bnx_tick_cpuid = sc->bnx_intr_data[0].bnx_intr_cpuid;
5630 bnx_rx_std_refill_ithread(void *xstd)
5632 struct bnx_rx_std_ring *std = xstd;
5633 struct globaldata *gd = mycpu;
5637 while (!std->bnx_rx_std_stop) {
5638 if (std->bnx_rx_std_refill) {
5639 lwkt_serialize_handler_call(
5640 &std->bnx_rx_std_serialize,
5641 bnx_rx_std_refill, std, NULL);
5647 atomic_poll_release_int(&std->bnx_rx_std_running);
5650 if (!std->bnx_rx_std_refill && !std->bnx_rx_std_stop) {
5651 lwkt_deschedule_self(gd->gd_curthread);
5664 bnx_rx_std_refill(void *xstd, void *frame __unused)
5666 struct bnx_rx_std_ring *std = xstd;
5667 int cnt, refill_mask;
5673 refill_mask = std->bnx_rx_std_refill;
5674 atomic_clear_int(&std->bnx_rx_std_refill, refill_mask);
5676 while (refill_mask) {
5677 uint16_t check_idx = std->bnx_rx_std;
5680 ret_idx = bsfl(refill_mask);
5682 struct bnx_rx_buf *rb;
5685 BNX_INC(check_idx, BGE_STD_RX_RING_CNT);
5686 rb = &std->bnx_rx_std_buf[check_idx];
5687 refilled = rb->bnx_rx_refilled;
5690 bnx_setup_rxdesc_std(std, check_idx);
5691 std->bnx_rx_std = check_idx;
5694 atomic_subtract_int(
5695 &std->bnx_rx_std_used, cnt);
5696 bnx_writembx(std->bnx_sc,
5697 BGE_MBX_RX_STD_PROD_LO,
5705 refill_mask &= ~(1 << ret_idx);
5709 atomic_subtract_int(&std->bnx_rx_std_used, cnt);
5710 bnx_writembx(std->bnx_sc, BGE_MBX_RX_STD_PROD_LO,
5714 if (std->bnx_rx_std_refill)
5717 atomic_poll_release_int(&std->bnx_rx_std_running);
5720 if (std->bnx_rx_std_refill)
5725 bnx_sysctl_std_refill(SYSCTL_HANDLER_ARGS)
5727 struct bnx_softc *sc = (void *)arg1;
5728 struct ifnet *ifp = &sc->arpcom.ac_if;
5729 struct bnx_rx_ret_ring *ret = &sc->bnx_rx_ret_ring[0];
5730 int error, cntmax, i;
5732 cntmax = ret->bnx_rx_cntmax;
5733 error = sysctl_handle_int(oidp, &cntmax, 0, req);
5734 if (error || req->newptr == NULL)
5737 ifnet_serialize_all(ifp);
5739 if ((cntmax * sc->bnx_rx_retcnt) >= BGE_STD_RX_RING_CNT / 2) {
5744 for (i = 0; i < sc->bnx_tx_ringcnt; ++i)
5745 sc->bnx_rx_ret_ring[i].bnx_rx_cntmax = cntmax;
5749 ifnet_deserialize_all(ifp);
5755 bnx_init_rss(struct bnx_softc *sc)
5757 uint8_t key[BGE_RSS_KEYREG_CNT * BGE_RSS_KEYREG_SIZE];
5760 KKASSERT(BNX_RSS_ENABLED(sc));
5763 * Configure the RSS redirect table in the following fashion:
5764 * (hash & ring_cnt_mask) == rdr_table[(hash & rdr_table_mask)]
5767 for (j = 0; j < BGE_RSS_INDIR_TBL_CNT; ++j) {
5770 for (i = 0; i < BGE_RSS_INDIR_TBLENT_CNT; ++i) {
5773 q = r % sc->bnx_rx_retcnt;
5774 tbl |= q << (BGE_RSS_INDIR_TBLENT_SHIFT *
5775 (BGE_RSS_INDIR_TBLENT_CNT - i - 1));
5779 BNX_RSS_DPRINTF(sc, 1, "tbl%d %08x\n", j, tbl);
5780 CSR_WRITE_4(sc, BGE_RSS_INDIR_TBL(j), tbl);
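/*
 * Example (assuming 8 four-bit entries per 32-bit register, as the
 * shift arithmetic above implies): with 2 RX return rings every
 * indirection register ends up as 0x01010101, i.e. the rings
 * alternate entry by entry.
 */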
5783 toeplitz_get_key(key, sizeof(key));
5784 for (i = 0; i < BGE_RSS_KEYREG_CNT; ++i) {
5787 keyreg = BGE_RSS_KEYREG_VAL(key, i);
5789 BNX_RSS_DPRINTF(sc, 1, "key%d %08x\n", i, keyreg);
5790 CSR_WRITE_4(sc, BGE_RSS_KEYREG(i), keyreg);
5795 bnx_setup_ring_cnt(struct bnx_softc *sc)
5797 int msix_enable, i, msix_cnt, msix_cnt2, ring_max;
5799 sc->bnx_tx_ringcnt = 1;
5800 sc->bnx_rx_retcnt = 1;
5801 sc->bnx_intr_cnt = 1;
5803 msix_enable = device_getenv_int(sc->bnx_dev, "msix.enable",
5811 msix_cnt = pci_msix_count(sc->bnx_dev);
5816 while ((1 << (i + 1)) <= msix_cnt)
5817 ++i;
5818 msix_cnt2 = 1 << i;
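/*
 * Example: msix_cnt = 5 yields msix_cnt2 = 4 (4 RX rings plus the
 * status vector fit in 5); an exact power of 2 like msix_cnt = 16 is
 * halved below, since 16 rings would leave no vector for the status
 * block.
 */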
5821 * One MSI-X vector is dedicated to the status block or the single
5822 * TX queue, so make sure that there are enough MSI-X vectors.
5824 if (msix_cnt == msix_cnt2) {
5827 * This probably will not happen; 57785/5718 families
5828 * come with at least 5 MSI-X vectors.
5831 if (msix_cnt2 <= 1) {
5832 device_printf(sc->bnx_dev,
5833 "MSI-X count %d could not be used\n", msix_cnt);
5836 device_printf(sc->bnx_dev, "MSI-X count %d is a power of 2\n",
5841 * Setup RX ring count
5843 ring_max = BNX_RX_RING_MAX;
5844 if (ring_max > msix_cnt2)
5845 ring_max = msix_cnt2;
5846 sc->bnx_rx_retcnt = device_getenv_int(sc->bnx_dev, "rx_rings",
5848 sc->bnx_rx_retcnt = if_ring_count2(sc->bnx_rx_retcnt, ring_max);
5850 if (sc->bnx_rx_retcnt == 1)
5854 * We need one extra MSI-X vector for link status or
5855 * TX ring (if only one TX ring is enabled).
5857 sc->bnx_intr_cnt = sc->bnx_rx_retcnt + 1;
5860 * Setup TX ring count
5862 * Currently only BCM5719 and BCM5720 support multiple TX rings
5863 * and the TX ring count must be less than the RX ring count.
5865 if (sc->bnx_asicrev == BGE_ASICREV_BCM5719 ||
5866 sc->bnx_asicrev == BGE_ASICREV_BCM5720) {
5867 ring_max = BNX_TX_RING_MAX;
5868 if (ring_max > msix_cnt2)
5869 ring_max = msix_cnt2;
5870 if (ring_max > sc->bnx_rx_retcnt)
5871 ring_max = sc->bnx_rx_retcnt;
5872 sc->bnx_tx_ringcnt = device_getenv_int(sc->bnx_dev, "tx_rings",
5874 sc->bnx_tx_ringcnt = if_ring_count2(sc->bnx_tx_ringcnt,
5880 bnx_alloc_msix(struct bnx_softc *sc)
5882 struct bnx_intr_data *intr;
5883 boolean_t setup = FALSE;
5884 int error, i, offset, offset_def;
5886 KKASSERT(sc->bnx_intr_cnt > 1);
5887 KKASSERT(sc->bnx_intr_cnt == sc->bnx_rx_retcnt + 1);
5889 if (sc->bnx_flags & BNX_FLAG_RXTX_BUNDLE) {
5893 intr = &sc->bnx_intr_data[0];
5895 intr->bnx_intr_serialize = &sc->bnx_main_serialize;
5896 intr->bnx_saved_status_tag = &sc->bnx_saved_status_tag;
5898 intr->bnx_intr_func = bnx_msix_status;
5899 intr->bnx_intr_arg = sc;
5900 intr->bnx_intr_cpuid = 0; /* XXX */
5902 ksnprintf(intr->bnx_intr_desc0, sizeof(intr->bnx_intr_desc0),
5903 "%s sts", device_get_nameunit(sc->bnx_dev));
5904 intr->bnx_intr_desc = intr->bnx_intr_desc0;
5909 if (sc->bnx_rx_retcnt == ncpus2) {
5912 offset_def = (sc->bnx_rx_retcnt *
5913 device_get_unit(sc->bnx_dev)) % ncpus2;
5915 offset = device_getenv_int(sc->bnx_dev,
5916 "msix.offset", offset_def);
5917 if (offset >= ncpus2 ||
5918 offset % sc->bnx_rx_retcnt != 0) {
5919 device_printf(sc->bnx_dev,
5920 "invalid msix.offset %d, use %d\n",
5921 offset, offset_def);
5922 offset = offset_def;
5926 for (i = 1; i < sc->bnx_intr_cnt; ++i) {
5929 intr = &sc->bnx_intr_data[i];
5931 KKASSERT(idx < sc->bnx_rx_retcnt);
5932 intr->bnx_ret = &sc->bnx_rx_ret_ring[idx];
5933 if (idx < sc->bnx_tx_ringcnt) {
5934 intr->bnx_txr = &sc->bnx_tx_ring[idx];
5935 intr->bnx_ret->bnx_txr = intr->bnx_txr;
5938 intr->bnx_intr_serialize =
5939 &intr->bnx_ret->bnx_rx_ret_serialize;
5940 intr->bnx_saved_status_tag =
5941 &intr->bnx_ret->bnx_saved_status_tag;
5943 intr->bnx_intr_arg = intr->bnx_ret;
5944 KKASSERT(idx + offset < ncpus2);
5945 intr->bnx_intr_cpuid = idx + offset;
5947 if (intr->bnx_txr == NULL) {
5948 intr->bnx_intr_check = bnx_check_intr_rx;
5949 intr->bnx_intr_func = bnx_msix_rx;
5950 ksnprintf(intr->bnx_intr_desc0,
5951 sizeof(intr->bnx_intr_desc0), "%s rx%d",
5952 device_get_nameunit(sc->bnx_dev), idx);
5954 intr->bnx_intr_check = bnx_check_intr_rxtx;
5955 intr->bnx_intr_func = bnx_msix_rxtx;
5956 ksnprintf(intr->bnx_intr_desc0,
5957 sizeof(intr->bnx_intr_desc0), "%s rxtx%d",
5958 device_get_nameunit(sc->bnx_dev), idx);
5960 intr->bnx_txr->bnx_tx_cpuid =
5961 intr->bnx_intr_cpuid;
5963 intr->bnx_intr_desc = intr->bnx_intr_desc0;
5965 intr->bnx_ret->bnx_msix_mbx = intr->bnx_intr_mbx;
5969 * TX ring and link status
5971 offset_def = device_get_unit(sc->bnx_dev) % ncpus2;
5972 offset = device_getenv_int(sc->bnx_dev, "msix.txoff",
5974 if (offset >= ncpus2) {
5975 device_printf(sc->bnx_dev,
5976 "invalid msix.txoff %d, use %d\n",
5977 offset, offset_def);
5978 offset = offset_def;
5981 intr = &sc->bnx_intr_data[0];
5983 intr->bnx_txr = &sc->bnx_tx_ring[0];
5984 intr->bnx_intr_serialize = &sc->bnx_main_serialize;
5985 intr->bnx_intr_check = bnx_check_intr_tx;
5986 intr->bnx_saved_status_tag =
5987 &intr->bnx_txr->bnx_saved_status_tag;
5989 intr->bnx_intr_func = bnx_msix_tx_status;
5990 intr->bnx_intr_arg = intr->bnx_txr;
5991 intr->bnx_intr_cpuid = offset;
5993 ksnprintf(intr->bnx_intr_desc0, sizeof(intr->bnx_intr_desc0),
5994 "%s ststx", device_get_nameunit(sc->bnx_dev));
5995 intr->bnx_intr_desc = intr->bnx_intr_desc0;
5997 intr->bnx_txr->bnx_tx_cpuid = intr->bnx_intr_cpuid;
6002 if (sc->bnx_rx_retcnt == ncpus2) {
6005 offset_def = (sc->bnx_rx_retcnt *
6006 device_get_unit(sc->bnx_dev)) % ncpus2;
6008 offset = device_getenv_int(sc->bnx_dev,
6009 "msix.rxoff", offset_def);
6010 if (offset >= ncpus2 ||
6011 offset % sc->bnx_rx_retcnt != 0) {
6012 device_printf(sc->bnx_dev,
6013 "invalid msix.rxoff %d, use %d\n",
6014 offset, offset_def);
6015 offset = offset_def;
6019 for (i = 1; i < sc->bnx_intr_cnt; ++i) {
6022 intr = &sc->bnx_intr_data[i];
6024 KKASSERT(idx < sc->bnx_rx_retcnt);
6025 intr->bnx_ret = &sc->bnx_rx_ret_ring[idx];
6026 intr->bnx_intr_serialize =
6027 &intr->bnx_ret->bnx_rx_ret_serialize;
6028 intr->bnx_intr_check = bnx_check_intr_rx;
6029 intr->bnx_saved_status_tag =
6030 &intr->bnx_ret->bnx_saved_status_tag;
6032 intr->bnx_intr_func = bnx_msix_rx;
6033 intr->bnx_intr_arg = intr->bnx_ret;
6034 KKASSERT(idx + offset < ncpus2);
6035 intr->bnx_intr_cpuid = idx + offset;
6037 ksnprintf(intr->bnx_intr_desc0,
6038 sizeof(intr->bnx_intr_desc0), "%s rx%d",
6039 device_get_nameunit(sc->bnx_dev), idx);
6040 intr->bnx_intr_desc = intr->bnx_intr_desc0;
6042 intr->bnx_ret->bnx_msix_mbx = intr->bnx_intr_mbx;
6046 if (BNX_IS_5717_PLUS(sc)) {
6047 sc->bnx_msix_mem_rid = PCIR_BAR(4);
6049 if (sc->bnx_res2 == NULL)
6050 sc->bnx_msix_mem_rid = PCIR_BAR(2);
6052 if (sc->bnx_msix_mem_rid != 0) {
6053 sc->bnx_msix_mem_res = bus_alloc_resource_any(sc->bnx_dev,
6054 SYS_RES_MEMORY, &sc->bnx_msix_mem_rid, RF_ACTIVE);
6055 if (sc->bnx_msix_mem_res == NULL) {
6056 device_printf(sc->bnx_dev,
6057 "could not alloc MSI-X table\n");
6062 bnx_enable_msi(sc, TRUE);
6064 error = pci_setup_msix(sc->bnx_dev);
6066 device_printf(sc->bnx_dev, "could not setup MSI-X\n");
6071 for (i = 0; i < sc->bnx_intr_cnt; ++i) {
6072 intr = &sc->bnx_intr_data[i];
6074 error = pci_alloc_msix_vector(sc->bnx_dev, i,
6075 &intr->bnx_intr_rid, intr->bnx_intr_cpuid);
6077 device_printf(sc->bnx_dev,
6078 "could not alloc MSI-X %d on cpu%d\n",
6079 i, intr->bnx_intr_cpuid);
6083 intr->bnx_intr_res = bus_alloc_resource_any(sc->bnx_dev,
6084 SYS_RES_IRQ, &intr->bnx_intr_rid, RF_ACTIVE);
6085 if (intr->bnx_intr_res == NULL) {
6086 device_printf(sc->bnx_dev,
6087 "could not alloc MSI-X %d resource\n", i);
6093 pci_enable_msix(sc->bnx_dev);
6094 sc->bnx_intr_type = PCI_INTR_TYPE_MSIX;
6097 bnx_free_msix(sc, setup);
6102 bnx_free_msix(struct bnx_softc *sc, boolean_t setup)
6106 KKASSERT(sc->bnx_intr_cnt > 1);
6108 for (i = 0; i < sc->bnx_intr_cnt; ++i) {
6109 struct bnx_intr_data *intr = &sc->bnx_intr_data[i];
6111 if (intr->bnx_intr_res != NULL) {
6112 bus_release_resource(sc->bnx_dev, SYS_RES_IRQ,
6113 intr->bnx_intr_rid, intr->bnx_intr_res);
6115 if (intr->bnx_intr_rid >= 0) {
6116 pci_release_msix_vector(sc->bnx_dev,
6117 intr->bnx_intr_rid);
6121 pci_teardown_msix(sc->bnx_dev);
6125 bnx_rx_std_refill_sched_ipi(void *xret)
6127 struct bnx_rx_ret_ring *ret = xret;
6128 struct bnx_rx_std_ring *std = ret->bnx_std;
6129 struct globaldata *gd = mycpu;
6133 atomic_set_int(&std->bnx_rx_std_refill, ret->bnx_rx_mask);
6136 KKASSERT(std->bnx_rx_std_ithread->td_gd == gd);
6137 lwkt_schedule(std->bnx_rx_std_ithread);
6143 bnx_rx_std_refill_stop(void *xstd)
6145 struct bnx_rx_std_ring *std = xstd;
6146 struct globaldata *gd = mycpu;
6150 std->bnx_rx_std_stop = 1;
6153 KKASSERT(std->bnx_rx_std_ithread->td_gd == gd);
6154 lwkt_schedule(std->bnx_rx_std_ithread);
6160 bnx_serialize_skipmain(struct bnx_softc *sc)
6162 lwkt_serialize_array_enter(sc->bnx_serialize,
6163 sc->bnx_serialize_cnt, 1);
6167 bnx_deserialize_skipmain(struct bnx_softc *sc)
6169 lwkt_serialize_array_exit(sc->bnx_serialize,
6170 sc->bnx_serialize_cnt, 1);
6174 bnx_rx_std_refill_sched(struct bnx_rx_ret_ring *ret,
6175 struct bnx_rx_std_ring *std)
6177 struct globaldata *gd = mycpu;
6179 ret->bnx_rx_cnt = 0;
6184 atomic_set_int(&std->bnx_rx_std_refill, ret->bnx_rx_mask);
6186 if (atomic_poll_acquire_int(&std->bnx_rx_std_running)) {
6187 if (std->bnx_rx_std_ithread->td_gd == gd) {
6188 lwkt_schedule(std->bnx_rx_std_ithread);
6190 lwkt_send_ipiq(std->bnx_rx_std_ithread->td_gd,
6191 bnx_rx_std_refill_sched_ipi, ret);
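/*
 * The refill ithread is bound to a single CPU; hand off via an ipiq
 * so that lwkt_schedule() always runs on the ithread's home CPU
 * (cf. the KKASSERT in bnx_rx_std_refill_sched_ipi()).
 */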
6198 static struct pktinfo *
6199 bnx_rss_info(struct pktinfo *pi, const struct bge_rx_bd *cur_rx)
6201 /* Don't pick up IPv6 packet */
6202 if (cur_rx->bge_flags & BGE_RXBDFLAG_IPV6)
6205 /* Don't pick up IP packet w/o IP checksum */
6206 if ((cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) == 0 ||
6207 (cur_rx->bge_error_flag & BGE_RXERRFLAG_IP_CSUM_NOK))
6210 /* Don't pick up IP packet w/o TCP/UDP checksum */
6211 if ((cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) == 0)
6214 /* May be IP fragment */
6215 if (cur_rx->bge_tcp_udp_csum != 0xffff)
6218 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_IS_TCP)
6219 pi->pi_l3proto = IPPROTO_TCP;
6221 pi->pi_l3proto = IPPROTO_UDP;
6222 pi->pi_netisr = NETISR_IP;
6229 bnx_sig_pre_reset(struct bnx_softc *sc, int type)
6231 if (type == BNX_RESET_START || type == BNX_RESET_SUSPEND)
6232 bnx_ape_driver_state_change(sc, type);
6236 bnx_sig_post_reset(struct bnx_softc *sc, int type)
6238 if (type == BNX_RESET_SHUTDOWN)
6239 bnx_ape_driver_state_change(sc, type);
6243 * Clear all stale locks and select the lock for this driver instance.
6246 bnx_ape_lock_init(struct bnx_softc *sc)
6248 uint32_t bit, regbase;
6251 regbase = BGE_APE_PER_LOCK_GRANT;
6253 /* Clear any stale locks. */
6254 for (i = BGE_APE_LOCK_PHY0; i <= BGE_APE_LOCK_GPIO; i++) {
6256 case BGE_APE_LOCK_PHY0:
6257 case BGE_APE_LOCK_PHY1:
6258 case BGE_APE_LOCK_PHY2:
6259 case BGE_APE_LOCK_PHY3:
6260 bit = BGE_APE_LOCK_GRANT_DRIVER0;
6264 if (sc->bnx_func_addr == 0)
6265 bit = BGE_APE_LOCK_GRANT_DRIVER0;
6267 bit = 1 << sc->bnx_func_addr;
6270 APE_WRITE_4(sc, regbase + 4 * i, bit);
6273 /* Select the PHY lock based on the device's function number. */
6274 switch (sc->bnx_func_addr) {
6276 sc->bnx_phy_ape_lock = BGE_APE_LOCK_PHY0;
6280 sc->bnx_phy_ape_lock = BGE_APE_LOCK_PHY1;
6284 sc->bnx_phy_ape_lock = BGE_APE_LOCK_PHY2;
6288 sc->bnx_phy_ape_lock = BGE_APE_LOCK_PHY3;
6292 device_printf(sc->bnx_dev,
6293 "PHY lock not supported on this function\n");
6299 * Check for APE firmware, set flags, and print version info.
6302 bnx_ape_read_fw_ver(struct bnx_softc *sc)
6305 uint32_t apedata, features;
6307 /* Check for a valid APE signature in shared memory. */
6308 apedata = APE_READ_4(sc, BGE_APE_SEG_SIG);
6309 if (apedata != BGE_APE_SEG_SIG_MAGIC) {
6310 device_printf(sc->bnx_dev, "no APE signature\n");
6311 sc->bnx_mfw_flags &= ~BNX_MFW_ON_APE;
6315 /* Check if APE firmware is running. */
6316 apedata = APE_READ_4(sc, BGE_APE_FW_STATUS);
6317 if ((apedata & BGE_APE_FW_STATUS_READY) == 0) {
6318 device_printf(sc->bnx_dev, "APE signature found "
6319 "but FW status not ready! 0x%08x\n", apedata);
6323 sc->bnx_mfw_flags |= BNX_MFW_ON_APE;
6325 /* Fetch the APE firmware type and version. */
6326 apedata = APE_READ_4(sc, BGE_APE_FW_VERSION);
6327 features = APE_READ_4(sc, BGE_APE_FW_FEATURES);
6328 if (features & BGE_APE_FW_FEATURE_NCSI) {
6329 sc->bnx_mfw_flags |= BNX_MFW_TYPE_NCSI;
6331 } else if (features & BGE_APE_FW_FEATURE_DASH) {
6332 sc->bnx_mfw_flags |= BNX_MFW_TYPE_DASH;
6338 /* Print the APE firmware version. */
6339 device_printf(sc->bnx_dev, "APE FW version: %s v%d.%d.%d.%d\n",
6341 (apedata & BGE_APE_FW_VERSION_MAJMSK) >> BGE_APE_FW_VERSION_MAJSFT,
6342 (apedata & BGE_APE_FW_VERSION_MINMSK) >> BGE_APE_FW_VERSION_MINSFT,
6343 (apedata & BGE_APE_FW_VERSION_REVMSK) >> BGE_APE_FW_VERSION_REVSFT,
6344 (apedata & BGE_APE_FW_VERSION_BLDMSK));
6348 bnx_ape_lock(struct bnx_softc *sc, int locknum)
6350 uint32_t bit, gnt, req, status;
6353 if ((sc->bnx_mfw_flags & BNX_MFW_ON_APE) == 0)
6356 /* Lock request/grant registers have different bases. */
6357 req = BGE_APE_PER_LOCK_REQ;
6358 gnt = BGE_APE_PER_LOCK_GRANT;
6363 case BGE_APE_LOCK_GPIO:
6364 /* Lock required when using GPIO. */
6365 if (sc->bnx_func_addr == 0)
6366 bit = BGE_APE_LOCK_REQ_DRIVER0;
6368 bit = 1 << sc->bnx_func_addr;
6371 case BGE_APE_LOCK_GRC:
6372 /* Lock required to reset the device. */
6373 if (sc->bnx_func_addr == 0)
6374 bit = BGE_APE_LOCK_REQ_DRIVER0;
6376 bit = 1 << sc->bnx_func_addr;
6379 case BGE_APE_LOCK_MEM:
6380 /* Lock required when accessing certain APE memory. */
6381 if (sc->bnx_func_addr == 0)
6382 bit = BGE_APE_LOCK_REQ_DRIVER0;
6384 bit = 1 << sc->bnx_func_addr;
6387 case BGE_APE_LOCK_PHY0:
6388 case BGE_APE_LOCK_PHY1:
6389 case BGE_APE_LOCK_PHY2:
6390 case BGE_APE_LOCK_PHY3:
6391 /* Lock required when accessing PHYs. */
6392 bit = BGE_APE_LOCK_REQ_DRIVER0;
6399 /* Request a lock. */
6400 APE_WRITE_4(sc, req + off, bit);
6402 /* Wait up to 1 second to acquire lock. */
6403 for (i = 0; i < 20000; i++) {
6404 status = APE_READ_4(sc, gnt + off);
6410 /* Handle any errors. */
6411 if (status != bit) {
6412 if_printf(&sc->arpcom.ac_if, "APE lock %d request failed! "
6413 "request = 0x%04x[0x%04x], status = 0x%04x[0x%04x]\n",
6414 locknum, req + off, bit & 0xFFFF, gnt + off,
6416 /* Revoke the lock request. */
6417 APE_WRITE_4(sc, gnt + off, bit);
6425 bnx_ape_unlock(struct bnx_softc *sc, int locknum)
6430 if ((sc->bnx_mfw_flags & BNX_MFW_ON_APE) == 0)
6433 gnt = BGE_APE_PER_LOCK_GRANT;
6438 case BGE_APE_LOCK_GPIO:
6439 if (sc->bnx_func_addr == 0)
6440 bit = BGE_APE_LOCK_GRANT_DRIVER0;
6442 bit = 1 << sc->bnx_func_addr;
6445 case BGE_APE_LOCK_GRC:
6446 if (sc->bnx_func_addr == 0)
6447 bit = BGE_APE_LOCK_GRANT_DRIVER0;
6449 bit = 1 << sc->bnx_func_addr;
6452 case BGE_APE_LOCK_MEM:
6453 if (sc->bnx_func_addr == 0)
6454 bit = BGE_APE_LOCK_GRANT_DRIVER0;
6456 bit = 1 << sc->bnx_func_addr;
6459 case BGE_APE_LOCK_PHY0:
6460 case BGE_APE_LOCK_PHY1:
6461 case BGE_APE_LOCK_PHY2:
6462 case BGE_APE_LOCK_PHY3:
6463 bit = BGE_APE_LOCK_GRANT_DRIVER0;
6470 APE_WRITE_4(sc, gnt + off, bit);
6474 * Send an event to the APE firmware.
6477 bnx_ape_send_event(struct bnx_softc *sc, uint32_t event)
6482 /* NCSI does not support APE events. */
6483 if ((sc->bnx_mfw_flags & BNX_MFW_ON_APE) == 0)
6486 /* Wait up to 1ms for APE to service previous event. */
6487 for (i = 10; i > 0; i--) {
6488 if (bnx_ape_lock(sc, BGE_APE_LOCK_MEM) != 0)
6490 apedata = APE_READ_4(sc, BGE_APE_EVENT_STATUS);
6491 if ((apedata & BGE_APE_EVENT_STATUS_EVENT_PENDING) == 0) {
6492 APE_WRITE_4(sc, BGE_APE_EVENT_STATUS, event |
6493 BGE_APE_EVENT_STATUS_EVENT_PENDING);
6494 bnx_ape_unlock(sc, BGE_APE_LOCK_MEM);
6495 APE_WRITE_4(sc, BGE_APE_EVENT, BGE_APE_EVENT_1);
6498 bnx_ape_unlock(sc, BGE_APE_LOCK_MEM);
6502 if_printf(&sc->arpcom.ac_if,
6503 "APE event 0x%08x send timed out\n", event);
6508 bnx_ape_driver_state_change(struct bnx_softc *sc, int kind)
6510 uint32_t apedata, event;
6512 if ((sc->bnx_mfw_flags & BNX_MFW_ON_APE) == 0)
6516 case BNX_RESET_START:
6517 /* If this is the first load, clear the load counter. */
6518 apedata = APE_READ_4(sc, BGE_APE_HOST_SEG_SIG);
6519 if (apedata != BGE_APE_HOST_SEG_SIG_MAGIC) {
6520 APE_WRITE_4(sc, BGE_APE_HOST_INIT_COUNT, 0);
6522 apedata = APE_READ_4(sc, BGE_APE_HOST_INIT_COUNT);
6523 APE_WRITE_4(sc, BGE_APE_HOST_INIT_COUNT, ++apedata);
6525 APE_WRITE_4(sc, BGE_APE_HOST_SEG_SIG,
6526 BGE_APE_HOST_SEG_SIG_MAGIC);
6527 APE_WRITE_4(sc, BGE_APE_HOST_SEG_LEN,
6528 BGE_APE_HOST_SEG_LEN_MAGIC);
6530 /* Add some version info if bnx(4) supports it. */
6531 APE_WRITE_4(sc, BGE_APE_HOST_DRIVER_ID,
6532 BGE_APE_HOST_DRIVER_ID_MAGIC(1, 0));
6533 APE_WRITE_4(sc, BGE_APE_HOST_BEHAVIOR,
6534 BGE_APE_HOST_BEHAV_NO_PHYLOCK);
6535 APE_WRITE_4(sc, BGE_APE_HOST_HEARTBEAT_INT_MS,
6536 BGE_APE_HOST_HEARTBEAT_INT_DISABLE);
6537 APE_WRITE_4(sc, BGE_APE_HOST_DRVR_STATE,
6538 BGE_APE_HOST_DRVR_STATE_START);
6539 event = BGE_APE_EVENT_STATUS_STATE_START;
6542 case BNX_RESET_SHUTDOWN:
6543 APE_WRITE_4(sc, BGE_APE_HOST_DRVR_STATE,
6544 BGE_APE_HOST_DRVR_STATE_UNLOAD);
6545 event = BGE_APE_EVENT_STATUS_STATE_UNLOAD;
6548 case BNX_RESET_SUSPEND:
6549 event = BGE_APE_EVENT_STATUS_STATE_SUSPEND;
6556 bnx_ape_send_event(sc, event | BGE_APE_EVENT_STATUS_DRIVER_EVNT |
6557 BGE_APE_EVENT_STATUS_STATE_CHNGE);