/*
 * Copyright (c) 2001 Wind River Systems
 * Copyright (c) 1997, 1998, 1999, 2001
 *	Bill Paul <wpaul@windriver.com>. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/bge/if_bge.c,v 1.3.2.39 2005/07/03 03:41:18 silby Exp $
 */

#include "opt_bnx.h"
#include "opt_polling.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/interrupt.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/rman.h>
#include <sys/serialize.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <netinet/ip.h>
#include <netinet/tcp.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/ifq_var.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>

#include <dev/netif/mii_layer/mii.h>
#include <dev/netif/mii_layer/miivar.h>
#include <dev/netif/mii_layer/brgphyreg.h>

#include <bus/pci/pcidevs.h>
#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>

#include <dev/netif/bge/if_bgereg.h>
#include <dev/netif/bnx/if_bnxvar.h>

/* "device miibus" required. See GENERIC if you get errors here. */
#include "miibus_if.h"

#define BNX_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)

#define BNX_INTR_CKINTVL	((10 * hz) / 1000)	/* 10ms */

static const struct bnx_type {
	uint16_t	bnx_vid;
	uint16_t	bnx_did;
	char		*bnx_name;
} bnx_devs[] = {
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5717,
		"Broadcom BCM5717 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5718,
		"Broadcom BCM5718 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5719,
		"Broadcom BCM5719 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5720_ALT,
		"Broadcom BCM5720 Gigabit Ethernet" },

	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57761,
		"Broadcom BCM57761 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57762,
		"Broadcom BCM57762 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57765,
		"Broadcom BCM57765 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57766,
		"Broadcom BCM57766 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57781,
		"Broadcom BCM57781 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57782,
		"Broadcom BCM57782 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57785,
		"Broadcom BCM57785 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57786,
		"Broadcom BCM57786 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57791,
		"Broadcom BCM57791 Fast Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57795,
		"Broadcom BCM57795 Fast Ethernet" },

	{ 0, 0, NULL }
};

#define BNX_IS_JUMBO_CAPABLE(sc)	((sc)->bnx_flags & BNX_FLAG_JUMBO)
#define BNX_IS_5717_PLUS(sc)		((sc)->bnx_flags & BNX_FLAG_5717_PLUS)
#define BNX_IS_57765_PLUS(sc)		((sc)->bnx_flags & BNX_FLAG_57765_PLUS)
#define BNX_IS_57765_FAMILY(sc)	\
	((sc)->bnx_flags & BNX_FLAG_57765_FAMILY)

typedef int	(*bnx_eaddr_fcn_t)(struct bnx_softc *, uint8_t[]);

static int	bnx_probe(device_t);
static int	bnx_attach(device_t);
static int	bnx_detach(device_t);
static void	bnx_shutdown(device_t);
static int	bnx_suspend(device_t);
static int	bnx_resume(device_t);
static int	bnx_miibus_readreg(device_t, int, int);
static int	bnx_miibus_writereg(device_t, int, int, int);
static void	bnx_miibus_statchg(device_t);

#ifdef DEVICE_POLLING
static void	bnx_poll(struct ifnet *ifp, enum poll_cmd cmd, int count);
#endif
static void	bnx_intr_legacy(void *);
static void	bnx_msi(void *);
static void	bnx_msi_oneshot(void *);
static void	bnx_intr(struct bnx_softc *);
static void	bnx_enable_intr(struct bnx_softc *);
static void	bnx_disable_intr(struct bnx_softc *);
static void	bnx_txeof(struct bnx_softc *, uint16_t);
static void	bnx_rxeof(struct bnx_softc *, uint16_t);

static void	bnx_start(struct ifnet *);
static int	bnx_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	bnx_init(void *);
static void	bnx_stop(struct bnx_softc *);
static void	bnx_watchdog(struct ifnet *);
static int	bnx_ifmedia_upd(struct ifnet *);
static void	bnx_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static void	bnx_tick(void *);

static int	bnx_alloc_jumbo_mem(struct bnx_softc *);
static void	bnx_free_jumbo_mem(struct bnx_softc *);
static struct bnx_jslot
		*bnx_jalloc(struct bnx_softc *);
static void	bnx_jfree(void *);
static void	bnx_jref(void *);
static int	bnx_newbuf_std(struct bnx_softc *, int, int);
static int	bnx_newbuf_jumbo(struct bnx_softc *, int, int);
static void	bnx_setup_rxdesc_std(struct bnx_softc *, int);
static void	bnx_setup_rxdesc_jumbo(struct bnx_softc *, int);
static int	bnx_init_rx_ring_std(struct bnx_softc *);
static void	bnx_free_rx_ring_std(struct bnx_softc *);
static int	bnx_init_rx_ring_jumbo(struct bnx_softc *);
static void	bnx_free_rx_ring_jumbo(struct bnx_softc *);
static void	bnx_free_tx_ring(struct bnx_softc *);
static int	bnx_init_tx_ring(struct bnx_softc *);
static int	bnx_dma_alloc(struct bnx_softc *);
static void	bnx_dma_free(struct bnx_softc *);
static int	bnx_dma_block_alloc(struct bnx_softc *, bus_size_t,
		    bus_dma_tag_t *, bus_dmamap_t *, void **, bus_addr_t *);
static void	bnx_dma_block_free(bus_dma_tag_t, bus_dmamap_t, void *);
static struct mbuf *
		bnx_defrag_shortdma(struct mbuf *);
static int	bnx_encap(struct bnx_softc *, struct mbuf **, uint32_t *);
static int	bnx_setup_tso(struct bnx_softc *, struct mbuf **,
		    uint16_t *, uint16_t *);

static void	bnx_reset(struct bnx_softc *);
static int	bnx_chipinit(struct bnx_softc *);
static int	bnx_blockinit(struct bnx_softc *);
static void	bnx_stop_block(struct bnx_softc *, bus_size_t, uint32_t);
static void	bnx_enable_msi(struct bnx_softc *sc);
static void	bnx_setmulti(struct bnx_softc *);
static void	bnx_setpromisc(struct bnx_softc *);
static void	bnx_stats_update_regs(struct bnx_softc *);
static uint32_t	bnx_dma_swap_options(struct bnx_softc *);

static uint32_t	bnx_readmem_ind(struct bnx_softc *, uint32_t);
static void	bnx_writemem_ind(struct bnx_softc *, uint32_t, uint32_t);
#ifdef notdef
static uint32_t	bnx_readreg_ind(struct bnx_softc *, uint32_t);
#endif
static void	bnx_writereg_ind(struct bnx_softc *, uint32_t, uint32_t);
static void	bnx_writemem_direct(struct bnx_softc *, uint32_t, uint32_t);
static void	bnx_writembx(struct bnx_softc *, int, int);
static uint8_t	bnx_nvram_getbyte(struct bnx_softc *, int, uint8_t *);
static int	bnx_read_nvram(struct bnx_softc *, caddr_t, int, int);
static uint8_t	bnx_eeprom_getbyte(struct bnx_softc *, uint32_t, uint8_t *);
static int	bnx_read_eeprom(struct bnx_softc *, caddr_t, uint32_t, size_t);

static void	bnx_tbi_link_upd(struct bnx_softc *, uint32_t);
static void	bnx_copper_link_upd(struct bnx_softc *, uint32_t);
static void	bnx_autopoll_link_upd(struct bnx_softc *, uint32_t);
static void	bnx_link_poll(struct bnx_softc *);

static int	bnx_get_eaddr_mem(struct bnx_softc *, uint8_t[]);
static int	bnx_get_eaddr_nvram(struct bnx_softc *, uint8_t[]);
static int	bnx_get_eaddr_eeprom(struct bnx_softc *, uint8_t[]);
static int	bnx_get_eaddr(struct bnx_softc *, uint8_t[]);

static void	bnx_coal_change(struct bnx_softc *);
static int	bnx_sysctl_rx_coal_ticks(SYSCTL_HANDLER_ARGS);
static int	bnx_sysctl_tx_coal_ticks(SYSCTL_HANDLER_ARGS);
static int	bnx_sysctl_rx_coal_bds(SYSCTL_HANDLER_ARGS);
static int	bnx_sysctl_tx_coal_bds(SYSCTL_HANDLER_ARGS);
static int	bnx_sysctl_rx_coal_bds_int(SYSCTL_HANDLER_ARGS);
static int	bnx_sysctl_tx_coal_bds_int(SYSCTL_HANDLER_ARGS);
static int	bnx_sysctl_coal_chg(SYSCTL_HANDLER_ARGS, uint32_t *,
		    int, int, uint32_t);

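/* Loader tunable: set hw.bnx.msi.enable=0 to fall back to legacy INTx. */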
static int	bnx_msi_enable = 1;
TUNABLE_INT("hw.bnx.msi.enable", &bnx_msi_enable);

static device_method_t bnx_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		bnx_probe),
	DEVMETHOD(device_attach,	bnx_attach),
	DEVMETHOD(device_detach,	bnx_detach),
	DEVMETHOD(device_shutdown,	bnx_shutdown),
	DEVMETHOD(device_suspend,	bnx_suspend),
	DEVMETHOD(device_resume,	bnx_resume),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	bnx_miibus_readreg),
	DEVMETHOD(miibus_writereg,	bnx_miibus_writereg),
	DEVMETHOD(miibus_statchg,	bnx_miibus_statchg),

	{ 0, 0 }
};

static DEFINE_CLASS_0(bnx, bnx_driver, bnx_methods, sizeof(struct bnx_softc));
static devclass_t bnx_devclass;

DECLARE_DUMMY_MODULE(if_bnx);
DRIVER_MODULE(if_bnx, pci, bnx_driver, bnx_devclass, NULL, NULL);
DRIVER_MODULE(miibus, bnx, miibus_driver, miibus_devclass, NULL, NULL);

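/*
 * Indirect access to the chip's internal memory: a window is positioned
 * via the PCI config space BASEADDR register and the data is then
 * read/written through the MEMWIN DATA config register.
 */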
static uint32_t
bnx_readmem_ind(struct bnx_softc *sc, uint32_t off)
{
	device_t dev = sc->bnx_dev;
	uint32_t val;

	if (sc->bnx_asicrev == BGE_ASICREV_BCM5906 &&
	    off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4)
		return 0;

	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
	val = pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4);
	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
	return (val);
}

static void
bnx_writemem_ind(struct bnx_softc *sc, uint32_t off, uint32_t val)
{
	device_t dev = sc->bnx_dev;

	if (sc->bnx_asicrev == BGE_ASICREV_BCM5906 &&
	    off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4)
		return;

	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
	pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
}

#ifdef notdef
static uint32_t
bnx_readreg_ind(struct bnx_softc *sc, uint32_t off)
{
	device_t dev = sc->bnx_dev;

	pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
	return(pci_read_config(dev, BGE_PCI_REG_DATA, 4));
}
#endif

static void
bnx_writereg_ind(struct bnx_softc *sc, uint32_t off, uint32_t val)
{
	device_t dev = sc->bnx_dev;

	pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
	pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);
}

static void
bnx_writemem_direct(struct bnx_softc *sc, uint32_t off, uint32_t val)
{
	CSR_WRITE_4(sc, off, val);
}

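/*
 * Write a mailbox register. The BCM5906 keeps its interrupt mailboxes
 * in the LPMBX register region, so the offset is rebased before the
 * write on that chip.
 */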
static void
bnx_writembx(struct bnx_softc *sc, int off, int val)
{
	if (sc->bnx_asicrev == BGE_ASICREV_BCM5906)
		off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI;

	CSR_WRITE_4(sc, off, val);
}

static uint8_t
bnx_nvram_getbyte(struct bnx_softc *sc, int addr, uint8_t *dest)
{
	uint32_t access, byte = 0;
	int i;

	/* Lock. */
	CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
	for (i = 0; i < 8000; i++) {
		if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1)
			break;
		DELAY(20);
	}
	if (i == 8000)
		return (1);

	/* Enable access. */
	access = CSR_READ_4(sc, BGE_NVRAM_ACCESS);
	CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access | BGE_NVRAMACC_ENABLE);

	CSR_WRITE_4(sc, BGE_NVRAM_ADDR, addr & 0xfffffffc);
	CSR_WRITE_4(sc, BGE_NVRAM_CMD, BGE_NVRAM_READCMD);
	for (i = 0; i < BNX_TIMEOUT * 10; i++) {
		DELAY(10);
		if (CSR_READ_4(sc, BGE_NVRAM_CMD) & BGE_NVRAMCMD_DONE) {
			DELAY(10);
			break;
		}
	}

	if (i == BNX_TIMEOUT * 10) {
		if_printf(&sc->arpcom.ac_if, "nvram read timed out\n");
		return (1);
	}

	/* Get result. */
	byte = CSR_READ_4(sc, BGE_NVRAM_RDDATA);

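	/* Byte-swap the 32-bit NVRAM word, then shift out the addressed byte. */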
	*dest = (bswap32(byte) >> ((addr % 4) * 8)) & 0xFF;

	/* Disable access. */
	CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access);

	/* Unlock. */
	CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_CLR1);
	CSR_READ_4(sc, BGE_NVRAM_SWARB);

	return (0);
}

/*
 * Read a sequence of bytes from NVRAM.
 */
static int
bnx_read_nvram(struct bnx_softc *sc, caddr_t dest, int off, int cnt)
{
	int err = 0, i;
	uint8_t byte = 0;

	if (sc->bnx_asicrev != BGE_ASICREV_BCM5906)
		return (1);

	for (i = 0; i < cnt; i++) {
		err = bnx_nvram_getbyte(sc, off + i, &byte);
		if (err)
			break;
		*(dest + i) = byte;
	}

	return (err ? 1 : 0);
}

/*
 * Read a byte of data stored in the EEPROM at address 'addr.' The
 * BCM570x supports both the traditional bitbang interface and an
 * auto access interface for reading the EEPROM. We use the auto
 * access method.
 */
static uint8_t
bnx_eeprom_getbyte(struct bnx_softc *sc, uint32_t addr, uint8_t *dest)
{
	int i;
	uint32_t byte = 0;

	/*
	 * Enable use of auto EEPROM access so we can avoid
	 * having to use the bitbang method.
	 */
	BNX_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);

	/* Reset the EEPROM, load the clock period. */
	CSR_WRITE_4(sc, BGE_EE_ADDR,
	    BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
	DELAY(20);

	/* Issue the read EEPROM command. */
	CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);

	/* Wait for completion */
	for (i = 0; i < BNX_TIMEOUT * 10; i++) {
		DELAY(10);
		if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
			break;
	}

	if (i == BNX_TIMEOUT * 10) {
		if_printf(&sc->arpcom.ac_if, "eeprom read timed out\n");
		return(1);
	}

	/* Get result. */
	byte = CSR_READ_4(sc, BGE_EE_DATA);

	*dest = (byte >> ((addr % 4) * 8)) & 0xFF;

	return(0);
}

/*
 * Read a sequence of bytes from the EEPROM.
 */
static int
bnx_read_eeprom(struct bnx_softc *sc, caddr_t dest, uint32_t off, size_t len)
{
	size_t i;
	int err;
	uint8_t byte;

	for (byte = 0, err = 0, i = 0; i < len; i++) {
		err = bnx_eeprom_getbyte(sc, off + i, &byte);
		if (err)
			break;
		*(dest + i) = byte;
	}

	return(err ? 1 : 0);
}

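/*
 * Read a PHY register through the MI communication interface.
 * Hardware autopolling has to be paused while the host drives the
 * interface by hand, otherwise the two can collide on the MI bus.
 */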
static int
bnx_miibus_readreg(device_t dev, int phy, int reg)
{
	struct bnx_softc *sc = device_get_softc(dev);
	uint32_t val;
	int i;

	KASSERT(phy == sc->bnx_phyno,
	    ("invalid phyno %d, should be %d", phy, sc->bnx_phyno));

	/* Clear the autopoll bit if set, otherwise may trigger PCI errors. */
	if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) {
		CSR_WRITE_4(sc, BGE_MI_MODE,
		    sc->bnx_mi_mode & ~BGE_MIMODE_AUTOPOLL);
		DELAY(80);
	}

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ | BGE_MICOMM_BUSY |
	    BGE_MIPHY(phy) | BGE_MIREG(reg));

	/* Poll for the PHY register access to complete. */
	for (i = 0; i < BNX_TIMEOUT; i++) {
		DELAY(10);
		val = CSR_READ_4(sc, BGE_MI_COMM);
		if ((val & BGE_MICOMM_BUSY) == 0) {
			DELAY(5);
			val = CSR_READ_4(sc, BGE_MI_COMM);
			break;
		}
	}
	if (i == BNX_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if, "PHY read timed out "
		    "(phy %d, reg %d, val 0x%08x)\n", phy, reg, val);
		val = 0;
	}

	/* Restore the autopoll bit if necessary. */
	if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) {
		CSR_WRITE_4(sc, BGE_MI_MODE, sc->bnx_mi_mode);
		DELAY(80);
	}

	if (val & BGE_MICOMM_READFAIL)
		return 0;

	return (val & 0xFFFF);
}

static int
bnx_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct bnx_softc *sc = device_get_softc(dev);
	int i;

	KASSERT(phy == sc->bnx_phyno,
	    ("invalid phyno %d, should be %d", phy, sc->bnx_phyno));

	if (sc->bnx_asicrev == BGE_ASICREV_BCM5906 &&
	    (reg == BRGPHY_MII_1000CTL || reg == BRGPHY_MII_AUXCTL))
		return 0;

	/* Clear the autopoll bit if set, otherwise may trigger PCI errors. */
	if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) {
		CSR_WRITE_4(sc, BGE_MI_MODE,
		    sc->bnx_mi_mode & ~BGE_MIMODE_AUTOPOLL);
		DELAY(80);
	}

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE | BGE_MICOMM_BUSY |
	    BGE_MIPHY(phy) | BGE_MIREG(reg) | val);

	for (i = 0; i < BNX_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) {
			DELAY(5);
			CSR_READ_4(sc, BGE_MI_COMM); /* dummy read */
			break;
		}
	}
	if (i == BNX_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if, "PHY write timed out "
		    "(phy %d, reg %d, val %d)\n", phy, reg, val);
	}

	/* Restore the autopoll bit if necessary. */
	if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) {
		CSR_WRITE_4(sc, BGE_MI_MODE, sc->bnx_mi_mode);
		DELAY(80);
	}

	return 0;
}

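/*
 * Called by miibus when the PHY reports a media change. Cache the
 * link state, then reprogram the MAC's port mode and duplex to match
 * the negotiated media.
 */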
static void
bnx_miibus_statchg(device_t dev)
{
	struct bnx_softc *sc;
	struct mii_data *mii;

	sc = device_get_softc(dev);
	mii = device_get_softc(sc->bnx_miibus);

	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->bnx_link = 1;
			break;
		case IFM_1000_T:
		case IFM_1000_SX:
		case IFM_2500_SX:
			if (sc->bnx_asicrev != BGE_ASICREV_BCM5906)
				sc->bnx_link = 1;
			else
				sc->bnx_link = 0;
			break;
		default:
			sc->bnx_link = 0;
			break;
		}
	} else {
		sc->bnx_link = 0;
	}
	if (sc->bnx_link == 0)
		return;

	BNX_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
	    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) {
		BNX_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
	} else {
		BNX_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
	}

	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
		BNX_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
	} else {
		BNX_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
	}
}

/*
 * Memory management for jumbo frames.
 */
static int
bnx_alloc_jumbo_mem(struct bnx_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct bnx_jslot *entry;
	uint8_t *ptr;
	bus_addr_t paddr;
	int i, error;

	/*
	 * Create tag for jumbo mbufs.
	 * This is really a bit of a kludge. We allocate a special
	 * jumbo buffer pool which (thanks to the way our DMA
	 * memory allocation works) will consist of contiguous
	 * pages. This means that even though a jumbo buffer might
	 * be larger than a page size, we don't really need to
	 * map it into more than one DMA segment. However, the
	 * default mbuf tag will result in multi-segment mappings,
	 * so we have to create a special jumbo mbuf tag that
	 * lets us get away with mapping the jumbo buffers as
	 * a single segment. I think eventually the driver should
	 * be changed so that it uses ordinary mbufs and cluster
	 * buffers, i.e. jumbo frames can span multiple DMA
	 * descriptors. But that's a project for another day.
	 */

	/*
	 * Create DMA stuffs for jumbo RX ring.
	 */
	error = bnx_dma_block_alloc(sc, BGE_JUMBO_RX_RING_SZ,
	    &sc->bnx_cdata.bnx_rx_jumbo_ring_tag,
	    &sc->bnx_cdata.bnx_rx_jumbo_ring_map,
	    (void *)&sc->bnx_ldata.bnx_rx_jumbo_ring,
	    &sc->bnx_ldata.bnx_rx_jumbo_ring_paddr);
	if (error) {
		if_printf(ifp, "could not create jumbo RX ring\n");
		return error;
	}

	/*
	 * Create DMA stuffs for jumbo buffer block.
	 */
	error = bnx_dma_block_alloc(sc, BNX_JMEM,
	    &sc->bnx_cdata.bnx_jumbo_tag,
	    &sc->bnx_cdata.bnx_jumbo_map,
	    (void **)&sc->bnx_ldata.bnx_jumbo_buf,
	    &paddr);
	if (error) {
		if_printf(ifp, "could not create jumbo buffer\n");
		return error;
	}

	SLIST_INIT(&sc->bnx_jfree_listhead);

	/*
	 * Now divide it up into 9K pieces and save the addresses
	 * in an array. Note that we play an evil trick here by using
	 * the first few bytes in the buffer to hold the address
	 * of the softc structure for this interface. This is because
	 * bnx_jfree() needs it, but it is called by the mbuf management
	 * code which will not pass it to us explicitly.
	 */
	for (i = 0, ptr = sc->bnx_ldata.bnx_jumbo_buf; i < BNX_JSLOTS; i++) {
		entry = &sc->bnx_cdata.bnx_jslots[i];
		entry->bnx_sc = sc;
		entry->bnx_buf = ptr;
		entry->bnx_paddr = paddr;
		entry->bnx_inuse = 0;
		entry->bnx_slot = i;
		SLIST_INSERT_HEAD(&sc->bnx_jfree_listhead, entry, jslot_link);

		ptr += BNX_JLEN;
		paddr += BNX_JLEN;
	}
	return 0;
}

static void
bnx_free_jumbo_mem(struct bnx_softc *sc)
{
	/* Destroy jumbo RX ring. */
	bnx_dma_block_free(sc->bnx_cdata.bnx_rx_jumbo_ring_tag,
	    sc->bnx_cdata.bnx_rx_jumbo_ring_map,
	    sc->bnx_ldata.bnx_rx_jumbo_ring);

	/* Destroy jumbo buffer block. */
	bnx_dma_block_free(sc->bnx_cdata.bnx_jumbo_tag,
	    sc->bnx_cdata.bnx_jumbo_map,
	    sc->bnx_ldata.bnx_jumbo_buf);
}

/*
 * Allocate a jumbo buffer.
 */
static struct bnx_jslot *
bnx_jalloc(struct bnx_softc *sc)
{
	struct bnx_jslot *entry;

	lwkt_serialize_enter(&sc->bnx_jslot_serializer);
	entry = SLIST_FIRST(&sc->bnx_jfree_listhead);
	if (entry) {
		SLIST_REMOVE_HEAD(&sc->bnx_jfree_listhead, jslot_link);
		entry->bnx_inuse = 1;
	} else {
		if_printf(&sc->arpcom.ac_if, "no free jumbo buffers\n");
	}
	lwkt_serialize_exit(&sc->bnx_jslot_serializer);
	return(entry);
}

/*
 * Adjust usage count on a jumbo buffer.
 */
static void
bnx_jref(void *arg)
{
	struct bnx_jslot *entry = (struct bnx_jslot *)arg;
	struct bnx_softc *sc = entry->bnx_sc;

	if (sc == NULL)
		panic("bnx_jref: can't find softc pointer!");

	if (&sc->bnx_cdata.bnx_jslots[entry->bnx_slot] != entry) {
		panic("bnx_jref: asked to reference buffer "
		    "that we don't manage!");
	} else if (entry->bnx_inuse == 0) {
		panic("bnx_jref: buffer already free!");
	} else {
		atomic_add_int(&entry->bnx_inuse, 1);
	}
}

/*
 * Release a jumbo buffer.
 */
static void
bnx_jfree(void *arg)
{
	struct bnx_jslot *entry = (struct bnx_jslot *)arg;
	struct bnx_softc *sc = entry->bnx_sc;

	if (sc == NULL)
		panic("bnx_jfree: can't find softc pointer!");

	if (&sc->bnx_cdata.bnx_jslots[entry->bnx_slot] != entry) {
		panic("bnx_jfree: asked to free buffer that we don't manage!");
	} else if (entry->bnx_inuse == 0) {
		panic("bnx_jfree: buffer already free!");
	} else {
		/*
		 * Possible MP race to 0, use the serializer. The atomic insn
		 * is still needed for races against bnx_jref().
		 */
		lwkt_serialize_enter(&sc->bnx_jslot_serializer);
		atomic_subtract_int(&entry->bnx_inuse, 1);
		if (entry->bnx_inuse == 0) {
			SLIST_INSERT_HEAD(&sc->bnx_jfree_listhead,
			    entry, jslot_link);
		}
		lwkt_serialize_exit(&sc->bnx_jslot_serializer);
	}
}


/*
 * Initialize a standard receive ring descriptor.
 */
static int
bnx_newbuf_std(struct bnx_softc *sc, int i, int init)
{
	struct mbuf *m_new = NULL;
	bus_dma_segment_t seg;
	bus_dmamap_t map;
	int error, nsegs;

	m_new = m_getcl(init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m_new == NULL)
		return ENOBUFS;
	m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
	m_adj(m_new, ETHER_ALIGN);

	error = bus_dmamap_load_mbuf_segment(sc->bnx_cdata.bnx_rx_mtag,
	    sc->bnx_cdata.bnx_rx_tmpmap, m_new,
	    &seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m_new);
		return error;
	}

	if (!init) {
		bus_dmamap_sync(sc->bnx_cdata.bnx_rx_mtag,
		    sc->bnx_cdata.bnx_rx_std_dmamap[i],
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->bnx_cdata.bnx_rx_mtag,
		    sc->bnx_cdata.bnx_rx_std_dmamap[i]);
	}

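	/*
	 * Swap the spare DMA map with this slot's map, so the mapping
	 * just loaded for the new mbuf stays attached to the ring slot
	 * and the old map becomes the new spare.
	 */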
	map = sc->bnx_cdata.bnx_rx_tmpmap;
	sc->bnx_cdata.bnx_rx_tmpmap = sc->bnx_cdata.bnx_rx_std_dmamap[i];
	sc->bnx_cdata.bnx_rx_std_dmamap[i] = map;

	sc->bnx_cdata.bnx_rx_std_chain[i].bnx_mbuf = m_new;
	sc->bnx_cdata.bnx_rx_std_chain[i].bnx_paddr = seg.ds_addr;

	bnx_setup_rxdesc_std(sc, i);
	return 0;
}

static void
bnx_setup_rxdesc_std(struct bnx_softc *sc, int i)
{
	struct bnx_rxchain *rc;
	struct bge_rx_bd *r;

	rc = &sc->bnx_cdata.bnx_rx_std_chain[i];
	r = &sc->bnx_ldata.bnx_rx_std_ring[i];

	r->bge_addr.bge_addr_lo = BGE_ADDR_LO(rc->bnx_paddr);
	r->bge_addr.bge_addr_hi = BGE_ADDR_HI(rc->bnx_paddr);
	r->bge_len = rc->bnx_mbuf->m_len;
	r->bge_idx = i;
	r->bge_flags = BGE_RXBDFLAG_END;
}

/*
 * Initialize a jumbo receive ring descriptor. This allocates
 * a jumbo buffer from the pool managed internally by the driver.
 */
static int
bnx_newbuf_jumbo(struct bnx_softc *sc, int i, int init)
{
	struct mbuf *m_new = NULL;
	struct bnx_jslot *buf;
	bus_addr_t paddr;

	/* Allocate the mbuf. */
	MGETHDR(m_new, init ? MB_WAIT : MB_DONTWAIT, MT_DATA);
	if (m_new == NULL)
		return ENOBUFS;

	/* Allocate the jumbo buffer */
	buf = bnx_jalloc(sc);
	if (buf == NULL) {
		m_freem(m_new);
		return ENOBUFS;
	}

	/* Attach the buffer to the mbuf. */
	m_new->m_ext.ext_arg = buf;
	m_new->m_ext.ext_buf = buf->bnx_buf;
	m_new->m_ext.ext_free = bnx_jfree;
	m_new->m_ext.ext_ref = bnx_jref;
	m_new->m_ext.ext_size = BNX_JUMBO_FRAMELEN;

	m_new->m_flags |= M_EXT;

	m_new->m_data = m_new->m_ext.ext_buf;
	m_new->m_len = m_new->m_pkthdr.len = m_new->m_ext.ext_size;

	paddr = buf->bnx_paddr;
	m_adj(m_new, ETHER_ALIGN);
	paddr += ETHER_ALIGN;

	/* Save necessary information */
	sc->bnx_cdata.bnx_rx_jumbo_chain[i].bnx_mbuf = m_new;
	sc->bnx_cdata.bnx_rx_jumbo_chain[i].bnx_paddr = paddr;

	/* Set up the descriptor. */
	bnx_setup_rxdesc_jumbo(sc, i);
	return 0;
}

static void
bnx_setup_rxdesc_jumbo(struct bnx_softc *sc, int i)
{
	struct bge_rx_bd *r;
	struct bnx_rxchain *rc;

	r = &sc->bnx_ldata.bnx_rx_jumbo_ring[i];
	rc = &sc->bnx_cdata.bnx_rx_jumbo_chain[i];

	r->bge_addr.bge_addr_lo = BGE_ADDR_LO(rc->bnx_paddr);
	r->bge_addr.bge_addr_hi = BGE_ADDR_HI(rc->bnx_paddr);
	r->bge_len = rc->bnx_mbuf->m_len;
	r->bge_idx = i;
	r->bge_flags = BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING;
}

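/*
 * Fill every slot of the standard RX ring with a fresh mbuf and hand
 * the initial producer index to the chip.
 */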
static int
bnx_init_rx_ring_std(struct bnx_softc *sc)
{
	int i, error;

	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
		error = bnx_newbuf_std(sc, i, 1);
		if (error)
			return error;
	}

	sc->bnx_std = BGE_STD_RX_RING_CNT - 1;
	bnx_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bnx_std);

	return(0);
}

static void
bnx_free_rx_ring_std(struct bnx_softc *sc)
{
	int i;

	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
		struct bnx_rxchain *rc = &sc->bnx_cdata.bnx_rx_std_chain[i];

		if (rc->bnx_mbuf != NULL) {
			bus_dmamap_unload(sc->bnx_cdata.bnx_rx_mtag,
			    sc->bnx_cdata.bnx_rx_std_dmamap[i]);
			m_freem(rc->bnx_mbuf);
			rc->bnx_mbuf = NULL;
		}
		bzero(&sc->bnx_ldata.bnx_rx_std_ring[i],
		    sizeof(struct bge_rx_bd));
	}
}

static int
bnx_init_rx_ring_jumbo(struct bnx_softc *sc)
{
	struct bge_rcb *rcb;
	int i, error;

	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
		error = bnx_newbuf_jumbo(sc, i, 1);
		if (error)
			return error;
	}

	sc->bnx_jumbo = BGE_JUMBO_RX_RING_CNT - 1;

	rcb = &sc->bnx_ldata.bnx_info.bnx_jumbo_rx_rcb;
	rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 0);
	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);

	bnx_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bnx_jumbo);

	return(0);
}

static void
bnx_free_rx_ring_jumbo(struct bnx_softc *sc)
{
	int i;

	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
		struct bnx_rxchain *rc = &sc->bnx_cdata.bnx_rx_jumbo_chain[i];

		if (rc->bnx_mbuf != NULL) {
			m_freem(rc->bnx_mbuf);
			rc->bnx_mbuf = NULL;
		}
		bzero(&sc->bnx_ldata.bnx_rx_jumbo_ring[i],
		    sizeof(struct bge_rx_bd));
	}
}

static void
bnx_free_tx_ring(struct bnx_softc *sc)
{
	int i;

	for (i = 0; i < BGE_TX_RING_CNT; i++) {
		if (sc->bnx_cdata.bnx_tx_chain[i] != NULL) {
			bus_dmamap_unload(sc->bnx_cdata.bnx_tx_mtag,
			    sc->bnx_cdata.bnx_tx_dmamap[i]);
			m_freem(sc->bnx_cdata.bnx_tx_chain[i]);
			sc->bnx_cdata.bnx_tx_chain[i] = NULL;
		}
		bzero(&sc->bnx_ldata.bnx_tx_ring[i],
		    sizeof(struct bge_tx_bd));
	}
}

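/*
 * Reset the transmit ring bookkeeping and point both the host and
 * NIC producer indices back at slot 0.
 */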
static int
bnx_init_tx_ring(struct bnx_softc *sc)
{
	sc->bnx_txcnt = 0;
	sc->bnx_tx_saved_considx = 0;
	sc->bnx_tx_prodidx = 0;

	/* Initialize transmit producer index for host-memory send ring. */
	bnx_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bnx_tx_prodidx);
	bnx_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);

	return(0);
}

static void
bnx_setmulti(struct bnx_softc *sc)
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	uint32_t hashes[4] = { 0, 0, 0, 0 };
	int h, i;

	ifp = &sc->arpcom.ac_if;

	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		for (i = 0; i < 4; i++)
			CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
		return;
	}

	/* First, zot all the existing filters. */
	for (i = 0; i < 4; i++)
		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);

	/* Now program new ones. */
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
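		/*
		 * The low 7 bits of the little-endian CRC select one of
		 * 128 hash-filter bits spread across the four 32-bit MAR
		 * registers.
		 */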
		h = ether_crc32_le(
		    LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
		    ETHER_ADDR_LEN) & 0x7f;
		hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
	}

	for (i = 0; i < 4; i++)
		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
}

/*
 * Do endian, PCI and DMA initialization. Also check the on-board ROM
 * self-test results.
 */
static int
bnx_chipinit(struct bnx_softc *sc)
{
	uint32_t dma_rw_ctl, mode_ctl;
	int i;

	/* Set endian type before we access any non-PCI registers. */
	pci_write_config(sc->bnx_dev, BGE_PCI_MISC_CTL,
	    BGE_INIT | BGE_PCIMISCCTL_TAGGED_STATUS, 4);

	/* Clear the MAC control register */
	CSR_WRITE_4(sc, BGE_MAC_MODE, 0);

	/*
	 * Clear the MAC statistics block in the NIC's
	 * internal memory.
	 */
	for (i = BGE_STATS_BLOCK;
	    i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t))
		BNX_MEMWIN_WRITE(sc, i, 0);

	for (i = BGE_STATUS_BLOCK;
	    i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t))
		BNX_MEMWIN_WRITE(sc, i, 0);

	if (BNX_IS_57765_FAMILY(sc)) {
		uint32_t val;

		if (sc->bnx_chipid == BGE_CHIPID_BCM57765_A0) {
			mode_ctl = CSR_READ_4(sc, BGE_MODE_CTL);
			val = mode_ctl & ~BGE_MODECTL_PCIE_PORTS;

			/* Access the lower 1K of PL PCI-E block registers. */
			CSR_WRITE_4(sc, BGE_MODE_CTL,
			    val | BGE_MODECTL_PCIE_PL_SEL);

			val = CSR_READ_4(sc, BGE_PCIE_PL_LO_PHYCTL5);
			val |= BGE_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ;
			CSR_WRITE_4(sc, BGE_PCIE_PL_LO_PHYCTL5, val);

			CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl);
		}
		if (sc->bnx_chiprev != BGE_CHIPREV_57765_AX) {
			mode_ctl = CSR_READ_4(sc, BGE_MODE_CTL);
			val = mode_ctl & ~BGE_MODECTL_PCIE_PORTS;

			/* Access the lower 1K of DL PCI-E block registers. */
			CSR_WRITE_4(sc, BGE_MODE_CTL,
			    val | BGE_MODECTL_PCIE_DL_SEL);

			val = CSR_READ_4(sc, BGE_PCIE_DL_LO_FTSMAX);
			val &= ~BGE_PCIE_DL_LO_FTSMAX_MASK;
			val |= BGE_PCIE_DL_LO_FTSMAX_VAL;
			CSR_WRITE_4(sc, BGE_PCIE_DL_LO_FTSMAX, val);

			CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl);
		}

		val = CSR_READ_4(sc, BGE_CPMU_LSPD_10MB_CLK);
		val &= ~BGE_CPMU_LSPD_10MB_MACCLK_MASK;
		val |= BGE_CPMU_LSPD_10MB_MACCLK_6_25;
		CSR_WRITE_4(sc, BGE_CPMU_LSPD_10MB_CLK, val);
	}

	/*
	 * Set up the PCI DMA control register.
	 */
	dma_rw_ctl = pci_read_config(sc->bnx_dev, BGE_PCI_DMA_RW_CTL, 4);
	/*
	 * Disable 32bytes cache alignment for DMA write to host memory
	 *
	 * NOTE:
	 * 64bytes cache alignment for DMA write to host memory is still
	 * enabled.
	 */
	dma_rw_ctl |= BGE_PCIDMARWCTL_DIS_CACHE_ALIGNMENT;
	if (sc->bnx_chipid == BGE_CHIPID_BCM57765_A0)
		dma_rw_ctl &= ~BGE_PCIDMARWCTL_CRDRDR_RDMA_MRRS_MSK;
	/*
	 * Enable HW workaround for controllers that misinterpret
	 * a status tag update and leave interrupts permanently
	 * disabled.
	 */
	if (sc->bnx_asicrev != BGE_ASICREV_BCM5717 &&
	    !BNX_IS_57765_FAMILY(sc))
		dma_rw_ctl |= BGE_PCIDMARWCTL_TAGGED_STATUS_WA;
	if (bootverbose) {
		if_printf(&sc->arpcom.ac_if, "DMA read/write %#x\n",
		    dma_rw_ctl);
	}
	pci_write_config(sc->bnx_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);

	/*
	 * Set up general mode register.
	 */
	mode_ctl = bnx_dma_swap_options(sc) | BGE_MODECTL_MAC_ATTN_INTR |
	    BGE_MODECTL_HOST_SEND_BDS | BGE_MODECTL_TX_NO_PHDR_CSUM;
	CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl);

	/*
	 * Disable memory write invalidate. Apparently it is not supported
	 * properly by these devices. Also ensure that INTx isn't disabled,
	 * as these chips need it even when using MSI.
	 */
	PCI_CLRBIT(sc->bnx_dev, BGE_PCI_CMD,
	    (PCIM_CMD_MWRICEN | PCIM_CMD_INTxDIS), 4);

	/* Set the timer prescaler (always 66MHz) */
	CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/);

	if (sc->bnx_asicrev == BGE_ASICREV_BCM5906) {
		DELAY(40);	/* XXX */

		/* Put PHY into ready state */
		BNX_CLRBIT(sc, BGE_MISC_CFG, BGE_MISCCFG_EPHY_IDDQ);
		CSR_READ_4(sc, BGE_MISC_CFG); /* Flush */
		DELAY(40);
	}

	return(0);
}

static int
bnx_blockinit(struct bnx_softc *sc)
{
	struct bge_rcb *rcb;
	bus_size_t vrcb;
	bge_hostaddr taddr;
	uint32_t val;
	int i, limit;

	/*
	 * Initialize the memory window pointer register so that
	 * we can access the first 32K of internal NIC RAM. This will
	 * allow us to set up the TX send ring RCBs and the RX return
	 * ring RCBs, plus other things which live in NIC memory.
	 */
	CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);

	/* Configure mbuf pool watermarks */
	if (BNX_IS_57765_PLUS(sc)) {
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
		if (sc->arpcom.ac_if.if_mtu > ETHERMTU) {
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x7e);
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xea);
		} else {
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x2a);
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xa0);
		}
	} else if (sc->bnx_asicrev == BGE_ASICREV_BCM5906) {
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x04);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x10);
	} else {
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
	}

	/* Configure DMA resource watermarks */
	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);

	/* Enable buffer manager */
	val = BGE_BMANMODE_ENABLE | BGE_BMANMODE_LOMBUF_ATTN;
	/*
	 * Change the arbitration algorithm of TXMBUF read request to
	 * round-robin instead of priority based for BCM5719. When
	 * TXFIFO is almost empty, RDMA will hold its request until
	 * TXFIFO is not almost empty.
	 */
	if (sc->bnx_asicrev == BGE_ASICREV_BCM5719)
		val |= BGE_BMANMODE_NO_TX_UNDERRUN;
	if (sc->bnx_asicrev == BGE_ASICREV_BCM5717 ||
	    sc->bnx_chipid == BGE_CHIPID_BCM5719_A0 ||
	    sc->bnx_chipid == BGE_CHIPID_BCM5720_A0)
		val |= BGE_BMANMODE_LOMBUF_ATTN;
	CSR_WRITE_4(sc, BGE_BMAN_MODE, val);

	/* Poll for buffer manager start indication */
	for (i = 0; i < BNX_TIMEOUT; i++) {
		if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
			break;
		DELAY(10);
	}

	if (i == BNX_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if,
		    "buffer manager failed to start\n");
		return(ENXIO);
	}

	/* Enable flow-through queues */
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);

	/* Wait until queue initialization is complete */
	for (i = 0; i < BNX_TIMEOUT; i++) {
		if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
			break;
		DELAY(10);
	}

	if (i == BNX_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if,
		    "flow-through queue init failed\n");
		return(ENXIO);
	}

	/*
	 * Summary of rings supported by the controller:
	 *
	 * Standard Receive Producer Ring
	 * - This ring is used to feed receive buffers for "standard"
	 *   sized frames (typically 1536 bytes) to the controller.
	 *
	 * Jumbo Receive Producer Ring
	 * - This ring is used to feed receive buffers for jumbo sized
	 *   frames (i.e. anything bigger than the "standard" frames)
	 *   to the controller.
	 *
	 * Mini Receive Producer Ring
	 * - This ring is used to feed receive buffers for "mini"
	 *   sized frames to the controller.
	 * - This feature required external memory for the controller
	 *   but was never used in a production system. Should always
	 *   be disabled.
	 *
	 * Receive Return Ring
	 * - After the controller has placed an incoming frame into a
	 *   receive buffer that buffer is moved into a receive return
	 *   ring. The driver is then responsible for passing the
	 *   buffer up to the stack. Many versions of the controller
	 *   support multiple RR rings.
	 *
	 * Send Ring
	 * - This ring is used for outgoing frames. Many versions of
	 *   the controller support multiple send rings.
	 */

	/* Initialize the standard receive producer ring control block. */
	rcb = &sc->bnx_ldata.bnx_info.bnx_std_rx_rcb;
	rcb->bge_hostaddr.bge_addr_lo =
	    BGE_ADDR_LO(sc->bnx_ldata.bnx_rx_std_ring_paddr);
	rcb->bge_hostaddr.bge_addr_hi =
	    BGE_ADDR_HI(sc->bnx_ldata.bnx_rx_std_ring_paddr);
	if (BNX_IS_57765_PLUS(sc)) {
		/*
		 * Bits 31-16: Programmable ring size (2048, 1024, 512, .., 32)
		 * Bits 15-2 : Maximum RX frame size
		 * Bit 1    : 1 = Ring Disabled, 0 = Ring Enabled
		 * Bit 0    : Reserved
		 */
		rcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(512, BNX_MAX_FRAMELEN << 2);
	} else {
		/*
		 * Bits 31-16: Programmable ring size (512, 256, 128, 64, 32)
		 * Bits 15-2 : Reserved (should be 0)
		 * Bit 1    : 1 = Ring Disabled, 0 = Ring Enabled
		 * Bit 0    : Reserved
		 */
		rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
	}
	if (BNX_IS_5717_PLUS(sc))
		rcb->bge_nicaddr = BGE_STD_RX_RINGS_5717;
	else
		rcb->bge_nicaddr = BGE_STD_RX_RINGS;
	/* Write the standard receive producer ring control block. */
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
	/* Reset the standard receive producer ring producer index. */
	bnx_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0);

	/*
	 * Initialize the jumbo RX producer ring control
	 * block. We set the 'ring disabled' bit in the
	 * flags field until we're actually ready to start
	 * using this ring (i.e. once we set the MTU
	 * high enough to require it).
	 */
	if (BNX_IS_JUMBO_CAPABLE(sc)) {
		rcb = &sc->bnx_ldata.bnx_info.bnx_jumbo_rx_rcb;
		/* Get the jumbo receive producer ring RCB parameters. */
		rcb->bge_hostaddr.bge_addr_lo =
		    BGE_ADDR_LO(sc->bnx_ldata.bnx_rx_jumbo_ring_paddr);
		rcb->bge_hostaddr.bge_addr_hi =
		    BGE_ADDR_HI(sc->bnx_ldata.bnx_rx_jumbo_ring_paddr);
		rcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(BNX_MAX_FRAMELEN,
		    BGE_RCB_FLAG_RING_DISABLED);
		if (BNX_IS_5717_PLUS(sc))
			rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS_5717;
		else
			rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
		    rcb->bge_hostaddr.bge_addr_hi);
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
		    rcb->bge_hostaddr.bge_addr_lo);
		/* Program the jumbo receive producer ring RCB parameters. */
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
		    rcb->bge_maxlen_flags);
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
		/* Reset the jumbo receive producer ring producer index. */
		bnx_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
	}

	/* Choose de-pipeline mode for BCM5906 A0, A1 and A2. */
	if (sc->bnx_asicrev == BGE_ASICREV_BCM5906 &&
	    (sc->bnx_chipid == BGE_CHIPID_BCM5906_A0 ||
	     sc->bnx_chipid == BGE_CHIPID_BCM5906_A1 ||
	     sc->bnx_chipid == BGE_CHIPID_BCM5906_A2)) {
		CSR_WRITE_4(sc, BGE_ISO_PKT_TX,
		    (CSR_READ_4(sc, BGE_ISO_PKT_TX) & ~3) | 2);
	}

	/*
	 * The BD ring replenish thresholds control how often the
	 * hardware fetches new BD's from the producer rings in host
	 * memory. Setting the value too low on a busy system can
	 * starve the hardware and reduce the throughput.
	 *
	 * Set the BD ring replenish thresholds. The recommended
	 * values are 1/8th the number of descriptors allocated to
	 * each ring.
	 */
	val = 8;
	CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, val);
	if (BNX_IS_JUMBO_CAPABLE(sc)) {
		CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH,
		    BGE_JUMBO_RX_RING_CNT/8);
	}
	if (BNX_IS_57765_PLUS(sc)) {
		CSR_WRITE_4(sc, BGE_STD_REPLENISH_LWM, 32);
		CSR_WRITE_4(sc, BGE_JMB_REPLENISH_LWM, 16);
	}

	/*
	 * Disable all send rings by setting the 'ring disabled' bit
	 * in the flags field of all the TX send ring control blocks,
	 * located in NIC memory.
	 */
	if (BNX_IS_5717_PLUS(sc))
		limit = 4;
	else if (BNX_IS_57765_FAMILY(sc))
		limit = 2;
	else
		limit = 1;
	vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
	for (i = 0; i < limit; i++) {
		RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
		RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
		vrcb += sizeof(struct bge_rcb);
	}

	/* Configure send ring RCB 0 (we use only the first ring) */
	vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
	BGE_HOSTADDR(taddr, sc->bnx_ldata.bnx_tx_ring_paddr);
	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
	if (BNX_IS_5717_PLUS(sc)) {
		RCB_WRITE_4(sc, vrcb, bge_nicaddr, BGE_SEND_RING_5717);
	} else {
		RCB_WRITE_4(sc, vrcb, bge_nicaddr,
		    BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
	}
	RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
	    BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));

	/*
	 * Disable all receive return rings by setting the
	 * 'ring disabled' bit in the flags field of all the receive
	 * return ring control blocks, located in NIC memory.
	 */
	if (BNX_IS_5717_PLUS(sc)) {
		/* Should be 17, use 16 until we get an SRAM map. */
		limit = 16;
	} else if (BNX_IS_57765_FAMILY(sc)) {
		limit = 4;
	} else {
		limit = 1;
	}
	/* Disable all receive return rings. */
	vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
	for (i = 0; i < limit; i++) {
		RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0);
		RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0);
		RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
		    BGE_RCB_FLAG_RING_DISABLED);
		RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
		bnx_writembx(sc, BGE_MBX_RX_CONS0_LO +
		    (i * (sizeof(uint64_t))), 0);
		vrcb += sizeof(struct bge_rcb);
	}

	/*
	 * Set up receive return ring 0. Note that the NIC address
	 * for RX return rings is 0x0. The return rings live entirely
	 * within the host, so the nicaddr field in the RCB isn't used.
	 */
	vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
	BGE_HOSTADDR(taddr, sc->bnx_ldata.bnx_rx_return_ring_paddr);
	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
	RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
	RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
	    BGE_RCB_MAXLEN_FLAGS(sc->bnx_return_ring_cnt, 0));

	/* Set random backoff seed for TX */
	CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
	    sc->arpcom.ac_enaddr[0] + sc->arpcom.ac_enaddr[1] +
	    sc->arpcom.ac_enaddr[2] + sc->arpcom.ac_enaddr[3] +
	    sc->arpcom.ac_enaddr[4] + sc->arpcom.ac_enaddr[5] +
	    BGE_TX_BACKOFF_SEED_MASK);

	/* Set inter-packet gap */
	val = 0x2620;
	if (sc->bnx_asicrev == BGE_ASICREV_BCM5720) {
		val |= CSR_READ_4(sc, BGE_TX_LENGTHS) &
		    (BGE_TXLEN_JMB_FRM_LEN_MSK | BGE_TXLEN_CNT_DN_VAL_MSK);
	}
	CSR_WRITE_4(sc, BGE_TX_LENGTHS, val);

	/*
	 * Specify which ring to use for packets that don't match
	 * any RX rules.
	 */
	CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);

	/*
	 * Configure number of RX lists. One interrupt distribution
	 * list, sixteen active lists, one bad frames class.
	 */
	CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);

	/* Initialize RX list placement stats mask. */
| 1484 | CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF); |
| 1485 | CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1); |
| 1486 | |
| 1487 | /* Disable host coalescing until we get it set up */ |
| 1488 | CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000); |
| 1489 | |
| 1490 | /* Poll to make sure it's shut down. */ |
| 1491 | for (i = 0; i < BNX_TIMEOUT; i++) { |
| 1492 | if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE)) |
| 1493 | break; |
| 1494 | DELAY(10); |
| 1495 | } |
| 1496 | |
| 1497 | if (i == BNX_TIMEOUT) { |
| 1498 | if_printf(&sc->arpcom.ac_if, |
| 1499 | "host coalescing engine failed to idle\n"); |
| 1500 | return(ENXIO); |
| 1501 | } |
| 1502 | |
| 1503 | /* Set up host coalescing defaults */ |
| 1504 | CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bnx_rx_coal_ticks); |
| 1505 | CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bnx_tx_coal_ticks); |
| 1506 | CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bnx_rx_coal_bds); |
| 1507 | CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bnx_tx_coal_bds); |
| 1508 | CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, sc->bnx_rx_coal_bds_int); |
| 1509 | CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, sc->bnx_tx_coal_bds_int); |
| 1510 | |
| 1511 | /* Set up address of status block */ |
| 1512 | bzero(sc->bnx_ldata.bnx_status_block, BGE_STATUS_BLK_SZ); |
| 1513 | CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI, |
| 1514 | BGE_ADDR_HI(sc->bnx_ldata.bnx_status_block_paddr)); |
| 1515 | CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO, |
| 1516 | BGE_ADDR_LO(sc->bnx_ldata.bnx_status_block_paddr)); |
| 1517 | |
| 1518 | /* Set up status block partial update size. */
| 1519 | val = BGE_STATBLKSZ_32BYTE; |
| 1520 | #if 0 |
| 1521 | /* |
| 1522 | * Does not seem to have a visible effect in either
| 1523 | * bulk data (1472B UDP datagram) or tiny data
| 1524 | * (18B UDP datagram) TX tests.
| 1525 | */ |
| 1526 | val |= BGE_HCCMODE_CLRTICK_TX; |
| 1527 | #endif |
| 1528 | /* Turn on host coalescing state machine */ |
| 1529 | CSR_WRITE_4(sc, BGE_HCC_MODE, val | BGE_HCCMODE_ENABLE); |
| 1530 | |
| 1531 | /* Turn on RX BD completion state machine and enable attentions */ |
| 1532 | CSR_WRITE_4(sc, BGE_RBDC_MODE, |
| 1533 | BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN); |
| 1534 | |
| 1535 | /* Turn on RX list placement state machine */ |
| 1536 | CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE); |
| 1537 | |
| 1538 | val = BGE_MACMODE_TXDMA_ENB | BGE_MACMODE_RXDMA_ENB | |
| 1539 | BGE_MACMODE_RX_STATS_CLEAR | BGE_MACMODE_TX_STATS_CLEAR | |
| 1540 | BGE_MACMODE_RX_STATS_ENB | BGE_MACMODE_TX_STATS_ENB | |
| 1541 | BGE_MACMODE_FRMHDR_DMA_ENB; |
| 1542 | |
| 1543 | if (sc->bnx_flags & BNX_FLAG_TBI) |
| 1544 | val |= BGE_PORTMODE_TBI; |
| 1545 | else if (sc->bnx_flags & BNX_FLAG_MII_SERDES) |
| 1546 | val |= BGE_PORTMODE_GMII; |
| 1547 | else |
| 1548 | val |= BGE_PORTMODE_MII; |
| 1549 | |
| 1550 | /* Turn on DMA, clear stats */ |
| 1551 | CSR_WRITE_4(sc, BGE_MAC_MODE, val); |
| 1552 | |
| 1553 | /* Set misc. local control, enable interrupts on attentions */ |
| 1554 | CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN); |
| 1555 | |
| 1556 | #ifdef notdef |
| 1557 | /* Assert GPIO pins for PHY reset */ |
| 1558 | BNX_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0| |
| 1559 | BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2); |
| 1560 | BNX_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0| |
| 1561 | BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2); |
| 1562 | #endif |
| 1563 | |
| 1564 | /* Turn on write DMA state machine */ |
| 1565 | val = BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS; |
| 1566 | /* Enable host coalescing bug fix. */ |
| 1567 | val |= BGE_WDMAMODE_STATUS_TAG_FIX; |
| 1568 | if (sc->bnx_asicrev == BGE_ASICREV_BCM5785) { |
| 1569 | /* Request larger DMA burst size to get better performance. */ |
| 1570 | val |= BGE_WDMAMODE_BURST_ALL_DATA; |
| 1571 | } |
| 1572 | CSR_WRITE_4(sc, BGE_WDMA_MODE, val); |
| 1573 | DELAY(40); |
| 1574 | |
| 1575 | if (BNX_IS_57765_PLUS(sc)) { |
| 1576 | uint32_t dmactl; |
| 1577 | |
| 1578 | dmactl = CSR_READ_4(sc, BGE_RDMA_RSRVCTRL); |
| 1579 | /* |
| 1580 | * Adjust tx margin to prevent TX data corruption and |
| 1581 | * fix internal FIFO overflow. |
| 1582 | */ |
| 1583 | if (sc->bnx_asicrev == BGE_ASICREV_BCM5719 || |
| 1584 | sc->bnx_asicrev == BGE_ASICREV_BCM5720) { |
| 1585 | dmactl &= ~(BGE_RDMA_RSRVCTRL_FIFO_LWM_MASK | |
| 1586 | BGE_RDMA_RSRVCTRL_FIFO_HWM_MASK | |
| 1587 | BGE_RDMA_RSRVCTRL_TXMRGN_MASK); |
| 1588 | dmactl |= BGE_RDMA_RSRVCTRL_FIFO_LWM_1_5K | |
| 1589 | BGE_RDMA_RSRVCTRL_FIFO_HWM_1_5K | |
| 1590 | BGE_RDMA_RSRVCTRL_TXMRGN_320B; |
| 1591 | } |
| 1592 | /* |
| 1593 | * Enable fix for read DMA FIFO overruns. |
| 1594 | * The fix is to limit the number of RX BDs |
| 1595 | * the hardware would fetch at a time.
| 1596 | */ |
| 1597 | CSR_WRITE_4(sc, BGE_RDMA_RSRVCTRL, |
| 1598 | dmactl | BGE_RDMA_RSRVCTRL_FIFO_OFLW_FIX); |
| 1599 | } |
| 1600 | |
| 1601 | if (sc->bnx_asicrev == BGE_ASICREV_BCM5719) { |
| 1602 | CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL, |
| 1603 | CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) | |
| 1604 | BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_4K | |
| 1605 | BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K); |
| 1606 | } else if (sc->bnx_asicrev == BGE_ASICREV_BCM5720) { |
| 1607 | /* |
| 1608 | * Allow 4KB burst length reads for non-LSO frames. |
| 1609 | * Enable 512B burst length reads for buffer descriptors. |
| 1610 | */ |
| 1611 | CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL, |
| 1612 | CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) | |
| 1613 | BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_512 | |
| 1614 | BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K); |
| 1615 | } |
| 1616 | |
| 1617 | /* Turn on read DMA state machine */ |
| 1618 | val = BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS; |
| 1619 | if (sc->bnx_asicrev == BGE_ASICREV_BCM5717) |
| 1620 | val |= BGE_RDMAMODE_MULT_DMA_RD_DIS; |
| 1621 | if (sc->bnx_asicrev == BGE_ASICREV_BCM5784 || |
| 1622 | sc->bnx_asicrev == BGE_ASICREV_BCM5785 || |
| 1623 | sc->bnx_asicrev == BGE_ASICREV_BCM57780) { |
| 1624 | val |= BGE_RDMAMODE_BD_SBD_CRPT_ATTN | |
| 1625 | BGE_RDMAMODE_MBUF_RBD_CRPT_ATTN | |
| 1626 | BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN; |
| 1627 | } |
| 1628 | if (sc->bnx_asicrev == BGE_ASICREV_BCM5720) { |
| 1629 | val |= CSR_READ_4(sc, BGE_RDMA_MODE) & |
| 1630 | BGE_RDMAMODE_H2BNC_VLAN_DET; |
| 1631 | /* |
| 1632 | * Allow multiple outstanding read requests from |
| 1633 | * non-LSO read DMA engine. |
| 1634 | */ |
| 1635 | val &= ~BGE_RDMAMODE_MULT_DMA_RD_DIS; |
| 1636 | } |
| 1637 | if (sc->bnx_flags & BNX_FLAG_TSO) |
| 1638 | val |= BGE_RDMAMODE_TSO4_ENABLE; |
| 1639 | val |= BGE_RDMAMODE_FIFO_LONG_BURST; |
| 1640 | CSR_WRITE_4(sc, BGE_RDMA_MODE, val); |
| 1641 | DELAY(40); |
| 1642 | |
| 1643 | /* Turn on RX data completion state machine */ |
| 1644 | CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE); |
| 1645 | |
| 1646 | /* Turn on RX BD initiator state machine */ |
| 1647 | CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE); |
| 1648 | |
| 1649 | /* Turn on RX data and RX BD initiator state machine */ |
| 1650 | CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE); |
| 1651 | |
| 1652 | /* Turn on send BD completion state machine */ |
| 1653 | CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE); |
| 1654 | |
| 1655 | /* Turn on send data completion state machine */ |
| 1656 | val = BGE_SDCMODE_ENABLE; |
| 1657 | if (sc->bnx_asicrev == BGE_ASICREV_BCM5761) |
| 1658 | val |= BGE_SDCMODE_CDELAY; |
| 1659 | CSR_WRITE_4(sc, BGE_SDC_MODE, val); |
| 1660 | |
| 1661 | /* Turn on send data initiator state machine */ |
| 1662 | if (sc->bnx_flags & BNX_FLAG_TSO) { |
| 1663 | CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE | |
| 1664 | BGE_SDIMODE_HW_LSO_PRE_DMA); |
| 1665 | } else { |
| 1666 | CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE); |
| 1667 | } |
| 1668 | |
| 1669 | /* Turn on send BD initiator state machine */ |
| 1670 | CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE); |
| 1671 | |
| 1672 | /* Turn on send BD selector state machine */ |
| 1673 | CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE); |
| 1674 | |
| 1675 | CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF); |
| 1676 | CSR_WRITE_4(sc, BGE_SDI_STATS_CTL, |
| 1677 | BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER); |
| 1678 | |
| 1679 | /* ack/clear link change events */ |
| 1680 | CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED| |
| 1681 | BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE| |
| 1682 | BGE_MACSTAT_LINK_CHANGED); |
| 1683 | CSR_WRITE_4(sc, BGE_MI_STS, 0); |
| 1684 | |
| 1685 | /* |
| 1686 | * Enable attention when the link has changed state for |
| 1687 | * devices that use auto polling. |
| 1688 | */ |
| 1689 | if (sc->bnx_flags & BNX_FLAG_TBI) { |
| 1690 | CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK); |
| 1691 | } else { |
| 1692 | if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) { |
| 1693 | CSR_WRITE_4(sc, BGE_MI_MODE, sc->bnx_mi_mode); |
| 1694 | DELAY(80); |
| 1695 | } |
| 1696 | } |
| 1697 | |
| 1698 | /* |
| 1699 | * Clear any pending link state attention.
| 1700 | * Otherwise some link state change events may be lost until the attention
| 1701 | * is cleared by the bnx_intr() -> bnx_softc.bnx_link_upd() sequence.
| 1702 | * This may not be necessary on newer BCM chips - perhaps enabling link
| 1703 | * state change attentions implies clearing any pending attention.
| 1704 | */ |
| 1705 | CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED| |
| 1706 | BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE| |
| 1707 | BGE_MACSTAT_LINK_CHANGED); |
| 1708 | |
| 1709 | /* Enable link state change attentions. */ |
| 1710 | BNX_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED); |
| 1711 | |
| 1712 | return(0); |
| 1713 | } |
| 1714 | |
| 1715 | /* |
| 1716 | * Probe for a Broadcom chip. Check the PCI vendor and device IDs
| 1717 | * against our list and return the compiled-in device name if we
| 1718 | * find a match. Only PCI Express controllers are claimed by this
| 1719 | * driver; anything else is rejected with ENXIO.
| 1722 | */ |
| 1723 | static int |
| 1724 | bnx_probe(device_t dev) |
| 1725 | { |
| 1726 | const struct bnx_type *t; |
| 1727 | uint16_t product, vendor; |
| 1728 | |
| 1729 | if (!pci_is_pcie(dev)) |
| 1730 | return ENXIO; |
| 1731 | |
| 1732 | product = pci_get_device(dev); |
| 1733 | vendor = pci_get_vendor(dev); |
| 1734 | |
| 1735 | for (t = bnx_devs; t->bnx_name != NULL; t++) { |
| 1736 | if (vendor == t->bnx_vid && product == t->bnx_did) |
| 1737 | break; |
| 1738 | } |
| 1739 | if (t->bnx_name == NULL) |
| 1740 | return ENXIO; |
| 1741 | |
| 1742 | device_set_desc(dev, t->bnx_name); |
| 1743 | return 0; |
| 1744 | } |
| 1745 | |
| 1746 | static int |
| 1747 | bnx_attach(device_t dev) |
| 1748 | { |
| 1749 | struct ifnet *ifp; |
| 1750 | struct bnx_softc *sc; |
| 1751 | uint32_t hwcfg = 0, misccfg; |
| 1752 | int error = 0, rid, capmask; |
| 1753 | uint8_t ether_addr[ETHER_ADDR_LEN]; |
| 1754 | uint16_t product, vendor; |
| 1755 | driver_intr_t *intr_func; |
| 1756 | uintptr_t mii_priv = 0; |
| 1757 | u_int intr_flags; |
| 1758 | #ifdef BNX_TSO_DEBUG |
| 1759 | char desc[32]; |
| 1760 | int i; |
| 1761 | #endif |
| 1762 | |
| 1763 | sc = device_get_softc(dev); |
| 1764 | sc->bnx_dev = dev; |
| 1765 | callout_init_mp(&sc->bnx_stat_timer); |
| 1766 | callout_init_mp(&sc->bnx_intr_timer); |
| 1767 | lwkt_serialize_init(&sc->bnx_jslot_serializer); |
| 1768 | |
| 1769 | product = pci_get_device(dev); |
| 1770 | vendor = pci_get_vendor(dev); |
| 1771 | |
| 1772 | #ifndef BURN_BRIDGES |
| 1773 | if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) { |
| 1774 | uint32_t irq, mem; |
| 1775 | |
| 1776 | irq = pci_read_config(dev, PCIR_INTLINE, 4); |
| 1777 | mem = pci_read_config(dev, BGE_PCI_BAR0, 4); |
| 1778 | |
| 1779 | device_printf(dev, "chip is in D%d power mode " |
| 1780 | "-- setting to D0\n", pci_get_powerstate(dev)); |
| 1781 | |
| 1782 | pci_set_powerstate(dev, PCI_POWERSTATE_D0); |
| 1783 | |
| 1784 | pci_write_config(dev, PCIR_INTLINE, irq, 4); |
| 1785 | pci_write_config(dev, BGE_PCI_BAR0, mem, 4); |
| 1786 | } |
| 1787 | #endif /* !BURN_BRIDGES */
| 1788 | |
| 1789 | /* |
| 1790 | * Map control/status registers. |
| 1791 | */ |
| 1792 | pci_enable_busmaster(dev); |
| 1793 | |
| 1794 | rid = BGE_PCI_BAR0; |
| 1795 | sc->bnx_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, |
| 1796 | RF_ACTIVE); |
| 1797 | |
| 1798 | if (sc->bnx_res == NULL) { |
| 1799 | device_printf(dev, "couldn't map memory\n"); |
| 1800 | return ENXIO; |
| 1801 | } |
| 1802 | |
| 1803 | sc->bnx_btag = rman_get_bustag(sc->bnx_res); |
| 1804 | sc->bnx_bhandle = rman_get_bushandle(sc->bnx_res); |
| 1805 | |
| 1806 | /* Save various chip information */ |
| 1807 | sc->bnx_chipid = |
| 1808 | pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >> |
| 1809 | BGE_PCIMISCCTL_ASICREV_SHIFT; |
| 1810 | if (BGE_ASICREV(sc->bnx_chipid) == BGE_ASICREV_USE_PRODID_REG) { |
| 1811 | /* All chips having dedicated ASICREV register have CPMU */ |
| 1812 | sc->bnx_flags |= BNX_FLAG_CPMU; |
| 1813 | |
| 1814 | switch (product) { |
| 1815 | case PCI_PRODUCT_BROADCOM_BCM5717: |
| 1816 | case PCI_PRODUCT_BROADCOM_BCM5718: |
| 1817 | case PCI_PRODUCT_BROADCOM_BCM5719: |
| 1818 | case PCI_PRODUCT_BROADCOM_BCM5720_ALT: |
| 1819 | sc->bnx_chipid = pci_read_config(dev, |
| 1820 | BGE_PCI_GEN2_PRODID_ASICREV, 4); |
| 1821 | break; |
| 1822 | |
| 1823 | case PCI_PRODUCT_BROADCOM_BCM57761: |
| 1824 | case PCI_PRODUCT_BROADCOM_BCM57762: |
| 1825 | case PCI_PRODUCT_BROADCOM_BCM57765: |
| 1826 | case PCI_PRODUCT_BROADCOM_BCM57766: |
| 1827 | case PCI_PRODUCT_BROADCOM_BCM57781: |
| 1828 | case PCI_PRODUCT_BROADCOM_BCM57782: |
| 1829 | case PCI_PRODUCT_BROADCOM_BCM57785: |
| 1830 | case PCI_PRODUCT_BROADCOM_BCM57786: |
| 1831 | case PCI_PRODUCT_BROADCOM_BCM57791: |
| 1832 | case PCI_PRODUCT_BROADCOM_BCM57795: |
| 1833 | sc->bnx_chipid = pci_read_config(dev, |
| 1834 | BGE_PCI_GEN15_PRODID_ASICREV, 4); |
| 1835 | break; |
| 1836 | |
| 1837 | default: |
| 1838 | sc->bnx_chipid = pci_read_config(dev, |
| 1839 | BGE_PCI_PRODID_ASICREV, 4); |
| 1840 | break; |
| 1841 | } |
| 1842 | } |
| 1843 | sc->bnx_asicrev = BGE_ASICREV(sc->bnx_chipid); |
| 1844 | sc->bnx_chiprev = BGE_CHIPREV(sc->bnx_chipid); |
| 1845 | |
| 1846 | switch (sc->bnx_asicrev) { |
| 1847 | case BGE_ASICREV_BCM5717: |
| 1848 | case BGE_ASICREV_BCM5719: |
| 1849 | case BGE_ASICREV_BCM5720: |
| 1850 | sc->bnx_flags |= BNX_FLAG_5717_PLUS | BNX_FLAG_57765_PLUS; |
| 1851 | break; |
| 1852 | |
| 1853 | case BGE_ASICREV_BCM57765: |
| 1854 | case BGE_ASICREV_BCM57766: |
| 1855 | sc->bnx_flags |= BNX_FLAG_57765_FAMILY | BNX_FLAG_57765_PLUS; |
| 1856 | break; |
| 1857 | } |
| 1858 | sc->bnx_flags |= BNX_FLAG_SHORTDMA; |
| 1859 | |
| 1860 | sc->bnx_flags |= BNX_FLAG_TSO; |
| 1861 | if (sc->bnx_asicrev == BGE_ASICREV_BCM5719 && |
| 1862 | sc->bnx_chipid == BGE_CHIPID_BCM5719_A0) |
| 1863 | sc->bnx_flags &= ~BNX_FLAG_TSO; |
| 1864 | |
| 1865 | if (sc->bnx_asicrev == BGE_ASICREV_BCM5717 || |
| 1866 | BNX_IS_57765_FAMILY(sc)) { |
| 1867 | /* |
| 1868 | * All chips in the BCM57785 and BCM5718 families have a bug:
| 1869 | * under certain circumstances the interrupt will not be enabled
| 1870 | * even if the status tag is written to the BGE_MBX_IRQ0_LO mailbox.
| 1871 | *
| 1872 | * The BCM5719 and BCM5720 have a hardware workaround that can
| 1873 | * fix the above bug.
| 1874 | * See the comment near BGE_PCIDMARWCTL_TAGGED_STATUS_WA in
| 1875 | * bnx_chipinit().
| 1876 | *
| 1877 | * For the rest of the chips in these two families, we have to
| 1878 | * poll the status block at a high rate (10ms currently) to
| 1879 | * check whether the interrupt is hosed or not.
| 1880 | * See bnx_intr_check() for details.
| 1881 | */ |
| 1882 | sc->bnx_flags |= BNX_FLAG_STATUSTAG_BUG; |
| 1883 | } |
| 1884 | |
| 1885 | misccfg = CSR_READ_4(sc, BGE_MISC_CFG) & BGE_MISCCFG_BOARD_ID_MASK; |
| 1886 | |
| 1887 | sc->bnx_pciecap = pci_get_pciecap_ptr(sc->bnx_dev); |
| 1888 | if (sc->bnx_asicrev == BGE_ASICREV_BCM5719 || |
| 1889 | sc->bnx_asicrev == BGE_ASICREV_BCM5720) |
| 1890 | pcie_set_max_readrq(dev, PCIEM_DEVCTL_MAX_READRQ_2048); |
| 1891 | else |
| 1892 | pcie_set_max_readrq(dev, PCIEM_DEVCTL_MAX_READRQ_4096); |
| 1893 | device_printf(dev, "CHIP ID 0x%08x; " |
| 1894 | "ASIC REV 0x%02x; CHIP REV 0x%02x\n", |
| 1895 | sc->bnx_chipid, sc->bnx_asicrev, sc->bnx_chiprev); |
| 1896 | |
| 1897 | /* |
| 1898 | * Set various PHY quirk flags. |
| 1899 | */ |
| 1900 | |
| 1901 | capmask = MII_CAPMASK_DEFAULT; |
| 1902 | if (product == PCI_PRODUCT_BROADCOM_BCM57791 || |
| 1903 | product == PCI_PRODUCT_BROADCOM_BCM57795) { |
| 1904 | /* 10/100 only */ |
| 1905 | capmask &= ~BMSR_EXTSTAT; |
| 1906 | } |
| 1907 | |
| 1908 | mii_priv |= BRGPHY_FLAG_WIRESPEED; |
| 1909 | |
| 1910 | /* |
| 1911 | * Allocate interrupt |
| 1912 | */ |
| 1913 | sc->bnx_irq_type = pci_alloc_1intr(dev, bnx_msi_enable, &sc->bnx_irq_rid, |
| 1914 | &intr_flags); |
| 1915 | |
| 1916 | sc->bnx_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->bnx_irq_rid, |
| 1917 | intr_flags); |
| 1918 | if (sc->bnx_irq == NULL) { |
| 1919 | device_printf(dev, "couldn't map interrupt\n"); |
| 1920 | error = ENXIO; |
| 1921 | goto fail; |
| 1922 | } |
| 1923 | |
| 1924 | if (sc->bnx_irq_type == PCI_INTR_TYPE_MSI) { |
| 1925 | sc->bnx_flags |= BNX_FLAG_ONESHOT_MSI; |
| 1926 | bnx_enable_msi(sc); |
| 1927 | } |
| 1928 | |
| 1929 | /* Initialize if_name early so that if_printf() can be used */
| 1930 | ifp = &sc->arpcom.ac_if; |
| 1931 | if_initname(ifp, device_get_name(dev), device_get_unit(dev)); |
| 1932 | |
| 1933 | /* Try to reset the chip. */ |
| 1934 | bnx_reset(sc); |
| 1935 | |
| 1936 | if (bnx_chipinit(sc)) { |
| 1937 | device_printf(dev, "chip initialization failed\n"); |
| 1938 | error = ENXIO; |
| 1939 | goto fail; |
| 1940 | } |
| 1941 | |
| 1942 | /* |
| 1943 | * Get station address |
| 1944 | */ |
| 1945 | error = bnx_get_eaddr(sc, ether_addr); |
| 1946 | if (error) { |
| 1947 | device_printf(dev, "failed to read station address\n"); |
| 1948 | goto fail; |
| 1949 | } |
| 1950 | |
| 1951 | if (BNX_IS_57765_PLUS(sc)) { |
| 1952 | sc->bnx_return_ring_cnt = BGE_RETURN_RING_CNT; |
| 1953 | } else { |
| 1954 | /* 5705/5750 limits RX return ring to 512 entries. */ |
| 1955 | sc->bnx_return_ring_cnt = BGE_RETURN_RING_CNT_5705; |
| 1956 | } |
| 1957 | |
| 1958 | error = bnx_dma_alloc(sc); |
| 1959 | if (error) |
| 1960 | goto fail; |
| 1961 | |
| 1962 | /* Set default tuneable values. */ |
| 1963 | sc->bnx_rx_coal_ticks = BNX_RX_COAL_TICKS_DEF; |
| 1964 | sc->bnx_tx_coal_ticks = BNX_TX_COAL_TICKS_DEF; |
| 1965 | sc->bnx_rx_coal_bds = BNX_RX_COAL_BDS_DEF; |
| 1966 | sc->bnx_tx_coal_bds = BNX_TX_COAL_BDS_DEF; |
| 1967 | sc->bnx_rx_coal_bds_int = BNX_RX_COAL_BDS_DEF; |
| 1968 | sc->bnx_tx_coal_bds_int = BNX_TX_COAL_BDS_DEF; |
| 1969 | |
| 1970 | /* Set up ifnet structure */ |
| 1971 | ifp->if_softc = sc; |
| 1972 | ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; |
| 1973 | ifp->if_ioctl = bnx_ioctl; |
| 1974 | ifp->if_start = bnx_start; |
| 1975 | #ifdef DEVICE_POLLING |
| 1976 | ifp->if_poll = bnx_poll; |
| 1977 | #endif |
| 1978 | ifp->if_watchdog = bnx_watchdog; |
| 1979 | ifp->if_init = bnx_init; |
| 1980 | ifp->if_mtu = ETHERMTU; |
| 1981 | ifp->if_capabilities = IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU; |
| 1982 | ifq_set_maxlen(&ifp->if_snd, BGE_TX_RING_CNT - 1); |
| 1983 | ifq_set_ready(&ifp->if_snd); |
| 1984 | |
| 1985 | ifp->if_capabilities |= IFCAP_HWCSUM; |
| 1986 | ifp->if_hwassist = BNX_CSUM_FEATURES; |
| 1987 | if (sc->bnx_flags & BNX_FLAG_TSO) { |
| 1988 | ifp->if_capabilities |= IFCAP_TSO; |
| 1989 | ifp->if_hwassist |= CSUM_TSO; |
| 1990 | } |
| 1991 | ifp->if_capenable = ifp->if_capabilities; |
| 1992 | |
| 1993 | /* |
| 1994 | * Figure out what sort of media we have by checking the |
| 1995 | * hardware config word in the first 32k of NIC internal memory, |
| 1996 | * or fall back to examining the EEPROM if necessary. |
| 1997 | * Note: on some BCM5700 cards, this value appears to be unset. |
| 1998 | * If that's the case, we have to rely on identifying the NIC |
| 1999 | * by its PCI subsystem ID, as we do below for the SysKonnect |
| 2000 | * SK-9D41. |
| 2001 | */ |
| 2002 | if (bnx_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER) { |
| 2003 | hwcfg = bnx_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG); |
| 2004 | } else { |
| 2005 | if (bnx_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET, |
| 2006 | sizeof(hwcfg))) { |
| 2007 | device_printf(dev, "failed to read EEPROM\n"); |
| 2008 | error = ENXIO; |
| 2009 | goto fail; |
| 2010 | } |
| 2011 | hwcfg = ntohl(hwcfg); |
| 2012 | } |
| 2013 | |
| 2014 | /* The SysKonnect SK-9D41 is a 1000baseSX card. */ |
| 2015 | if (pci_get_subvendor(dev) == PCI_PRODUCT_SCHNEIDERKOCH_SK_9D41 || |
| 2016 | (hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER) |
| 2017 | sc->bnx_flags |= BNX_FLAG_TBI; |
| 2018 | |
| 2019 | /* Setup MI MODE */ |
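| | /* CPMU-capable chips presumably expect a constant 500kHz MII management clock. */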
| 2020 | if (sc->bnx_flags & BNX_FLAG_CPMU) |
| 2021 | sc->bnx_mi_mode = BGE_MIMODE_500KHZ_CONST; |
| 2022 | else |
| 2023 | sc->bnx_mi_mode = BGE_MIMODE_BASE; |
| 2024 | |
| 2025 | /* Set up the link status update callbacks */
| 2026 | if (sc->bnx_flags & BNX_FLAG_TBI) { |
| 2027 | sc->bnx_link_upd = bnx_tbi_link_upd; |
| 2028 | sc->bnx_link_chg = BGE_MACSTAT_LINK_CHANGED; |
| 2029 | } else if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) { |
| 2030 | sc->bnx_link_upd = bnx_autopoll_link_upd; |
| 2031 | sc->bnx_link_chg = BGE_MACSTAT_LINK_CHANGED; |
| 2032 | } else { |
| 2033 | sc->bnx_link_upd = bnx_copper_link_upd; |
| 2034 | sc->bnx_link_chg = BGE_MACSTAT_LINK_CHANGED; |
| 2035 | } |
| 2036 | |
| 2037 | /* Set default PHY address */ |
| 2038 | sc->bnx_phyno = 1; |
| 2039 | |
| 2040 | /* |
| 2041 | * PHY address mapping for various devices. |
| 2042 | * |
| 2043 | * | F0 Cu | F0 Sr | F1 Cu | F1 Sr | |
| 2044 | * ---------+-------+-------+-------+-------+ |
| 2045 | * BCM57XX | 1 | X | X | X | |
| 2046 | * BCM5704 | 1 | X | 1 | X | |
| 2047 | * BCM5717 | 1 | 8 | 2 | 9 | |
| 2048 | * BCM5719 | 1 | 8 | 2 | 9 | |
| 2049 | * BCM5720 | 1 | 8 | 2 | 9 | |
| 2050 | * |
| 2051 | * Other addresses may respond but they are not |
| 2052 | * IEEE compliant PHYs and should be ignored. |
| 2053 | */ |
| 2054 | if (BNX_IS_5717_PLUS(sc)) { |
| 2055 | int f; |
| 2056 | |
| 2057 | f = pci_get_function(dev); |
| 2058 | if (sc->bnx_chipid == BGE_CHIPID_BCM5717_A0) { |
| 2059 | if (CSR_READ_4(sc, BGE_SGDIG_STS) & |
| 2060 | BGE_SGDIGSTS_IS_SERDES) |
| 2061 | sc->bnx_phyno = f + 8; |
| 2062 | else |
| 2063 | sc->bnx_phyno = f + 1; |
| 2064 | } else { |
| 2065 | if (CSR_READ_4(sc, BGE_CPMU_PHY_STRAP) & |
| 2066 | BGE_CPMU_PHY_STRAP_IS_SERDES) |
| 2067 | sc->bnx_phyno = f + 8; |
| 2068 | else |
| 2069 | sc->bnx_phyno = f + 1; |
| 2070 | } |
| 2071 | } |
| 2072 | |
| 2073 | if (sc->bnx_flags & BNX_FLAG_TBI) { |
| 2074 | ifmedia_init(&sc->bnx_ifmedia, IFM_IMASK, |
| 2075 | bnx_ifmedia_upd, bnx_ifmedia_sts); |
| 2076 | ifmedia_add(&sc->bnx_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL); |
| 2077 | ifmedia_add(&sc->bnx_ifmedia, |
| 2078 | IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL); |
| 2079 | ifmedia_add(&sc->bnx_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL); |
| 2080 | ifmedia_set(&sc->bnx_ifmedia, IFM_ETHER|IFM_AUTO); |
| 2081 | sc->bnx_ifmedia.ifm_media = sc->bnx_ifmedia.ifm_cur->ifm_media; |
| 2082 | } else { |
| 2083 | struct mii_probe_args mii_args; |
| 2084 | |
| 2085 | mii_probe_args_init(&mii_args, bnx_ifmedia_upd, bnx_ifmedia_sts); |
| 2086 | mii_args.mii_probemask = 1 << sc->bnx_phyno; |
| 2087 | mii_args.mii_capmask = capmask; |
| 2088 | mii_args.mii_privtag = MII_PRIVTAG_BRGPHY; |
| 2089 | mii_args.mii_priv = mii_priv; |
| 2090 | |
| 2091 | error = mii_probe(dev, &sc->bnx_miibus, &mii_args); |
| 2092 | if (error) { |
| 2093 | device_printf(dev, "MII without any PHY!\n"); |
| 2094 | goto fail; |
| 2095 | } |
| 2096 | } |
| 2097 | |
| 2098 | /* |
| 2099 | * Create sysctl nodes. |
| 2100 | */ |
| 2101 | sysctl_ctx_init(&sc->bnx_sysctl_ctx); |
| 2102 | sc->bnx_sysctl_tree = SYSCTL_ADD_NODE(&sc->bnx_sysctl_ctx, |
| 2103 | SYSCTL_STATIC_CHILDREN(_hw), |
| 2104 | OID_AUTO, |
| 2105 | device_get_nameunit(dev), |
| 2106 | CTLFLAG_RD, 0, ""); |
| 2107 | if (sc->bnx_sysctl_tree == NULL) { |
| 2108 | device_printf(dev, "can't add sysctl node\n"); |
| 2109 | error = ENXIO; |
| 2110 | goto fail; |
| 2111 | } |
| 2112 | |
| 2113 | SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx, |
| 2114 | SYSCTL_CHILDREN(sc->bnx_sysctl_tree), |
| 2115 | OID_AUTO, "rx_coal_ticks", |
| 2116 | CTLTYPE_INT | CTLFLAG_RW, |
| 2117 | sc, 0, bnx_sysctl_rx_coal_ticks, "I", |
| 2118 | "Receive coalescing ticks (usec)."); |
| 2119 | SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx, |
| 2120 | SYSCTL_CHILDREN(sc->bnx_sysctl_tree), |
| 2121 | OID_AUTO, "tx_coal_ticks", |
| 2122 | CTLTYPE_INT | CTLFLAG_RW, |
| 2123 | sc, 0, bnx_sysctl_tx_coal_ticks, "I", |
| 2124 | "Transmit coalescing ticks (usec)."); |
| 2125 | SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx, |
| 2126 | SYSCTL_CHILDREN(sc->bnx_sysctl_tree), |
| 2127 | OID_AUTO, "rx_coal_bds", |
| 2128 | CTLTYPE_INT | CTLFLAG_RW, |
| 2129 | sc, 0, bnx_sysctl_rx_coal_bds, "I", |
| 2130 | "Receive max coalesced BD count."); |
| 2131 | SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx, |
| 2132 | SYSCTL_CHILDREN(sc->bnx_sysctl_tree), |
| 2133 | OID_AUTO, "tx_coal_bds", |
| 2134 | CTLTYPE_INT | CTLFLAG_RW, |
| 2135 | sc, 0, bnx_sysctl_tx_coal_bds, "I", |
| 2136 | "Transmit max coalesced BD count."); |
| 2137 | /* |
| 2138 | * A common design characteristic for many Broadcom |
| 2139 | * client controllers is that they only support a |
| 2140 | * single outstanding DMA read operation on the PCIe |
| 2141 | * bus. This means that it will take twice as long to |
| 2142 | * fetch a TX frame that is split into header and |
| 2143 | * payload buffers as it does to fetch a single, |
| 2144 | * contiguous TX frame (2 reads vs. 1 read). For these |
| 2145 | * controllers, coalescing buffers to reduce the number |
| 2146 | * of memory reads is an effective way to reach maximum
| 2147 | * performance (about 940Mbps). Without collapsing TX
| 2148 | * buffers the maximum TCP bulk transfer performance is
| 2149 | * about 850Mbps. However, forcing mbuf coalescing
| 2150 | * consumes a lot of CPU cycles, so it is left off by
| 2151 | * default.
| 2152 | */ |
| 2153 | SYSCTL_ADD_INT(&sc->bnx_sysctl_ctx, |
| 2154 | SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO, |
| 2155 | "force_defrag", CTLFLAG_RW, &sc->bnx_force_defrag, 0, |
| 2156 | "Force defragment on TX path"); |
| 2157 | |
| 2158 | SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx, |
| 2159 | SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO, |
| 2160 | "rx_coal_bds_int", CTLTYPE_INT | CTLFLAG_RW, |
| 2161 | sc, 0, bnx_sysctl_rx_coal_bds_int, "I", |
| 2162 | "Receive max coalesced BD count during interrupt."); |
| 2163 | SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx, |
| 2164 | SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO, |
| 2165 | "tx_coal_bds_int", CTLTYPE_INT | CTLFLAG_RW, |
| 2166 | sc, 0, bnx_sysctl_tx_coal_bds_int, "I", |
| 2167 | "Transmit max coalesced BD count during interrupt."); |
| 2168 | |
| 2169 | #ifdef BNX_TSO_DEBUG |
| 2170 | for (i = 0; i < BNX_TSO_NSTATS; ++i) { |
| 2171 | ksnprintf(desc, sizeof(desc), "tso%d", i + 1); |
| 2172 | SYSCTL_ADD_ULONG(&sc->bnx_sysctl_ctx, |
| 2173 | SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO, |
| 2174 | desc, CTLFLAG_RW, &sc->bnx_tsosegs[i], ""); |
| 2175 | } |
| 2176 | #endif |
| 2177 | |
| 2178 | /* |
| 2179 | * Call MI attach routine. |
| 2180 | */ |
| 2181 | ether_ifattach(ifp, ether_addr, NULL); |
| 2182 | |
| 2183 | if (sc->bnx_irq_type == PCI_INTR_TYPE_MSI) { |
| 2184 | if (sc->bnx_flags & BNX_FLAG_ONESHOT_MSI) { |
| 2185 | intr_func = bnx_msi_oneshot; |
| 2186 | if (bootverbose) |
| 2187 | device_printf(dev, "oneshot MSI\n"); |
| 2188 | } else { |
| 2189 | intr_func = bnx_msi; |
| 2190 | } |
| 2191 | } else { |
| 2192 | intr_func = bnx_intr_legacy; |
| 2193 | } |
| 2194 | error = bus_setup_intr(dev, sc->bnx_irq, INTR_MPSAFE, intr_func, sc, |
| 2195 | &sc->bnx_intrhand, ifp->if_serializer); |
| 2196 | if (error) { |
| 2197 | ether_ifdetach(ifp); |
| 2198 | device_printf(dev, "couldn't set up irq\n"); |
| 2199 | goto fail; |
| 2200 | } |
| 2201 | |
| 2202 | ifp->if_cpuid = rman_get_cpuid(sc->bnx_irq); |
| 2203 | KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus); |
| 2204 | |
| 2205 | sc->bnx_stat_cpuid = ifp->if_cpuid; |
| 2206 | sc->bnx_intr_cpuid = ifp->if_cpuid; |
| 2207 | |
| 2208 | return(0); |
| 2209 | fail: |
| 2210 | bnx_detach(dev); |
| 2211 | return(error); |
| 2212 | } |
| 2213 | |
| 2214 | static int |
| 2215 | bnx_detach(device_t dev) |
| 2216 | { |
| 2217 | struct bnx_softc *sc = device_get_softc(dev); |
| 2218 | |
| 2219 | if (device_is_attached(dev)) { |
| 2220 | struct ifnet *ifp = &sc->arpcom.ac_if; |
| 2221 | |
| 2222 | lwkt_serialize_enter(ifp->if_serializer); |
| 2223 | bnx_stop(sc); |
| 2224 | bnx_reset(sc); |
| 2225 | bus_teardown_intr(dev, sc->bnx_irq, sc->bnx_intrhand); |
| 2226 | lwkt_serialize_exit(ifp->if_serializer); |
| 2227 | |
| 2228 | ether_ifdetach(ifp); |
| 2229 | } |
| 2230 | |
| 2231 | if (sc->bnx_flags & BNX_FLAG_TBI) |
| 2232 | ifmedia_removeall(&sc->bnx_ifmedia); |
| 2233 | if (sc->bnx_miibus) |
| 2234 | device_delete_child(dev, sc->bnx_miibus); |
| 2235 | bus_generic_detach(dev); |
| 2236 | |
| 2237 | if (sc->bnx_irq != NULL) { |
| 2238 | bus_release_resource(dev, SYS_RES_IRQ, sc->bnx_irq_rid, |
| 2239 | sc->bnx_irq); |
| 2240 | } |
| 2241 | if (sc->bnx_irq_type == PCI_INTR_TYPE_MSI) |
| 2242 | pci_release_msi(dev); |
| 2243 | |
| 2244 | if (sc->bnx_res != NULL) { |
| 2245 | bus_release_resource(dev, SYS_RES_MEMORY, |
| 2246 | BGE_PCI_BAR0, sc->bnx_res); |
| 2247 | } |
| 2248 | |
| 2249 | if (sc->bnx_sysctl_tree != NULL) |
| 2250 | sysctl_ctx_free(&sc->bnx_sysctl_ctx); |
| 2251 | |
| 2252 | bnx_dma_free(sc); |
| 2253 | |
| 2254 | return 0; |
| 2255 | } |
| 2256 | |
| 2257 | static void |
| 2258 | bnx_reset(struct bnx_softc *sc) |
| 2259 | { |
| 2260 | device_t dev; |
| 2261 | uint32_t cachesize, command, pcistate, reset; |
| 2262 | void (*write_op)(struct bnx_softc *, uint32_t, uint32_t); |
| 2263 | int i, val = 0; |
| 2264 | uint16_t devctl; |
| 2265 | |
| 2266 | dev = sc->bnx_dev; |
| 2267 | |
| 2268 | if (sc->bnx_asicrev != BGE_ASICREV_BCM5906) |
| 2269 | write_op = bnx_writemem_direct; |
| 2270 | else |
| 2271 | write_op = bnx_writereg_ind; |
| 2272 | |
| 2273 | /* Save some important PCI state. */ |
| 2274 | cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4); |
| 2275 | command = pci_read_config(dev, BGE_PCI_CMD, 4); |
| 2276 | pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4); |
| 2277 | |
| 2278 | pci_write_config(dev, BGE_PCI_MISC_CTL, |
| 2279 | BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR| |
| 2280 | BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW| |
| 2281 | BGE_PCIMISCCTL_TAGGED_STATUS, 4); |
| 2282 | |
| 2283 | /* Disable fastboot on controllers that support it. */ |
| 2284 | if (bootverbose) |
| 2285 | if_printf(&sc->arpcom.ac_if, "Disabling fastboot\n"); |
| 2286 | CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0x0); |
| 2287 | |
| 2288 | /* |
| 2289 | * Write the magic number to SRAM at offset 0xB50. |
| 2290 | * When firmware finishes its initialization it will |
| 2291 | * write ~BGE_MAGIC_NUMBER to the same location. |
| 2292 | */ |
| 2293 | bnx_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER); |
| 2294 | |
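| | /* The (65 << 1) field presumably programs the GRC timer prescaler for a 66MHz core clock (66 - 1). */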
| 2295 | reset = BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1); |
| 2296 | |
| 2297 | /* XXX: Broadcom Linux driver. */ |
| 2298 | /* Force PCI-E 1.0a mode */ |
| 2299 | if (!BNX_IS_57765_PLUS(sc) && |
| 2300 | CSR_READ_4(sc, BGE_PCIE_PHY_TSTCTL) == |
| 2301 | (BGE_PCIE_PHY_TSTCTL_PSCRAM | |
| 2302 | BGE_PCIE_PHY_TSTCTL_PCIE10)) { |
| 2303 | CSR_WRITE_4(sc, BGE_PCIE_PHY_TSTCTL, |
| 2304 | BGE_PCIE_PHY_TSTCTL_PSCRAM); |
| 2305 | } |
| 2306 | if (sc->bnx_chipid != BGE_CHIPID_BCM5750_A0) { |
| 2307 | /* Prevent PCIE link training during global reset */ |
| 2308 | CSR_WRITE_4(sc, BGE_MISC_CFG, (1<<29)); |
| 2309 | reset |= (1<<29); |
| 2310 | } |
| 2311 | |
| 2312 | /* |
| 2313 | * Set GPHY Power Down Override to leave GPHY |
| 2314 | * powered up in D0 uninitialized. |
| 2315 | */ |
| 2316 | if ((sc->bnx_flags & BNX_FLAG_CPMU) == 0) |
| 2317 | reset |= BGE_MISCCFG_GPHY_PD_OVERRIDE; |
| 2318 | |
| 2319 | /* Issue global reset */ |
| 2320 | write_op(sc, BGE_MISC_CFG, reset); |
| 2321 | |
| 2322 | if (sc->bnx_asicrev == BGE_ASICREV_BCM5906) { |
| 2323 | uint32_t status, ctrl; |
| 2324 | |
| 2325 | status = CSR_READ_4(sc, BGE_VCPU_STATUS); |
| 2326 | CSR_WRITE_4(sc, BGE_VCPU_STATUS, |
| 2327 | status | BGE_VCPU_STATUS_DRV_RESET); |
| 2328 | ctrl = CSR_READ_4(sc, BGE_VCPU_EXT_CTRL); |
| 2329 | CSR_WRITE_4(sc, BGE_VCPU_EXT_CTRL, |
| 2330 | ctrl & ~BGE_VCPU_EXT_CTRL_HALT_CPU); |
| 2331 | } |
| 2332 | |
| 2333 | DELAY(1000); |
| 2334 | |
| 2335 | /* XXX: Broadcom Linux driver. */ |
| 2336 | if (sc->bnx_chipid == BGE_CHIPID_BCM5750_A0) { |
| 2337 | uint32_t v; |
| 2338 | |
| 2339 | DELAY(500000); /* wait for link training to complete */ |
| 2340 | v = pci_read_config(dev, 0xc4, 4); |
| 2341 | pci_write_config(dev, 0xc4, v | (1<<15), 4); |
| 2342 | } |
| 2343 | |
| 2344 | devctl = pci_read_config(dev, sc->bnx_pciecap + PCIER_DEVCTRL, 2); |
| 2345 | |
| 2346 | /* Disable no snoop and disable relaxed ordering. */ |
| 2347 | devctl &= ~(PCIEM_DEVCTL_RELAX_ORDER | PCIEM_DEVCTL_NOSNOOP); |
| 2348 | |
| 2349 | /* Old PCI-E chips only support a 128 byte Max Payload Size. */
| 2350 | if ((sc->bnx_flags & BNX_FLAG_CPMU) == 0) { |
| 2351 | devctl &= ~PCIEM_DEVCTL_MAX_PAYLOAD_MASK; |
| 2352 | devctl |= PCIEM_DEVCTL_MAX_PAYLOAD_128; |
| 2353 | } |
| 2354 | |
| 2355 | pci_write_config(dev, sc->bnx_pciecap + PCIER_DEVCTRL, |
| 2356 | devctl, 2); |
| 2357 | |
| 2358 | /* Clear error status. */ |
| 2359 | pci_write_config(dev, sc->bnx_pciecap + PCIER_DEVSTS, |
| 2360 | PCIEM_DEVSTS_CORR_ERR | |
| 2361 | PCIEM_DEVSTS_NFATAL_ERR | |
| 2362 | PCIEM_DEVSTS_FATAL_ERR | |
| 2363 | PCIEM_DEVSTS_UNSUPP_REQ, 2); |
| 2364 | |
| 2365 | /* Reset some of the PCI state that got zapped by reset */ |
| 2366 | pci_write_config(dev, BGE_PCI_MISC_CTL, |
| 2367 | BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR| |
| 2368 | BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW| |
| 2369 | BGE_PCIMISCCTL_TAGGED_STATUS, 4); |
| 2370 | pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4); |
| 2371 | pci_write_config(dev, BGE_PCI_CMD, command, 4); |
| 2372 | write_op(sc, BGE_MISC_CFG, (65 << 1)); |
| 2373 | |
| 2374 | /* Enable memory arbiter */ |
| 2375 | CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE); |
| 2376 | |
| 2377 | if (sc->bnx_asicrev == BGE_ASICREV_BCM5906) { |
| 2378 | for (i = 0; i < BNX_TIMEOUT; i++) { |
| 2379 | val = CSR_READ_4(sc, BGE_VCPU_STATUS); |
| 2380 | if (val & BGE_VCPU_STATUS_INIT_DONE) |
| 2381 | break; |
| 2382 | DELAY(100); |
| 2383 | } |
| 2384 | if (i == BNX_TIMEOUT) { |
| 2385 | if_printf(&sc->arpcom.ac_if, "reset timed out\n"); |
| 2386 | return; |
| 2387 | } |
| 2388 | } else { |
| 2389 | /* |
| 2390 | * Poll until we see the 1's complement of the magic number. |
| 2391 | * This indicates that the firmware initialization |
| 2392 | * is complete. |
| 2393 | */ |
| 2394 | for (i = 0; i < BNX_FIRMWARE_TIMEOUT; i++) { |
| 2395 | val = bnx_readmem_ind(sc, BGE_SOFTWARE_GENCOMM); |
| 2396 | if (val == ~BGE_MAGIC_NUMBER) |
| 2397 | break; |
| 2398 | DELAY(10); |
| 2399 | } |
| 2400 | if (i == BNX_FIRMWARE_TIMEOUT) { |
| 2401 | if_printf(&sc->arpcom.ac_if, "firmware handshake " |
| 2402 | "timed out, found 0x%08x\n", val); |
| 2403 | } |
| 2404 | |
| 2405 | /* BCM57765 A0 needs additional time before its registers can be accessed. */
| 2406 | if (sc->bnx_chipid == BGE_CHIPID_BCM57765_A0) |
| 2407 | DELAY(10 * 1000); |
| 2408 | } |
| 2409 | |
| 2410 | /* |
| 2411 | * XXX Wait for the value of the PCISTATE register to |
| 2412 | * return to its original pre-reset state. This is a |
| 2413 | * fairly good indicator of reset completion. If we don't |
| 2414 | * wait for the reset to fully complete, trying to read |
| 2415 | * from the device's non-PCI registers may yield garbage |
| 2416 | * results. |
| 2417 | */ |
| 2418 | for (i = 0; i < BNX_TIMEOUT; i++) { |
| 2419 | if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate) |
| 2420 | break; |
| 2421 | DELAY(10); |
| 2422 | } |
| 2423 | |
| 2424 | /* Fix up byte swapping */ |
| 2425 | CSR_WRITE_4(sc, BGE_MODE_CTL, bnx_dma_swap_options(sc)); |
| 2426 | |
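| | /* Clear the MAC mode; bnx_blockinit() reprograms it later. */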
| 2427 | CSR_WRITE_4(sc, BGE_MAC_MODE, 0); |
| 2428 | |
| 2429 | /* |
| 2430 | * The 5704 in TBI mode apparently needs some special |
| 2431 | * adjustment to ensure the SERDES drive level is set
| 2432 | * to 1.2V. |
| 2433 | */ |
| 2434 | if (sc->bnx_asicrev == BGE_ASICREV_BCM5704 && |
| 2435 | (sc->bnx_flags & BNX_FLAG_TBI)) { |
| 2436 | uint32_t serdescfg; |
| 2437 | |
| 2438 | serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG); |
| 2439 | serdescfg = (serdescfg & ~0xFFF) | 0x880; |
| 2440 | CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg); |
| 2441 | } |
| 2442 | |
| 2443 | /* XXX: Broadcom Linux driver. */ |
| 2444 | if (!BNX_IS_57765_PLUS(sc)) { |
| 2445 | uint32_t v; |
| 2446 | |
| 2447 | /* Enable Data FIFO protection. */ |
| 2448 | v = CSR_READ_4(sc, BGE_PCIE_TLDLPL_PORT); |
| 2449 | CSR_WRITE_4(sc, BGE_PCIE_TLDLPL_PORT, v | (1 << 25)); |
| 2450 | } |
| 2451 | |
| 2452 | DELAY(10000); |
| 2453 | |
| 2454 | if (sc->bnx_asicrev == BGE_ASICREV_BCM5720) { |
| 2455 | BNX_CLRBIT(sc, BGE_CPMU_CLCK_ORIDE, |
| 2456 | CPMU_CLCK_ORIDE_MAC_ORIDE_EN); |
| 2457 | } |
| 2458 | } |
| 2459 | |
| 2460 | /* |
| 2461 | * Frame reception handling. This is called if there's a frame |
| 2462 | * on the receive return list. |
| 2463 | * |
| 2464 | * Note: we have to be able to handle two possibilities here: |
| 2465 | * 1) the frame is from the jumbo receive ring
| 2466 | * 2) the frame is from the standard receive ring |
| 2467 | */ |
| 2468 | |
| 2469 | static void |
| 2470 | bnx_rxeof(struct bnx_softc *sc, uint16_t rx_prod) |
| 2471 | { |
| 2472 | struct ifnet *ifp; |
| 2473 | int stdcnt = 0, jumbocnt = 0; |
| 2474 | |
| 2475 | ifp = &sc->arpcom.ac_if; |
| 2476 | |
| 2477 | while (sc->bnx_rx_saved_considx != rx_prod) { |
| 2478 | struct bge_rx_bd *cur_rx; |
| 2479 | uint32_t rxidx; |
| 2480 | struct mbuf *m = NULL; |
| 2481 | uint16_t vlan_tag = 0; |
| 2482 | int have_tag = 0; |
| 2483 | |
| 2484 | cur_rx = |
| 2485 | &sc->bnx_ldata.bnx_rx_return_ring[sc->bnx_rx_saved_considx]; |
| 2486 | |
| 2487 | rxidx = cur_rx->bge_idx; |
| 2488 | BNX_INC(sc->bnx_rx_saved_considx, sc->bnx_return_ring_cnt); |
| 2489 | |
| 2490 | if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) { |
| 2491 | have_tag = 1; |
| 2492 | vlan_tag = cur_rx->bge_vlan_tag; |
| 2493 | } |
| 2494 | |
| 2495 | if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) { |
| 2496 | BNX_INC(sc->bnx_jumbo, BGE_JUMBO_RX_RING_CNT); |
| 2497 | jumbocnt++; |
| 2498 | |
| 2499 | if (rxidx != sc->bnx_jumbo) { |
| 2500 | ifp->if_ierrors++; |
| 2501 | if_printf(ifp, "sw jumbo index(%d) " |
| 2502 | "and hw jumbo index(%d) mismatch, drop!\n", |
| 2503 | sc->bnx_jumbo, rxidx); |
| 2504 | bnx_setup_rxdesc_jumbo(sc, rxidx); |
| 2505 | continue; |
| 2506 | } |
| 2507 | |
| 2508 | m = sc->bnx_cdata.bnx_rx_jumbo_chain[rxidx].bnx_mbuf; |
| 2509 | if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) { |
| 2510 | ifp->if_ierrors++; |
| 2511 | bnx_setup_rxdesc_jumbo(sc, sc->bnx_jumbo); |
| 2512 | continue; |
| 2513 | } |
| 2514 | if (bnx_newbuf_jumbo(sc, sc->bnx_jumbo, 0)) { |
| 2515 | ifp->if_ierrors++; |
| 2516 | bnx_setup_rxdesc_jumbo(sc, sc->bnx_jumbo); |
| 2517 | continue; |
| 2518 | } |
| 2519 | } else { |
| 2520 | BNX_INC(sc->bnx_std, BGE_STD_RX_RING_CNT); |
| 2521 | stdcnt++; |
| 2522 | |
| 2523 | if (rxidx != sc->bnx_std) { |
| 2524 | ifp->if_ierrors++; |
| 2525 | if_printf(ifp, "sw std index(%d) " |
| 2526 | "and hw std index(%d) mismatch, drop!\n", |
| 2527 | sc->bnx_std, rxidx); |
| 2528 | bnx_setup_rxdesc_std(sc, rxidx); |
| 2529 | continue; |
| 2530 | } |
| 2531 | |
| 2532 | m = sc->bnx_cdata.bnx_rx_std_chain[rxidx].bnx_mbuf; |
| 2533 | if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) { |
| 2534 | ifp->if_ierrors++; |
| 2535 | bnx_setup_rxdesc_std(sc, sc->bnx_std); |
| 2536 | continue; |
| 2537 | } |
| 2538 | if (bnx_newbuf_std(sc, sc->bnx_std, 0)) { |
| 2539 | ifp->if_ierrors++; |
| 2540 | bnx_setup_rxdesc_std(sc, sc->bnx_std); |
| 2541 | continue; |
| 2542 | } |
| 2543 | } |
| 2544 | |
| 2545 | ifp->if_ipackets++; |
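| | /* The BD length includes the FCS; strip it before handing the frame up. */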
| 2546 | m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN; |
| 2547 | m->m_pkthdr.rcvif = ifp; |
| 2548 | |
| 2549 | if ((ifp->if_capenable & IFCAP_RXCSUM) && |
| 2550 | (cur_rx->bge_flags & BGE_RXBDFLAG_IPV6) == 0) { |
| 2551 | if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) { |
| 2552 | m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; |
| 2553 | if ((cur_rx->bge_error_flag & |
| 2554 | BGE_RXERRFLAG_IP_CSUM_NOK) == 0) |
| 2555 | m->m_pkthdr.csum_flags |= CSUM_IP_VALID; |
| 2556 | } |
| 2557 | if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) { |
| 2558 | m->m_pkthdr.csum_data = |
| 2559 | cur_rx->bge_tcp_udp_csum; |
| 2560 | m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | |
| 2561 | CSUM_PSEUDO_HDR; |
| 2562 | } |
| 2563 | } |
| 2564 | |
| 2565 | /* |
| 2566 | * If we received a packet with a vlan tag, pass it |
| 2567 | * to vlan_input() instead of ether_input(). |
| 2568 | */ |
| 2569 | if (have_tag) { |
| 2570 | m->m_flags |= M_VLANTAG; |
| 2571 | m->m_pkthdr.ether_vlantag = vlan_tag; |
| 2572 | have_tag = vlan_tag = 0; |
| 2573 | } |
| 2574 | ifp->if_input(ifp, m); |
| 2575 | } |
| 2576 | |
| 2577 | bnx_writembx(sc, BGE_MBX_RX_CONS0_LO, sc->bnx_rx_saved_considx); |
| 2578 | if (stdcnt) |
| 2579 | bnx_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bnx_std); |
| 2580 | if (jumbocnt) |
| 2581 | bnx_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bnx_jumbo); |
| 2582 | } |
| 2583 | |
| 2584 | static void |
| 2585 | bnx_txeof(struct bnx_softc *sc, uint16_t tx_cons) |
| 2586 | { |
| 2587 | struct bge_tx_bd *cur_tx = NULL; |
| 2588 | struct ifnet *ifp; |
| 2589 | |
| 2590 | ifp = &sc->arpcom.ac_if; |
| 2591 | |
| 2592 | /* |
| 2593 | * Go through our tx ring and free mbufs for those |
| 2594 | * frames that have been sent. |
| 2595 | */ |
| 2596 | while (sc->bnx_tx_saved_considx != tx_cons) { |
| 2597 | uint32_t idx = 0; |
| 2598 | |
| 2599 | idx = sc->bnx_tx_saved_considx; |
| 2600 | cur_tx = &sc->bnx_ldata.bnx_tx_ring[idx]; |
| 2601 | if (cur_tx->bge_flags & BGE_TXBDFLAG_END) |
| 2602 | ifp->if_opackets++; |
| 2603 | if (sc->bnx_cdata.bnx_tx_chain[idx] != NULL) { |
| 2604 | bus_dmamap_unload(sc->bnx_cdata.bnx_tx_mtag, |
| 2605 | sc->bnx_cdata.bnx_tx_dmamap[idx]); |
| 2606 | m_freem(sc->bnx_cdata.bnx_tx_chain[idx]); |
| 2607 | sc->bnx_cdata.bnx_tx_chain[idx] = NULL; |
| 2608 | } |
| 2609 | sc->bnx_txcnt--; |
| 2610 | BNX_INC(sc->bnx_tx_saved_considx, BGE_TX_RING_CNT); |
| 2611 | } |
| 2612 | |
| 2613 | if (cur_tx != NULL && |
| 2614 | (BGE_TX_RING_CNT - sc->bnx_txcnt) >= |
| 2615 | (BNX_NSEG_RSVD + BNX_NSEG_SPARE)) |
| 2616 | ifp->if_flags &= ~IFF_OACTIVE; |
| 2617 | |
| 2618 | if (sc->bnx_txcnt == 0) |
| 2619 | ifp->if_timer = 0; |
| 2620 | |
| 2621 | if (!ifq_is_empty(&ifp->if_snd)) |
| 2622 | if_devstart(ifp); |
| 2623 | } |
| 2624 | |
| 2625 | #ifdef DEVICE_POLLING |
| 2626 | |
| 2627 | static void |
| 2628 | bnx_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) |
| 2629 | { |
| 2630 | struct bnx_softc *sc = ifp->if_softc; |
| 2631 | struct bge_status_block *sblk = sc->bnx_ldata.bnx_status_block; |
| 2632 | uint16_t rx_prod, tx_cons; |
| 2633 | |
| 2634 | switch(cmd) { |
| 2635 | case POLL_REGISTER: |
| 2636 | bnx_disable_intr(sc); |
| 2637 | break; |
| 2638 | case POLL_DEREGISTER: |
| 2639 | bnx_enable_intr(sc); |
| 2640 | break; |
| 2641 | case POLL_AND_CHECK_STATUS: |
| 2642 | /* |
| 2643 | * Process link state changes. |
| 2644 | */ |
| 2645 | bnx_link_poll(sc); |
| 2646 | /* Fall through */ |
| 2647 | case POLL_ONLY: |
| 2648 | sc->bnx_status_tag = sblk->bge_status_tag; |
| 2649 | /* |
| 2650 | * Use a load fence to ensure that status_tag |
| 2651 | * is saved before rx_prod and tx_cons. |
| 2652 | */ |
| 2653 | cpu_lfence(); |
| 2654 | |
| 2655 | rx_prod = sblk->bge_idx[0].bge_rx_prod_idx;
| 2656 | tx_cons = sblk->bge_idx[0].bge_tx_cons_idx;
| 2657 | if (ifp->if_flags & IFF_RUNNING) {
| 2659 | if (sc->bnx_rx_saved_considx != rx_prod)
| 2660 | bnx_rxeof(sc, rx_prod);
| 2661 | 
| 2663 | if (sc->bnx_tx_saved_considx != tx_cons)
| 2664 | bnx_txeof(sc, tx_cons);
| 2665 | } |
| 2666 | break; |
| 2667 | } |
| 2668 | } |
| 2669 | |
| 2670 | #endif |
| 2671 | |
| 2672 | static void |
| 2673 | bnx_intr_legacy(void *xsc) |
| 2674 | { |
| 2675 | struct bnx_softc *sc = xsc; |
| 2676 | struct bge_status_block *sblk = sc->bnx_ldata.bnx_status_block; |
| 2677 | |
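| | /*
| | * If the status tag has not changed, check the PCI state register
| | * to see whether this (possibly shared) interrupt was really ours.
| | */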
| 2678 | if (sc->bnx_status_tag == sblk->bge_status_tag) { |
| 2679 | uint32_t val; |
| 2680 | |
| 2681 | val = pci_read_config(sc->bnx_dev, BGE_PCI_PCISTATE, 4); |
| 2682 | if (val & BGE_PCISTAT_INTR_NOTACT) |
| 2683 | return; |
| 2684 | } |
| 2685 | |
| 2686 | /* |
| 2687 | * NOTE: |
| 2688 | * The interrupt has to be disabled if tagged status is
| 2689 | * used, else the interrupt will be asserted continuously
| 2690 | * on certain chips (at least on BCM5750 AX/BX).
| 2691 | */ |
| 2692 | bnx_writembx(sc, BGE_MBX_IRQ0_LO, 1); |
| 2693 | |
| 2694 | bnx_intr(sc); |
| 2695 | } |
| 2696 | |
| 2697 | static void |
| 2698 | bnx_msi(void *xsc) |
| 2699 | { |
| 2700 | struct bnx_softc *sc = xsc; |
| 2701 | |
| 2702 | /* Disable interrupt first */ |
| 2703 | bnx_writembx(sc, BGE_MBX_IRQ0_LO, 1); |
| 2704 | bnx_intr(sc); |
| 2705 | } |
| 2706 | |
| 2707 | static void |
| 2708 | bnx_msi_oneshot(void *xsc) |
| 2709 | { |
| 2710 | bnx_intr(xsc); |
| 2711 | } |
| 2712 | |
| 2713 | static void |
| 2714 | bnx_intr(struct bnx_softc *sc) |
| 2715 | { |
| 2716 | struct ifnet *ifp = &sc->arpcom.ac_if; |
| 2717 | struct bge_status_block *sblk = sc->bnx_ldata.bnx_status_block; |
| 2718 | uint16_t rx_prod, tx_cons; |
| 2719 | uint32_t status; |
| 2720 | |
| 2721 | sc->bnx_status_tag = sblk->bge_status_tag; |
| 2722 | /* |
| 2723 | * Use a load fence to ensure that status_tag is saved |
| 2724 | * before rx_prod, tx_cons and status. |
| 2725 | */ |
| 2726 | cpu_lfence(); |
| 2727 | |
| 2728 | rx_prod = sblk->bge_idx[0].bge_rx_prod_idx; |
| 2729 | tx_cons = sblk->bge_idx[0].bge_tx_cons_idx; |
| 2730 | status = sblk->bge_status; |
| 2731 | |
| 2732 | if ((status & BGE_STATFLAG_LINKSTATE_CHANGED) || sc->bnx_link_evt) |
| 2733 | bnx_link_poll(sc); |
| 2734 | |
| 2735 | if (ifp->if_flags & IFF_RUNNING) { |
| 2736 | if (sc->bnx_rx_saved_considx != rx_prod) |
| 2737 | bnx_rxeof(sc, rx_prod); |
| 2738 | |
| 2739 | if (sc->bnx_tx_saved_considx != tx_cons) |
| 2740 | bnx_txeof(sc, tx_cons); |
| 2741 | } |
| 2742 | |
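| | /*
| | * Write the processed status tag to bits 24-31 of the mailbox
| | * to ACK the status block and re-enable the interrupt.
| | */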
| 2743 | bnx_writembx(sc, BGE_MBX_IRQ0_LO, sc->bnx_status_tag << 24); |
| 2744 | |
| 2745 | if (sc->bnx_coal_chg) |
| 2746 | bnx_coal_change(sc); |
| 2747 | } |
| 2748 | |
| 2749 | static void |
| 2750 | bnx_tick(void *xsc) |
| 2751 | { |
| 2752 | struct bnx_softc *sc = xsc; |
| 2753 | struct ifnet *ifp = &sc->arpcom.ac_if; |
| 2754 | |
| 2755 | lwkt_serialize_enter(ifp->if_serializer); |
| 2756 | |
| 2757 | KKASSERT(mycpuid == sc->bnx_stat_cpuid); |
| 2758 | |
| 2759 | bnx_stats_update_regs(sc); |
| 2760 | |
| 2761 | if (sc->bnx_flags & BNX_FLAG_TBI) { |
| 2762 | /* |
| 2763 | * Since auto-polling can't be used in TBI mode, we poll the
| 2764 | * link status manually. Here we register a pending link event
| 2765 | * and trigger an interrupt.
| 2766 | */ |
| 2767 | sc->bnx_link_evt++; |
| 2768 | BNX_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW); |
| 2769 | } else if (!sc->bnx_link) { |
| 2770 | mii_tick(device_get_softc(sc->bnx_miibus)); |
| 2771 | } |
| 2772 | |
| 2773 | callout_reset(&sc->bnx_stat_timer, hz, bnx_tick, sc); |
| 2774 | |
| 2775 | lwkt_serialize_exit(ifp->if_serializer); |
| 2776 | } |
| 2777 | |
| 2778 | static void |
| 2779 | bnx_stats_update_regs(struct bnx_softc *sc) |
| 2780 | { |
| 2781 | struct ifnet *ifp = &sc->arpcom.ac_if; |
| 2782 | struct bge_mac_stats_regs stats; |
| 2783 | uint32_t *s; |
| 2784 | int i; |
| 2785 | |
| 2786 | s = (uint32_t *)&stats; |
| 2787 | for (i = 0; i < sizeof(struct bge_mac_stats_regs); i += 4) { |
| 2788 | *s = CSR_READ_4(sc, BGE_RX_STATS + i); |
| 2789 | s++; |
| 2790 | } |
| 2791 | |
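| | /*
| | * The expression below simply sets if_collisions to the sum of
| | * the four collision counters read above.
| | */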
| 2792 | ifp->if_collisions += |
| 2793 | (stats.dot3StatsSingleCollisionFrames + |
| 2794 | stats.dot3StatsMultipleCollisionFrames + |
| 2795 | stats.dot3StatsExcessiveCollisions + |
| 2796 | stats.dot3StatsLateCollisions) - |
| 2797 | ifp->if_collisions; |
| 2798 | } |
| 2799 | |
| 2800 | /* |
| 2801 | * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data |
| 2802 | * pointers to descriptors. |
| 2803 | */ |
| 2804 | static int |
| 2805 | bnx_encap(struct bnx_softc *sc, struct mbuf **m_head0, uint32_t *txidx) |
| 2806 | { |
| 2807 | struct bge_tx_bd *d = NULL; |
| 2808 | uint16_t csum_flags = 0, vlan_tag = 0, mss = 0; |
| 2809 | bus_dma_segment_t segs[BNX_NSEG_NEW]; |
| 2810 | bus_dmamap_t map; |
| 2811 | int error, maxsegs, nsegs, idx, i; |
| 2812 | struct mbuf *m_head = *m_head0, *m_new; |
| 2813 | |
| 2814 | if (m_head->m_pkthdr.csum_flags & CSUM_TSO) { |
| 2815 | #ifdef BNX_TSO_DEBUG |
| 2816 | int tso_nsegs; |
| 2817 | #endif |
| 2818 | |
| 2819 | error = bnx_setup_tso(sc, m_head0, &mss, &csum_flags); |
| 2820 | if (error) |
| 2821 | return error; |
| 2822 | m_head = *m_head0; |
| 2823 | |
| 2824 | #ifdef BNX_TSO_DEBUG |
| 2825 | tso_nsegs = (m_head->m_pkthdr.len / |
| 2826 | m_head->m_pkthdr.tso_segsz) - 1; |
| 2827 | if (tso_nsegs > (BNX_TSO_NSTATS - 1)) |
| 2828 | tso_nsegs = BNX_TSO_NSTATS - 1; |
| 2829 | else if (tso_nsegs < 0) |
| 2830 | tso_nsegs = 0; |
| 2831 | sc->bnx_tsosegs[tso_nsegs]++; |
| 2832 | #endif |
| 2833 | } else if (m_head->m_pkthdr.csum_flags & BNX_CSUM_FEATURES) { |
| 2834 | if (m_head->m_pkthdr.csum_flags & CSUM_IP) |
| 2835 | csum_flags |= BGE_TXBDFLAG_IP_CSUM; |
| 2836 | if (m_head->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) |
| 2837 | csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM; |
| 2838 | if (m_head->m_flags & M_LASTFRAG) |
| 2839 | csum_flags |= BGE_TXBDFLAG_IP_FRAG_END; |
| 2840 | else if (m_head->m_flags & M_FRAG) |
| 2841 | csum_flags |= BGE_TXBDFLAG_IP_FRAG; |
| 2842 | } |
| 2843 | if (m_head->m_flags & M_VLANTAG) { |
| 2844 | csum_flags |= BGE_TXBDFLAG_VLAN_TAG; |
| 2845 | vlan_tag = m_head->m_pkthdr.ether_vlantag; |
| 2846 | } |
| 2847 | |
| 2848 | idx = *txidx; |
| 2849 | map = sc->bnx_cdata.bnx_tx_dmamap[idx]; |
| 2850 | |
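| | /* Keep BNX_NSEG_RSVD descriptors in reserve so the TX ring never fills completely. */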
| 2851 | maxsegs = (BGE_TX_RING_CNT - sc->bnx_txcnt) - BNX_NSEG_RSVD; |
| 2852 | KASSERT(maxsegs >= BNX_NSEG_SPARE, |
| 2853 | ("not enough segments %d", maxsegs)); |
| 2854 | |
| 2855 | if (maxsegs > BNX_NSEG_NEW) |
| 2856 | maxsegs = BNX_NSEG_NEW; |
| 2857 | |
| 2858 | /* |
| 2859 | * Pad outbound frame to BNX_MIN_FRAMELEN for an unusual reason.
| 2860 | * The bge hardware will pad out Tx runts to BGE_MIN_FRAMELEN, |
| 2861 | * but when such padded frames employ the bge IP/TCP checksum |
| 2862 | * offload, the hardware checksum assist gives incorrect results |
| 2863 | * (possibly from incorporating its own padding into the UDP/TCP |
| 2864 | * checksum; who knows). If we pad such runts with zeros, the |
| 2865 | * onboard checksum comes out correct. |
| 2866 | */ |
| 2867 | if ((csum_flags & BGE_TXBDFLAG_TCP_UDP_CSUM) && |
| 2868 | m_head->m_pkthdr.len < BNX_MIN_FRAMELEN) { |
| 2869 | error = m_devpad(m_head, BNX_MIN_FRAMELEN); |
| 2870 | if (error) |
| 2871 | goto back; |
| 2872 | } |
| 2873 | |
| 2874 | if ((sc->bnx_flags & BNX_FLAG_SHORTDMA) && m_head->m_next != NULL) { |
| 2875 | m_new = bnx_defrag_shortdma(m_head); |
| 2876 | if (m_new == NULL) { |
| 2877 | error = ENOBUFS; |
| 2878 | goto back; |
| 2879 | } |
| 2880 | *m_head0 = m_head = m_new; |
| 2881 | } |
| 2882 | if ((m_head->m_pkthdr.csum_flags & CSUM_TSO) == 0 && |
| 2883 | sc->bnx_force_defrag && m_head->m_next != NULL) { |
| 2884 | /* |
| 2885 | * Forcefully defragment mbuf chain to overcome hardware |
| 2886 | * limitation which only supports a single outstanding
| 2887 | * DMA read operation. If it fails, keep moving on using |
| 2888 | * the original mbuf chain. |
| 2889 | */ |
| 2890 | m_new = m_defrag(m_head, MB_DONTWAIT); |
| 2891 | if (m_new != NULL) |
| 2892 | *m_head0 = m_head = m_new; |
| 2893 | } |
| 2894 | |
| 2895 | error = bus_dmamap_load_mbuf_defrag(sc->bnx_cdata.bnx_tx_mtag, map, |
| 2896 | m_head0, segs, maxsegs, &nsegs, BUS_DMA_NOWAIT); |
| 2897 | if (error) |
| 2898 | goto back; |
| 2899 | |
| 2900 | m_head = *m_head0; |
| 2901 | bus_dmamap_sync(sc->bnx_cdata.bnx_tx_mtag, map, BUS_DMASYNC_PREWRITE); |
| 2902 | |
| 2903 | for (i = 0; ; i++) { |
| 2904 | d = &sc->bnx_ldata.bnx_tx_ring[idx]; |
| 2905 | |
| 2906 | d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr); |
| 2907 | d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr); |
| 2908 | d->bge_len = segs[i].ds_len; |
| 2909 | d->bge_flags = csum_flags; |
| 2910 | d->bge_vlan_tag = vlan_tag; |
| 2911 | d->bge_mss = mss; |
| 2912 | |
| 2913 | if (i == nsegs - 1) |
| 2914 | break; |
| 2915 | BNX_INC(idx, BGE_TX_RING_CNT); |
| 2916 | } |
| 2917 | /* Mark the last segment as end of packet... */ |
| 2918 | d->bge_flags |= BGE_TXBDFLAG_END; |
| 2919 | |
| 2920 | /* |
| 2921 | * Ensure that the map for this transmission is placed at
| 2922 | * the array index of the last descriptor in this chain. |
| 2923 | */ |
| 2924 | sc->bnx_cdata.bnx_tx_dmamap[*txidx] = sc->bnx_cdata.bnx_tx_dmamap[idx]; |
| 2925 | sc->bnx_cdata.bnx_tx_dmamap[idx] = map; |
| 2926 | sc->bnx_cdata.bnx_tx_chain[idx] = m_head; |
| 2927 | sc->bnx_txcnt += nsegs; |
| 2928 | |
| 2929 | BNX_INC(idx, BGE_TX_RING_CNT); |
| 2930 | *txidx = idx; |
| 2931 | back: |
| 2932 | if (error) { |
| 2933 | m_freem(*m_head0); |
| 2934 | *m_head0 = NULL; |
| 2935 | } |
| 2936 | return error; |
| 2937 | } |
| 2938 | |
| 2939 | /* |
| 2940 | * Main transmit routine. To avoid having to do mbuf copies, we put pointers |
| 2941 | * to the mbuf data regions directly in the transmit descriptors. |
| 2942 | */ |
| 2943 | static void |
| 2944 | bnx_start(struct ifnet *ifp) |
| 2945 | { |
| 2946 | struct bnx_softc *sc = ifp->if_softc; |
| 2947 | struct mbuf *m_head = NULL; |
| 2948 | uint32_t prodidx; |
| 2949 | int need_trans; |
| 2950 | |
| 2951 | if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) |
| 2952 | return; |
| 2953 | |
| 2954 | prodidx = sc->bnx_tx_prodidx; |
| 2955 | |
| 2956 | need_trans = 0; |
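| | /* Queue frames for as long as the descriptor slot at prodidx is free. */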
| 2957 | while (sc->bnx_cdata.bnx_tx_chain[prodidx] == NULL) { |
| 2958 | /* |
| 2959 | * Sanity check: avoid coming within BNX_NSEG_RSVD
| 2960 | * descriptors of the end of the ring. Also make
| 2961 | * sure there are BNX_NSEG_SPARE descriptors for
| 2962 | * jumbo buffers' or TSO segments' defragmentation. |
| 2963 | */ |
| 2964 | if ((BGE_TX_RING_CNT - sc->bnx_txcnt) < |
| 2965 | (BNX_NSEG_RSVD + BNX_NSEG_SPARE)) { |
| 2966 | ifp->if_flags |= IFF_OACTIVE; |
| 2967 | break; |
| 2968 | } |
| 2969 | |
| 2970 | m_head = ifq_dequeue(&ifp->if_snd, NULL); |
| 2971 | if (m_head == NULL) |
| 2972 | break; |
| 2973 | |
| 2974 | /* |
| 2975 | * Pack the data into the transmit ring. If we |
| 2976 | * don't have room, set the OACTIVE flag and wait |
| 2977 | * for the NIC to drain the ring. |
| 2978 | */ |
| 2979 | if (bnx_encap(sc, &m_head, &prodidx)) { |
| 2980 | ifp->if_flags |= IFF_OACTIVE; |
| 2981 | ifp->if_oerrors++; |
| 2982 | break; |
| 2983 | } |
| 2984 | need_trans = 1; |
| 2985 | |
| 2986 | ETHER_BPF_MTAP(ifp, m_head); |
| 2987 | } |
| 2988 | |
| 2989 | if (!need_trans) |
| 2990 | return; |
| 2991 | |
| 2992 | /* Transmit */ |
| 2993 | bnx_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx); |
| 2994 | |
| 2995 | sc->bnx_tx_prodidx = prodidx; |
| 2996 | |
| 2997 | /* |
| 2998 | * Set a timeout in case the chip goes out to lunch. |
| 2999 | */ |
| 3000 | ifp->if_timer = 5; |
| 3001 | } |
| 3002 | |
| 3003 | static void |
| 3004 | bnx_init(void *xsc) |
| 3005 | { |
| 3006 | struct bnx_softc *sc = xsc; |
| 3007 | struct ifnet *ifp = &sc->arpcom.ac_if; |
| 3008 | uint16_t *m; |
| 3009 | uint32_t mode; |
| 3010 | |
| 3011 | ASSERT_SERIALIZED(ifp->if_serializer); |
| 3012 | |
| 3013 | /* Cancel pending I/O and flush buffers. */ |
| 3014 | bnx_stop(sc); |
| 3015 | bnx_reset(sc); |
| 3016 | bnx_chipinit(sc); |
| 3017 | |
| 3018 | /* |
| 3019 | * Init the various state machines, ring |
| 3020 | * control blocks and firmware. |
| 3021 | */ |
| 3022 | if (bnx_blockinit(sc)) { |
| 3023 | if_printf(ifp, "initialization failure\n"); |
| 3024 | bnx_stop(sc); |
| 3025 | return; |
| 3026 | } |
| 3027 | |
	/* Program the maximum RX frame size: MTU + header + CRC + VLAN tag. */
| 3029 | CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu + |
| 3030 | ETHER_HDR_LEN + ETHER_CRC_LEN + EVL_ENCAPLEN); |
| 3031 | |
| 3032 | /* Load our MAC address. */ |
| 3033 | m = (uint16_t *)&sc->arpcom.ac_enaddr[0]; |
| 3034 | CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0])); |
| 3035 | CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2])); |
| 3036 | |
| 3037 | /* Enable or disable promiscuous mode as needed. */ |
| 3038 | bnx_setpromisc(sc); |
| 3039 | |
| 3040 | /* Program multicast filter. */ |
| 3041 | bnx_setmulti(sc); |
| 3042 | |
| 3043 | /* Init RX ring. */ |
| 3044 | if (bnx_init_rx_ring_std(sc)) { |
| 3045 | if_printf(ifp, "RX ring initialization failed\n"); |
| 3046 | bnx_stop(sc); |
| 3047 | return; |
| 3048 | } |
| 3049 | |
| 3050 | /* Init jumbo RX ring. */ |
| 3051 | if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN)) { |
| 3052 | if (bnx_init_rx_ring_jumbo(sc)) { |
| 3053 | if_printf(ifp, "Jumbo RX ring initialization failed\n"); |
| 3054 | bnx_stop(sc); |
| 3055 | return; |
| 3056 | } |
| 3057 | } |
| 3058 | |
| 3059 | /* Init our RX return ring index */ |
| 3060 | sc->bnx_rx_saved_considx = 0; |
| 3061 | |
| 3062 | /* Init TX ring. */ |
| 3063 | bnx_init_tx_ring(sc); |
| 3064 | |
| 3065 | /* Enable TX MAC state machine lockup fix. */ |
| 3066 | mode = CSR_READ_4(sc, BGE_TX_MODE); |
| 3067 | mode |= BGE_TXMODE_MBUF_LOCKUP_FIX; |
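	/*
	 * Note: on the BCM5720 the jumbo frame length and count-down
	 * mode bits are left at whatever the chip currently has instead
	 * of being forced, hence the read-modify below.
	 */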
| 3068 | if (sc->bnx_asicrev == BGE_ASICREV_BCM5720) { |
| 3069 | mode &= ~(BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE); |
| 3070 | mode |= CSR_READ_4(sc, BGE_TX_MODE) & |
| 3071 | (BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE); |
| 3072 | } |
| 3073 | /* Turn on transmitter */ |
| 3074 | CSR_WRITE_4(sc, BGE_TX_MODE, mode | BGE_TXMODE_ENABLE); |
| 3075 | |
| 3076 | /* Turn on receiver */ |
| 3077 | BNX_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE); |
| 3078 | |
| 3079 | /* |
| 3080 | * Set the number of good frames to receive after RX MBUF |
| 3081 | * Low Watermark has been reached. After the RX MAC receives |
| 3082 | * this number of frames, it will drop subsequent incoming |
| 3083 | * frames until the MBUF High Watermark is reached. |
| 3084 | */ |
| 3085 | if (BNX_IS_57765_FAMILY(sc)) |
| 3086 | CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 1); |
| 3087 | else |
| 3088 | CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 2); |
| 3089 | |
| 3090 | if (sc->bnx_irq_type == PCI_INTR_TYPE_MSI) { |
| 3091 | if (bootverbose) { |
| 3092 | if_printf(ifp, "MSI_MODE: %#x\n", |
| 3093 | CSR_READ_4(sc, BGE_MSI_MODE)); |
| 3094 | } |
| 3095 | } |
| 3096 | |
| 3097 | /* Tell firmware we're alive. */ |
| 3098 | BNX_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); |
| 3099 | |
	/* Clear stale INTA, then enable host interrupts unless polling(4) is active. */
| 3101 | PCI_SETBIT(sc->bnx_dev, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA, 4); |
| 3102 | #ifdef DEVICE_POLLING |
| 3103 | if (ifp->if_flags & IFF_POLLING) |
| 3104 | bnx_disable_intr(sc); |
| 3105 | else |
| 3106 | #endif |
| 3107 | bnx_enable_intr(sc); |
| 3108 | |
| 3109 | bnx_ifmedia_upd(ifp); |
| 3110 | |
| 3111 | ifp->if_flags |= IFF_RUNNING; |
| 3112 | ifp->if_flags &= ~IFF_OACTIVE; |
| 3113 | |
| 3114 | callout_reset_bycpu(&sc->bnx_stat_timer, hz, bnx_tick, sc, |
| 3115 | sc->bnx_stat_cpuid); |
| 3116 | } |
| 3117 | |
| 3118 | /* |
| 3119 | * Set media options. |
| 3120 | */ |
| 3121 | static int |
| 3122 | bnx_ifmedia_upd(struct ifnet *ifp) |
| 3123 | { |
| 3124 | struct bnx_softc *sc = ifp->if_softc; |
| 3125 | |
| 3126 | /* If this is a 1000baseX NIC, enable the TBI port. */ |
| 3127 | if (sc->bnx_flags & BNX_FLAG_TBI) { |
| 3128 | struct ifmedia *ifm = &sc->bnx_ifmedia; |
| 3129 | |
| 3130 | if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) |
| 3131 | return(EINVAL); |
| 3132 | |
| 3133 | switch(IFM_SUBTYPE(ifm->ifm_media)) { |
| 3134 | case IFM_AUTO: |
| 3135 | break; |
| 3136 | |
| 3137 | case IFM_1000_SX: |
| 3138 | if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) { |
| 3139 | BNX_CLRBIT(sc, BGE_MAC_MODE, |
| 3140 | BGE_MACMODE_HALF_DUPLEX); |
| 3141 | } else { |
| 3142 | BNX_SETBIT(sc, BGE_MAC_MODE, |
| 3143 | BGE_MACMODE_HALF_DUPLEX); |
| 3144 | } |
| 3145 | break; |
| 3146 | default: |
| 3147 | return(EINVAL); |
| 3148 | } |
| 3149 | } else { |
| 3150 | struct mii_data *mii = device_get_softc(sc->bnx_miibus); |
| 3151 | |
| 3152 | sc->bnx_link_evt++; |
| 3153 | sc->bnx_link = 0; |
| 3154 | if (mii->mii_instance) { |
| 3155 | struct mii_softc *miisc; |
| 3156 | |
| 3157 | LIST_FOREACH(miisc, &mii->mii_phys, mii_list) |
| 3158 | mii_phy_reset(miisc); |
| 3159 | } |
| 3160 | mii_mediachg(mii); |
| 3161 | |
| 3162 | /* |
| 3163 | * Force an interrupt so that we will call bnx_link_upd |
| 3164 | * if needed and clear any pending link state attention. |
| 3165 | * Without this we are not getting any further interrupts |
| 3166 | * for link state changes and thus will not UP the link and |
| 3167 | * not be able to send in bnx_start. The only way to get |
| 3168 | * things working was to receive a packet and get an RX |
| 3169 | * intr. |
| 3170 | * |
| 3171 | * bnx_tick should help for fiber cards and we might not |
| 3172 | * need to do this here if BNX_FLAG_TBI is set but as |
| 3173 | * we poll for fiber anyway it should not harm. |
| 3174 | */ |
| 3175 | BNX_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW); |
| 3176 | } |
| 3177 | return(0); |
| 3178 | } |
| 3179 | |
| 3180 | /* |
| 3181 | * Report current media status. |
| 3182 | */ |
| 3183 | static void |
| 3184 | bnx_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) |
| 3185 | { |
| 3186 | struct bnx_softc *sc = ifp->if_softc; |
| 3187 | |
| 3188 | if (sc->bnx_flags & BNX_FLAG_TBI) { |
| 3189 | ifmr->ifm_status = IFM_AVALID; |
| 3190 | ifmr->ifm_active = IFM_ETHER; |
| 3191 | if (CSR_READ_4(sc, BGE_MAC_STS) & |
| 3192 | BGE_MACSTAT_TBI_PCS_SYNCHED) { |
| 3193 | ifmr->ifm_status |= IFM_ACTIVE; |
| 3194 | } else { |
| 3195 | ifmr->ifm_active |= IFM_NONE; |
| 3196 | return; |
| 3197 | } |
| 3198 | |
| 3199 | ifmr->ifm_active |= IFM_1000_SX; |
| 3200 | if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX) |
| 3201 | ifmr->ifm_active |= IFM_HDX; |
| 3202 | else |
| 3203 | ifmr->ifm_active |= IFM_FDX; |
| 3204 | } else { |
| 3205 | struct mii_data *mii = device_get_softc(sc->bnx_miibus); |
| 3206 | |
| 3207 | mii_pollstat(mii); |
| 3208 | ifmr->ifm_active = mii->mii_media_active; |
| 3209 | ifmr->ifm_status = mii->mii_media_status; |
| 3210 | } |
| 3211 | } |
| 3212 | |
| 3213 | static int |
| 3214 | bnx_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr) |
| 3215 | { |
| 3216 | struct bnx_softc *sc = ifp->if_softc; |
| 3217 | struct ifreq *ifr = (struct ifreq *)data; |
| 3218 | int mask, error = 0; |
| 3219 | |
| 3220 | ASSERT_SERIALIZED(ifp->if_serializer); |
| 3221 | |
| 3222 | switch (command) { |
| 3223 | case SIOCSIFMTU: |
| 3224 | if ((!BNX_IS_JUMBO_CAPABLE(sc) && ifr->ifr_mtu > ETHERMTU) || |
| 3225 | (BNX_IS_JUMBO_CAPABLE(sc) && |
| 3226 | ifr->ifr_mtu > BNX_JUMBO_MTU)) { |
| 3227 | error = EINVAL; |
| 3228 | } else if (ifp->if_mtu != ifr->ifr_mtu) { |
| 3229 | ifp->if_mtu = ifr->ifr_mtu; |
| 3230 | if (ifp->if_flags & IFF_RUNNING) |
| 3231 | bnx_init(sc); |
| 3232 | } |
| 3233 | break; |
| 3234 | case SIOCSIFFLAGS: |
| 3235 | if (ifp->if_flags & IFF_UP) { |
| 3236 | if (ifp->if_flags & IFF_RUNNING) { |
| 3237 | mask = ifp->if_flags ^ sc->bnx_if_flags; |
| 3238 | |
| 3239 | /* |
| 3240 | * If only the state of the PROMISC flag |
| 3241 | * changed, then just use the 'set promisc |
| 3242 | * mode' command instead of reinitializing |
| 3243 | * the entire NIC. Doing a full re-init |
| 3244 | * means reloading the firmware and waiting |
| 3245 | * for it to start up, which may take a |
| 3246 | * second or two. Similarly for ALLMULTI. |
| 3247 | */ |
| 3248 | if (mask & IFF_PROMISC) |
| 3249 | bnx_setpromisc(sc); |
| 3250 | if (mask & IFF_ALLMULTI) |
| 3251 | bnx_setmulti(sc); |
| 3252 | } else { |
| 3253 | bnx_init(sc); |
| 3254 | } |
| 3255 | } else if (ifp->if_flags & IFF_RUNNING) { |
| 3256 | bnx_stop(sc); |
| 3257 | } |
| 3258 | sc->bnx_if_flags = ifp->if_flags; |
| 3259 | break; |
| 3260 | case SIOCADDMULTI: |
| 3261 | case SIOCDELMULTI: |
| 3262 | if (ifp->if_flags & IFF_RUNNING) |
| 3263 | bnx_setmulti(sc); |
| 3264 | break; |
| 3265 | case SIOCSIFMEDIA: |
| 3266 | case SIOCGIFMEDIA: |
| 3267 | if (sc->bnx_flags & BNX_FLAG_TBI) { |
| 3268 | error = ifmedia_ioctl(ifp, ifr, |
| 3269 | &sc->bnx_ifmedia, command); |
| 3270 | } else { |
| 3271 | struct mii_data *mii; |
| 3272 | |
| 3273 | mii = device_get_softc(sc->bnx_miibus); |
| 3274 | error = ifmedia_ioctl(ifp, ifr, |
| 3275 | &mii->mii_media, command); |
| 3276 | } |
| 3277 | break; |
| 3278 | case SIOCSIFCAP: |
| 3279 | mask = ifr->ifr_reqcap ^ ifp->if_capenable; |
| 3280 | if (mask & IFCAP_HWCSUM) { |
| 3281 | ifp->if_capenable ^= (mask & IFCAP_HWCSUM); |
| 3282 | if (ifp->if_capenable & IFCAP_TXCSUM) |
| 3283 | ifp->if_hwassist |= BNX_CSUM_FEATURES; |
| 3284 | else |
| 3285 | ifp->if_hwassist &= ~BNX_CSUM_FEATURES; |
| 3286 | } |
| 3287 | if (mask & IFCAP_TSO) { |
| 3288 | ifp->if_capenable ^= (mask & IFCAP_TSO); |
| 3289 | if (ifp->if_capenable & IFCAP_TSO) |
| 3290 | ifp->if_hwassist |= CSUM_TSO; |
| 3291 | else |
| 3292 | ifp->if_hwassist &= ~CSUM_TSO; |
| 3293 | } |
| 3294 | break; |
| 3295 | default: |
| 3296 | error = ether_ioctl(ifp, command, data); |
| 3297 | break; |
| 3298 | } |
| 3299 | return error; |
| 3300 | } |
| 3301 | |
| 3302 | static void |
| 3303 | bnx_watchdog(struct ifnet *ifp) |
| 3304 | { |
| 3305 | struct bnx_softc *sc = ifp->if_softc; |
| 3306 | |
| 3307 | if_printf(ifp, "watchdog timeout -- resetting\n"); |
| 3308 | |
| 3309 | bnx_init(sc); |
| 3310 | |
| 3311 | ifp->if_oerrors++; |
| 3312 | |
| 3313 | if (!ifq_is_empty(&ifp->if_snd)) |
| 3314 | if_devstart(ifp); |
| 3315 | } |
| 3316 | |
| 3317 | /* |
| 3318 | * Stop the adapter and free any mbufs allocated to the |
| 3319 | * RX and TX lists. |
| 3320 | */ |
| 3321 | static void |
| 3322 | bnx_stop(struct bnx_softc *sc) |
| 3323 | { |
| 3324 | struct ifnet *ifp = &sc->arpcom.ac_if; |
| 3325 | |
| 3326 | ASSERT_SERIALIZED(ifp->if_serializer); |
| 3327 | |
| 3328 | callout_stop(&sc->bnx_stat_timer); |
| 3329 | |
| 3330 | /* |
| 3331 | * Disable all of the receiver blocks |
| 3332 | */ |
| 3333 | bnx_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE); |
| 3334 | bnx_stop_block(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE); |
| 3335 | bnx_stop_block(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE); |
| 3336 | bnx_stop_block(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE); |
| 3337 | bnx_stop_block(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE); |
| 3338 | bnx_stop_block(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE); |
| 3339 | |
| 3340 | /* |
| 3341 | * Disable all of the transmit blocks |
| 3342 | */ |
| 3343 | bnx_stop_block(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE); |
| 3344 | bnx_stop_block(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE); |
| 3345 | bnx_stop_block(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE); |
| 3346 | bnx_stop_block(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE); |
| 3347 | bnx_stop_block(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE); |
| 3348 | bnx_stop_block(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE); |
| 3349 | |
| 3350 | /* |
| 3351 | * Shut down all of the memory managers and related |
| 3352 | * state machines. |
| 3353 | */ |
| 3354 | bnx_stop_block(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE); |
| 3355 | bnx_stop_block(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE); |
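	/* Pulse a reset of all flow-through queues (FTQs). */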
| 3356 | CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF); |
| 3357 | CSR_WRITE_4(sc, BGE_FTQ_RESET, 0); |
| 3358 | |
| 3359 | /* Disable host interrupts. */ |
| 3360 | bnx_disable_intr(sc); |
| 3361 | |
| 3362 | /* |
| 3363 | * Tell firmware we're shutting down. |
| 3364 | */ |
| 3365 | BNX_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); |
| 3366 | |
| 3367 | /* Free the RX lists. */ |
| 3368 | bnx_free_rx_ring_std(sc); |
| 3369 | |
| 3370 | /* Free jumbo RX list. */ |
| 3371 | if (BNX_IS_JUMBO_CAPABLE(sc)) |
| 3372 | bnx_free_rx_ring_jumbo(sc); |
| 3373 | |
| 3374 | /* Free TX buffers. */ |
| 3375 | bnx_free_tx_ring(sc); |
| 3376 | |
| 3377 | sc->bnx_status_tag = 0; |
| 3378 | sc->bnx_link = 0; |
| 3379 | sc->bnx_coal_chg = 0; |
| 3380 | |
| 3381 | sc->bnx_tx_saved_considx = BNX_TXCONS_UNSET; |
| 3382 | |
| 3383 | ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); |
| 3384 | ifp->if_timer = 0; |
| 3385 | } |
| 3386 | |
| 3387 | /* |
| 3388 | * Stop all chip I/O so that the kernel's probe routines don't |
| 3389 | * get confused by errant DMAs when rebooting. |
| 3390 | */ |
| 3391 | static void |
| 3392 | bnx_shutdown(device_t dev) |
| 3393 | { |
| 3394 | struct bnx_softc *sc = device_get_softc(dev); |
| 3395 | struct ifnet *ifp = &sc->arpcom.ac_if; |
| 3396 | |
| 3397 | lwkt_serialize_enter(ifp->if_serializer); |
| 3398 | bnx_stop(sc); |
| 3399 | bnx_reset(sc); |
| 3400 | lwkt_serialize_exit(ifp->if_serializer); |
| 3401 | } |
| 3402 | |
| 3403 | static int |
| 3404 | bnx_suspend(device_t dev) |
| 3405 | { |
| 3406 | struct bnx_softc *sc = device_get_softc(dev); |
| 3407 | struct ifnet *ifp = &sc->arpcom.ac_if; |
| 3408 | |
| 3409 | lwkt_serialize_enter(ifp->if_serializer); |
| 3410 | bnx_stop(sc); |
| 3411 | lwkt_serialize_exit(ifp->if_serializer); |
| 3412 | |
| 3413 | return 0; |
| 3414 | } |
| 3415 | |
| 3416 | static int |
| 3417 | bnx_resume(device_t dev) |
| 3418 | { |
| 3419 | struct bnx_softc *sc = device_get_softc(dev); |
| 3420 | struct ifnet *ifp = &sc->arpcom.ac_if; |
| 3421 | |
| 3422 | lwkt_serialize_enter(ifp->if_serializer); |
| 3423 | |
| 3424 | if (ifp->if_flags & IFF_UP) { |
| 3425 | bnx_init(sc); |
| 3426 | |
| 3427 | if (!ifq_is_empty(&ifp->if_snd)) |
| 3428 | if_devstart(ifp); |
| 3429 | } |
| 3430 | |
| 3431 | lwkt_serialize_exit(ifp->if_serializer); |
| 3432 | |
| 3433 | return 0; |
| 3434 | } |
| 3435 | |
| 3436 | static void |
| 3437 | bnx_setpromisc(struct bnx_softc *sc) |
| 3438 | { |
| 3439 | struct ifnet *ifp = &sc->arpcom.ac_if; |
| 3440 | |
| 3441 | if (ifp->if_flags & IFF_PROMISC) |
| 3442 | BNX_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); |
| 3443 | else |
| 3444 | BNX_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); |
| 3445 | } |
| 3446 | |
| 3447 | static void |
| 3448 | bnx_dma_free(struct bnx_softc *sc) |
| 3449 | { |
| 3450 | int i; |
| 3451 | |
	/* Destroy the DMA tag and maps for RX mbufs. */
| 3453 | if (sc->bnx_cdata.bnx_rx_mtag != NULL) { |
| 3454 | for (i = 0; i < BGE_STD_RX_RING_CNT; i++) { |
| 3455 | bus_dmamap_destroy(sc->bnx_cdata.bnx_rx_mtag, |
| 3456 | sc->bnx_cdata.bnx_rx_std_dmamap[i]); |
| 3457 | } |
| 3458 | bus_dmamap_destroy(sc->bnx_cdata.bnx_rx_mtag, |
| 3459 | sc->bnx_cdata.bnx_rx_tmpmap); |
| 3460 | bus_dma_tag_destroy(sc->bnx_cdata.bnx_rx_mtag); |
| 3461 | } |
| 3462 | |
	/* Destroy the DMA tag and maps for TX mbufs. */
| 3464 | if (sc->bnx_cdata.bnx_tx_mtag != NULL) { |
| 3465 | for (i = 0; i < BGE_TX_RING_CNT; i++) { |
| 3466 | bus_dmamap_destroy(sc->bnx_cdata.bnx_tx_mtag, |
| 3467 | sc->bnx_cdata.bnx_tx_dmamap[i]); |
| 3468 | } |
| 3469 | bus_dma_tag_destroy(sc->bnx_cdata.bnx_tx_mtag); |
| 3470 | } |
| 3471 | |
| 3472 | /* Destroy standard RX ring */ |
| 3473 | bnx_dma_block_free(sc->bnx_cdata.bnx_rx_std_ring_tag, |
| 3474 | sc->bnx_cdata.bnx_rx_std_ring_map, |
| 3475 | sc->bnx_ldata.bnx_rx_std_ring); |
| 3476 | |
| 3477 | if (BNX_IS_JUMBO_CAPABLE(sc)) |
| 3478 | bnx_free_jumbo_mem(sc); |
| 3479 | |
| 3480 | /* Destroy RX return ring */ |
| 3481 | bnx_dma_block_free(sc->bnx_cdata.bnx_rx_return_ring_tag, |
| 3482 | sc->bnx_cdata.bnx_rx_return_ring_map, |
| 3483 | sc->bnx_ldata.bnx_rx_return_ring); |
| 3484 | |
| 3485 | /* Destroy TX ring */ |
| 3486 | bnx_dma_block_free(sc->bnx_cdata.bnx_tx_ring_tag, |
| 3487 | sc->bnx_cdata.bnx_tx_ring_map, |
| 3488 | sc->bnx_ldata.bnx_tx_ring); |
| 3489 | |
| 3490 | /* Destroy status block */ |
| 3491 | bnx_dma_block_free(sc->bnx_cdata.bnx_status_tag, |
| 3492 | sc->bnx_cdata.bnx_status_map, |
| 3493 | sc->bnx_ldata.bnx_status_block); |
| 3494 | |
| 3495 | /* Destroy the parent tag */ |
| 3496 | if (sc->bnx_cdata.bnx_parent_tag != NULL) |
| 3497 | bus_dma_tag_destroy(sc->bnx_cdata.bnx_parent_tag); |
| 3498 | } |
| 3499 | |
| 3500 | static int |
| 3501 | bnx_dma_alloc(struct bnx_softc *sc) |
| 3502 | { |
| 3503 | struct ifnet *ifp = &sc->arpcom.ac_if; |
| 3504 | bus_size_t txmaxsz; |
| 3505 | int i, error; |
| 3506 | |
| 3507 | /* |
| 3508 | * Allocate the parent bus DMA tag appropriate for PCI. |
| 3509 | * |
	 * All of the NetXtreme/NetLink controllers have a 4GB boundary
	 * DMA bug: whenever an address crosses a multiple of 4GB
	 * (4GB, 8GB, 12GB, etc.) and makes the transition from
	 * 0xX_FFFF_FFFF to 0x(X+1)_0000_0000, an internal DMA state
	 * machine will lock up and cause the device to hang.
| 3516 | */ |
| 3517 | error = bus_dma_tag_create(NULL, 1, BGE_DMA_BOUNDARY_4G, |
| 3518 | BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, |
| 3519 | NULL, NULL, |
| 3520 | BUS_SPACE_MAXSIZE_32BIT, 0, |
| 3521 | BUS_SPACE_MAXSIZE_32BIT, |
| 3522 | 0, &sc->bnx_cdata.bnx_parent_tag); |
| 3523 | if (error) { |
| 3524 | if_printf(ifp, "could not allocate parent dma tag\n"); |
| 3525 | return error; |
| 3526 | } |
| 3527 | |
| 3528 | /* |
| 3529 | * Create DMA tag and maps for RX mbufs. |
| 3530 | */ |
| 3531 | error = bus_dma_tag_create(sc->bnx_cdata.bnx_parent_tag, 1, 0, |
| 3532 | BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, |
| 3533 | NULL, NULL, MCLBYTES, 1, MCLBYTES, |
| 3534 | BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK, |
| 3535 | &sc->bnx_cdata.bnx_rx_mtag); |
| 3536 | if (error) { |
| 3537 | if_printf(ifp, "could not allocate RX mbuf dma tag\n"); |
| 3538 | return error; |
| 3539 | } |
| 3540 | |
| 3541 | error = bus_dmamap_create(sc->bnx_cdata.bnx_rx_mtag, |
| 3542 | BUS_DMA_WAITOK, &sc->bnx_cdata.bnx_rx_tmpmap); |
| 3543 | if (error) { |
| 3544 | bus_dma_tag_destroy(sc->bnx_cdata.bnx_rx_mtag); |
| 3545 | sc->bnx_cdata.bnx_rx_mtag = NULL; |
| 3546 | return error; |
| 3547 | } |
| 3548 | |
| 3549 | for (i = 0; i < BGE_STD_RX_RING_CNT; i++) { |
| 3550 | error = bus_dmamap_create(sc->bnx_cdata.bnx_rx_mtag, |
| 3551 | BUS_DMA_WAITOK, |
| 3552 | &sc->bnx_cdata.bnx_rx_std_dmamap[i]); |
| 3553 | if (error) { |
| 3554 | int j; |
| 3555 | |
| 3556 | for (j = 0; j < i; ++j) { |
| 3557 | bus_dmamap_destroy(sc->bnx_cdata.bnx_rx_mtag, |
| 3558 | sc->bnx_cdata.bnx_rx_std_dmamap[j]); |
| 3559 | } |
| 3560 | bus_dma_tag_destroy(sc->bnx_cdata.bnx_rx_mtag); |
| 3561 | sc->bnx_cdata.bnx_rx_mtag = NULL; |
| 3562 | |
| 3563 | if_printf(ifp, "could not create DMA map for RX\n"); |
| 3564 | return error; |
| 3565 | } |
| 3566 | } |
| 3567 | |
| 3568 | /* |
| 3569 | * Create DMA tag and maps for TX mbufs. |
| 3570 | */ |
| 3571 | if (sc->bnx_flags & BNX_FLAG_TSO) |
| 3572 | txmaxsz = IP_MAXPACKET + sizeof(struct ether_vlan_header); |
| 3573 | else |
| 3574 | txmaxsz = BNX_JUMBO_FRAMELEN; |
| 3575 | error = bus_dma_tag_create(sc->bnx_cdata.bnx_parent_tag, 1, 0, |
| 3576 | BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, |
| 3577 | NULL, NULL, |
| 3578 | txmaxsz, BNX_NSEG_NEW, PAGE_SIZE, |
| 3579 | BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | |
| 3580 | BUS_DMA_ONEBPAGE, |
| 3581 | &sc->bnx_cdata.bnx_tx_mtag); |
| 3582 | if (error) { |
| 3583 | if_printf(ifp, "could not allocate TX mbuf dma tag\n"); |
| 3584 | return error; |
| 3585 | } |
| 3586 | |
| 3587 | for (i = 0; i < BGE_TX_RING_CNT; i++) { |
| 3588 | error = bus_dmamap_create(sc->bnx_cdata.bnx_tx_mtag, |
| 3589 | BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE, |
| 3590 | &sc->bnx_cdata.bnx_tx_dmamap[i]); |
| 3591 | if (error) { |
| 3592 | int j; |
| 3593 | |
| 3594 | for (j = 0; j < i; ++j) { |
| 3595 | bus_dmamap_destroy(sc->bnx_cdata.bnx_tx_mtag, |
| 3596 | sc->bnx_cdata.bnx_tx_dmamap[j]); |
| 3597 | } |
| 3598 | bus_dma_tag_destroy(sc->bnx_cdata.bnx_tx_mtag); |
| 3599 | sc->bnx_cdata.bnx_tx_mtag = NULL; |
| 3600 | |
| 3601 | if_printf(ifp, "could not create DMA map for TX\n"); |
| 3602 | return error; |
| 3603 | } |
| 3604 | } |
| 3605 | |
| 3606 | /* |
	 * Create DMA resources for the standard RX ring.
| 3608 | */ |
| 3609 | error = bnx_dma_block_alloc(sc, BGE_STD_RX_RING_SZ, |
| 3610 | &sc->bnx_cdata.bnx_rx_std_ring_tag, |
| 3611 | &sc->bnx_cdata.bnx_rx_std_ring_map, |
| 3612 | (void *)&sc->bnx_ldata.bnx_rx_std_ring, |
| 3613 | &sc->bnx_ldata.bnx_rx_std_ring_paddr); |
| 3614 | if (error) { |
| 3615 | if_printf(ifp, "could not create std RX ring\n"); |
| 3616 | return error; |
| 3617 | } |
| 3618 | |
| 3619 | /* |
| 3620 | * Create jumbo buffer pool. |
| 3621 | */ |
| 3622 | if (BNX_IS_JUMBO_CAPABLE(sc)) { |
| 3623 | error = bnx_alloc_jumbo_mem(sc); |
| 3624 | if (error) { |
| 3625 | if_printf(ifp, "could not create jumbo buffer pool\n"); |
| 3626 | return error; |
| 3627 | } |
| 3628 | } |
| 3629 | |
| 3630 | /* |
	 * Create DMA resources for the RX return ring.
| 3632 | */ |
| 3633 | error = bnx_dma_block_alloc(sc, |
| 3634 | BGE_RX_RTN_RING_SZ(sc->bnx_return_ring_cnt), |
| 3635 | &sc->bnx_cdata.bnx_rx_return_ring_tag, |
| 3636 | &sc->bnx_cdata.bnx_rx_return_ring_map, |
| 3637 | (void *)&sc->bnx_ldata.bnx_rx_return_ring, |
| 3638 | &sc->bnx_ldata.bnx_rx_return_ring_paddr); |
| 3639 | if (error) { |
| 3640 | if_printf(ifp, "could not create RX ret ring\n"); |
| 3641 | return error; |
| 3642 | } |
| 3643 | |
| 3644 | /* |
	 * Create DMA resources for the TX ring.
| 3646 | */ |
| 3647 | error = bnx_dma_block_alloc(sc, BGE_TX_RING_SZ, |
| 3648 | &sc->bnx_cdata.bnx_tx_ring_tag, |
| 3649 | &sc->bnx_cdata.bnx_tx_ring_map, |
| 3650 | (void *)&sc->bnx_ldata.bnx_tx_ring, |
| 3651 | &sc->bnx_ldata.bnx_tx_ring_paddr); |
| 3652 | if (error) { |
| 3653 | if_printf(ifp, "could not create TX ring\n"); |
| 3654 | return error; |
| 3655 | } |
| 3656 | |
| 3657 | /* |
	 * Create DMA resources for the status block.
| 3659 | */ |
| 3660 | error = bnx_dma_block_alloc(sc, BGE_STATUS_BLK_SZ, |
| 3661 | &sc->bnx_cdata.bnx_status_tag, |
| 3662 | &sc->bnx_cdata.bnx_status_map, |
| 3663 | (void *)&sc->bnx_ldata.bnx_status_block, |
| 3664 | &sc->bnx_ldata.bnx_status_block_paddr); |
| 3665 | if (error) { |
| 3666 | if_printf(ifp, "could not create status block\n"); |
| 3667 | return error; |
| 3668 | } |
| 3669 | |
| 3670 | return 0; |
| 3671 | } |
| 3672 | |
| 3673 | static int |
| 3674 | bnx_dma_block_alloc(struct bnx_softc *sc, bus_size_t size, bus_dma_tag_t *tag, |
| 3675 | bus_dmamap_t *map, void **addr, bus_addr_t *paddr) |
| 3676 | { |
| 3677 | bus_dmamem_t dmem; |
| 3678 | int error; |
| 3679 | |
| 3680 | error = bus_dmamem_coherent(sc->bnx_cdata.bnx_parent_tag, PAGE_SIZE, 0, |
| 3681 | BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, |
| 3682 | size, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem); |
| 3683 | if (error) |
| 3684 | return error; |
| 3685 | |
| 3686 | *tag = dmem.dmem_tag; |
| 3687 | *map = dmem.dmem_map; |
| 3688 | *addr = dmem.dmem_addr; |
| 3689 | *paddr = dmem.dmem_busaddr; |
| 3690 | |
| 3691 | return 0; |
| 3692 | } |
| 3693 | |
| 3694 | static void |
| 3695 | bnx_dma_block_free(bus_dma_tag_t tag, bus_dmamap_t map, void *addr) |
| 3696 | { |
| 3697 | if (tag != NULL) { |
| 3698 | bus_dmamap_unload(tag, map); |
| 3699 | bus_dmamem_free(tag, addr, map); |
| 3700 | bus_dma_tag_destroy(tag); |
| 3701 | } |
| 3702 | } |
| 3703 | |
| 3704 | static void |
| 3705 | bnx_tbi_link_upd(struct bnx_softc *sc, uint32_t status) |
| 3706 | { |
| 3707 | struct ifnet *ifp = &sc->arpcom.ac_if; |
| 3708 | |
| 3709 | #define PCS_ENCODE_ERR (BGE_MACSTAT_PORT_DECODE_ERROR|BGE_MACSTAT_MI_COMPLETE) |
| 3710 | |
| 3711 | /* |
| 3712 | * Sometimes PCS encoding errors are detected in |
| 3713 | * TBI mode (on fiber NICs), and for some reason |
| 3714 | * the chip will signal them as link changes. |
| 3715 | * If we get a link change event, but the 'PCS |
| 3716 | * encoding error' bit in the MAC status register |
| 3717 | * is set, don't bother doing a link check. |
| 3718 | * This avoids spurious "gigabit link up" messages |
| 3719 | * that sometimes appear on fiber NICs during |
| 3720 | * periods of heavy traffic. |
| 3721 | */ |
| 3722 | if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) { |
| 3723 | if (!sc->bnx_link) { |
| 3724 | sc->bnx_link++; |
| 3725 | if (sc->bnx_asicrev == BGE_ASICREV_BCM5704) { |
| 3726 | BNX_CLRBIT(sc, BGE_MAC_MODE, |
| 3727 | BGE_MACMODE_TBI_SEND_CFGS); |
| 3728 | } |
| 3729 | CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF); |
| 3730 | |
| 3731 | if (bootverbose) |
| 3732 | if_printf(ifp, "link UP\n"); |
| 3733 | |
| 3734 | ifp->if_link_state = LINK_STATE_UP; |
| 3735 | if_link_state_change(ifp); |
| 3736 | } |
| 3737 | } else if ((status & PCS_ENCODE_ERR) != PCS_ENCODE_ERR) { |
| 3738 | if (sc->bnx_link) { |
| 3739 | sc->bnx_link = 0; |
| 3740 | |
| 3741 | if (bootverbose) |
| 3742 | if_printf(ifp, "link DOWN\n"); |
| 3743 | |
| 3744 | ifp->if_link_state = LINK_STATE_DOWN; |
| 3745 | if_link_state_change(ifp); |
| 3746 | } |
| 3747 | } |
| 3748 | |
| 3749 | #undef PCS_ENCODE_ERR |
| 3750 | |
| 3751 | /* Clear the attention. */ |
| 3752 | CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED | |
| 3753 | BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE | |
| 3754 | BGE_MACSTAT_LINK_CHANGED); |
| 3755 | } |
| 3756 | |
| 3757 | static void |
| 3758 | bnx_copper_link_upd(struct bnx_softc *sc, uint32_t status __unused) |
| 3759 | { |
| 3760 | struct ifnet *ifp = &sc->arpcom.ac_if; |
| 3761 | struct mii_data *mii = device_get_softc(sc->bnx_miibus); |
| 3762 | |
| 3763 | mii_pollstat(mii); |
| 3764 | bnx_miibus_statchg(sc->bnx_dev); |
| 3765 | |
| 3766 | if (bootverbose) { |
| 3767 | if (sc->bnx_link) |
| 3768 | if_printf(ifp, "link UP\n"); |
| 3769 | else |
| 3770 | if_printf(ifp, "link DOWN\n"); |
| 3771 | } |
| 3772 | |
| 3773 | /* Clear the attention. */ |
| 3774 | CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED | |
| 3775 | BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE | |
| 3776 | BGE_MACSTAT_LINK_CHANGED); |
| 3777 | } |
| 3778 | |
| 3779 | static void |
| 3780 | bnx_autopoll_link_upd(struct bnx_softc *sc, uint32_t status __unused) |
| 3781 | { |
| 3782 | struct ifnet *ifp = &sc->arpcom.ac_if; |
| 3783 | struct mii_data *mii = device_get_softc(sc->bnx_miibus); |
| 3784 | |
| 3785 | mii_pollstat(mii); |
| 3786 | |
| 3787 | if (!sc->bnx_link && |
| 3788 | (mii->mii_media_status & IFM_ACTIVE) && |
| 3789 | IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { |
| 3790 | sc->bnx_link++; |
| 3791 | if (bootverbose) |
| 3792 | if_printf(ifp, "link UP\n"); |
| 3793 | } else if (sc->bnx_link && |
| 3794 | (!(mii->mii_media_status & IFM_ACTIVE) || |
| 3795 | IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) { |
| 3796 | sc->bnx_link = 0; |
| 3797 | if (bootverbose) |
| 3798 | if_printf(ifp, "link DOWN\n"); |
| 3799 | } |
| 3800 | |
| 3801 | /* Clear the attention. */ |
| 3802 | CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED | |
| 3803 | BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE | |
| 3804 | BGE_MACSTAT_LINK_CHANGED); |
| 3805 | } |
| 3806 | |
| 3807 | static int |
| 3808 | bnx_sysctl_rx_coal_ticks(SYSCTL_HANDLER_ARGS) |
| 3809 | { |
| 3810 | struct bnx_softc *sc = arg1; |
| 3811 | |
| 3812 | return bnx_sysctl_coal_chg(oidp, arg1, arg2, req, |
| 3813 | &sc->bnx_rx_coal_ticks, |
| 3814 | BNX_RX_COAL_TICKS_MIN, BNX_RX_COAL_TICKS_MAX, |
| 3815 | BNX_RX_COAL_TICKS_CHG); |
| 3816 | } |
| 3817 | |
| 3818 | static int |
| 3819 | bnx_sysctl_tx_coal_ticks(SYSCTL_HANDLER_ARGS) |
| 3820 | { |
| 3821 | struct bnx_softc *sc = arg1; |
| 3822 | |
| 3823 | return bnx_sysctl_coal_chg(oidp, arg1, arg2, req, |
| 3824 | &sc->bnx_tx_coal_ticks, |
| 3825 | BNX_TX_COAL_TICKS_MIN, BNX_TX_COAL_TICKS_MAX, |
| 3826 | BNX_TX_COAL_TICKS_CHG); |
| 3827 | } |
| 3828 | |
| 3829 | static int |
| 3830 | bnx_sysctl_rx_coal_bds(SYSCTL_HANDLER_ARGS) |
| 3831 | { |
| 3832 | struct bnx_softc *sc = arg1; |
| 3833 | |
| 3834 | return bnx_sysctl_coal_chg(oidp, arg1, arg2, req, |
| 3835 | &sc->bnx_rx_coal_bds, |
| 3836 | BNX_RX_COAL_BDS_MIN, BNX_RX_COAL_BDS_MAX, |
| 3837 | BNX_RX_COAL_BDS_CHG); |
| 3838 | } |
| 3839 | |
| 3840 | static int |
| 3841 | bnx_sysctl_tx_coal_bds(SYSCTL_HANDLER_ARGS) |
| 3842 | { |
| 3843 | struct bnx_softc *sc = arg1; |
| 3844 | |
| 3845 | return bnx_sysctl_coal_chg(oidp, arg1, arg2, req, |
| 3846 | &sc->bnx_tx_coal_bds, |
| 3847 | BNX_TX_COAL_BDS_MIN, BNX_TX_COAL_BDS_MAX, |
| 3848 | BNX_TX_COAL_BDS_CHG); |
| 3849 | } |
| 3850 | |
| 3851 | static int |
| 3852 | bnx_sysctl_rx_coal_bds_int(SYSCTL_HANDLER_ARGS) |
| 3853 | { |
| 3854 | struct bnx_softc *sc = arg1; |
| 3855 | |
| 3856 | return bnx_sysctl_coal_chg(oidp, arg1, arg2, req, |
| 3857 | &sc->bnx_rx_coal_bds_int, |
| 3858 | BNX_RX_COAL_BDS_MIN, BNX_RX_COAL_BDS_MAX, |
| 3859 | BNX_RX_COAL_BDS_INT_CHG); |
| 3860 | } |
| 3861 | |
| 3862 | static int |
| 3863 | bnx_sysctl_tx_coal_bds_int(SYSCTL_HANDLER_ARGS) |
| 3864 | { |
| 3865 | struct bnx_softc *sc = arg1; |
| 3866 | |
| 3867 | return bnx_sysctl_coal_chg(oidp, arg1, arg2, req, |
| 3868 | &sc->bnx_tx_coal_bds_int, |
| 3869 | BNX_TX_COAL_BDS_MIN, BNX_TX_COAL_BDS_MAX, |
| 3870 | BNX_TX_COAL_BDS_INT_CHG); |
| 3871 | } |
| 3872 | |
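/*
 * Common backend for the coalescing sysctl handlers above: validate
 * the new value against [coal_min, coal_max] under the interface
 * serializer and record which parameter changed in bnx_coal_chg so
 * that bnx_coal_change() can program it into the chip later.
 */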
| 3873 | static int |
| 3874 | bnx_sysctl_coal_chg(SYSCTL_HANDLER_ARGS, uint32_t *coal, |
| 3875 | int coal_min, int coal_max, uint32_t coal_chg_mask) |
| 3876 | { |
| 3877 | struct bnx_softc *sc = arg1; |
| 3878 | struct ifnet *ifp = &sc->arpcom.ac_if; |
| 3879 | int error = 0, v; |
| 3880 | |
| 3881 | lwkt_serialize_enter(ifp->if_serializer); |
| 3882 | |
| 3883 | v = *coal; |
| 3884 | error = sysctl_handle_int(oidp, &v, 0, req); |
| 3885 | if (!error && req->newptr != NULL) { |
| 3886 | if (v < coal_min || v > coal_max) { |
| 3887 | error = EINVAL; |
| 3888 | } else { |
| 3889 | *coal = v; |
| 3890 | sc->bnx_coal_chg |= coal_chg_mask; |
| 3891 | } |
| 3892 | } |
| 3893 | |
| 3894 | lwkt_serialize_exit(ifp->if_serializer); |
| 3895 | return error; |
| 3896 | } |
| 3897 | |
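/*
 * Program any coalescing parameters flagged in bnx_coal_chg into the
 * host coalescing engine registers; each write is followed by a short
 * delay and a read-back before moving on.
 */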
| 3898 | static void |
| 3899 | bnx_coal_change(struct bnx_softc *sc) |
| 3900 | { |
| 3901 | struct ifnet *ifp = &sc->arpcom.ac_if; |
| 3902 | uint32_t val; |
| 3903 | |
| 3904 | ASSERT_SERIALIZED(ifp->if_serializer); |
| 3905 | |
| 3906 | if (sc->bnx_coal_chg & BNX_RX_COAL_TICKS_CHG) { |
| 3907 | CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, |
| 3908 | sc->bnx_rx_coal_ticks); |
| 3909 | DELAY(10); |
| 3910 | val = CSR_READ_4(sc, BGE_HCC_RX_COAL_TICKS); |
| 3911 | |
| 3912 | if (bootverbose) { |
| 3913 | if_printf(ifp, "rx_coal_ticks -> %u\n", |
| 3914 | sc->bnx_rx_coal_ticks); |
| 3915 | } |
| 3916 | } |
| 3917 | |
| 3918 | if (sc->bnx_coal_chg & BNX_TX_COAL_TICKS_CHG) { |
| 3919 | CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, |
| 3920 | sc->bnx_tx_coal_ticks); |
| 3921 | DELAY(10); |
| 3922 | val = CSR_READ_4(sc, BGE_HCC_TX_COAL_TICKS); |
| 3923 | |
| 3924 | if (bootverbose) { |
| 3925 | if_printf(ifp, "tx_coal_ticks -> %u\n", |
| 3926 | sc->bnx_tx_coal_ticks); |
| 3927 | } |
| 3928 | } |
| 3929 | |
| 3930 | if (sc->bnx_coal_chg & BNX_RX_COAL_BDS_CHG) { |
| 3931 | CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, |
| 3932 | sc->bnx_rx_coal_bds); |
| 3933 | DELAY(10); |
| 3934 | val = CSR_READ_4(sc, BGE_HCC_RX_MAX_COAL_BDS); |
| 3935 | |
| 3936 | if (bootverbose) { |
| 3937 | if_printf(ifp, "rx_coal_bds -> %u\n", |
| 3938 | sc->bnx_rx_coal_bds); |
| 3939 | } |
| 3940 | } |
| 3941 | |
| 3942 | if (sc->bnx_coal_chg & BNX_TX_COAL_BDS_CHG) { |
| 3943 | CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, |
| 3944 | sc->bnx_tx_coal_bds); |
| 3945 | DELAY(10); |
| 3946 | val = CSR_READ_4(sc, BGE_HCC_TX_MAX_COAL_BDS); |
| 3947 | |
| 3948 | if (bootverbose) { |
| 3949 | if_printf(ifp, "tx_max_coal_bds -> %u\n", |
| 3950 | sc->bnx_tx_coal_bds); |
| 3951 | } |
| 3952 | } |
| 3953 | |
| 3954 | if (sc->bnx_coal_chg & BNX_RX_COAL_BDS_INT_CHG) { |
| 3955 | CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, |
| 3956 | sc->bnx_rx_coal_bds_int); |
| 3957 | DELAY(10); |
| 3958 | val = CSR_READ_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT); |
| 3959 | |
| 3960 | if (bootverbose) { |
| 3961 | if_printf(ifp, "rx_coal_bds_int -> %u\n", |
| 3962 | sc->bnx_rx_coal_bds_int); |
| 3963 | } |
| 3964 | } |
| 3965 | |
| 3966 | if (sc->bnx_coal_chg & BNX_TX_COAL_BDS_INT_CHG) { |
| 3967 | CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, |
| 3968 | sc->bnx_tx_coal_bds_int); |
| 3969 | DELAY(10); |
| 3970 | val = CSR_READ_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT); |
| 3971 | |
| 3972 | if (bootverbose) { |
| 3973 | if_printf(ifp, "tx_coal_bds_int -> %u\n", |
| 3974 | sc->bnx_tx_coal_bds_int); |
| 3975 | } |
| 3976 | } |
| 3977 | |
| 3978 | sc->bnx_coal_chg = 0; |
| 3979 | } |
| 3980 | |
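/*
 * Periodic workaround check for the status tag bug: if the status
 * block advertises new RX/TX work but our saved consumer indices have
 * not moved since the previous check, assume an interrupt was lost
 * and run the MSI handler by hand.
 */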
| 3981 | static void |
| 3982 | bnx_intr_check(void *xsc) |
| 3983 | { |
| 3984 | struct bnx_softc *sc = xsc; |
| 3985 | struct ifnet *ifp = &sc->arpcom.ac_if; |
| 3986 | struct bge_status_block *sblk = sc->bnx_ldata.bnx_status_block; |
| 3987 | |
| 3988 | lwkt_serialize_enter(ifp->if_serializer); |
| 3989 | |
| 3990 | KKASSERT(mycpuid == sc->bnx_intr_cpuid); |
| 3991 | |
| 3992 | if ((ifp->if_flags & (IFF_RUNNING | IFF_POLLING)) != IFF_RUNNING) { |
| 3993 | lwkt_serialize_exit(ifp->if_serializer); |
| 3994 | return; |
| 3995 | } |
| 3996 | |
| 3997 | if (sblk->bge_idx[0].bge_rx_prod_idx != sc->bnx_rx_saved_considx || |
| 3998 | sblk->bge_idx[0].bge_tx_cons_idx != sc->bnx_tx_saved_considx) { |
| 3999 | if (sc->bnx_rx_check_considx == sc->bnx_rx_saved_considx && |
| 4000 | sc->bnx_tx_check_considx == sc->bnx_tx_saved_considx) { |
| 4001 | if (!sc->bnx_intr_maylose) { |
| 4002 | sc->bnx_intr_maylose = TRUE; |
| 4003 | goto done; |
| 4004 | } |
| 4005 | if (bootverbose) |
| 4006 | if_printf(ifp, "lost interrupt\n"); |
| 4007 | bnx_msi(sc); |
| 4008 | } |
| 4009 | } |
| 4010 | sc->bnx_intr_maylose = FALSE; |
| 4011 | sc->bnx_rx_check_considx = sc->bnx_rx_saved_considx; |
| 4012 | sc->bnx_tx_check_considx = sc->bnx_tx_saved_considx; |
| 4013 | |
| 4014 | done: |
| 4015 | callout_reset(&sc->bnx_intr_timer, BNX_INTR_CKINTVL, |
| 4016 | bnx_intr_check, sc); |
| 4017 | lwkt_serialize_exit(ifp->if_serializer); |
| 4018 | } |
| 4019 | |
| 4020 | static void |
| 4021 | bnx_enable_intr(struct bnx_softc *sc) |
| 4022 | { |
| 4023 | struct ifnet *ifp = &sc->arpcom.ac_if; |
| 4024 | |
| 4025 | lwkt_serialize_handler_enable(ifp->if_serializer); |
| 4026 | |
| 4027 | /* |
	 * Enable interrupts by writing the last status tag to
	 * interrupt mailbox 0.
| 4029 | */ |
| 4030 | bnx_writembx(sc, BGE_MBX_IRQ0_LO, sc->bnx_status_tag << 24); |
| 4031 | if (sc->bnx_flags & BNX_FLAG_ONESHOT_MSI) { |
| 4032 | /* XXX Linux driver */ |
| 4033 | bnx_writembx(sc, BGE_MBX_IRQ0_LO, sc->bnx_status_tag << 24); |
| 4034 | } |
| 4035 | |
| 4036 | /* |
| 4037 | * Unmask the interrupt when we stop polling. |
| 4038 | */ |
| 4039 | PCI_CLRBIT(sc->bnx_dev, BGE_PCI_MISC_CTL, |
| 4040 | BGE_PCIMISCCTL_MASK_PCI_INTR, 4); |
| 4041 | |
| 4042 | /* |
	 * Trigger another interrupt, since the above write to
	 * interrupt mailbox 0 may have acknowledged a pending
	 * interrupt.
| 4046 | */ |
| 4047 | BNX_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET); |
| 4048 | |
| 4049 | if (sc->bnx_flags & BNX_FLAG_STATUSTAG_BUG) { |
| 4050 | sc->bnx_intr_maylose = FALSE; |
| 4051 | sc->bnx_rx_check_considx = 0; |
| 4052 | sc->bnx_tx_check_considx = 0; |
| 4053 | |
| 4054 | if (bootverbose) |
| 4055 | if_printf(ifp, "status tag bug workaround\n"); |
| 4056 | |
| 4057 | /* 10ms check interval */ |
| 4058 | callout_reset_bycpu(&sc->bnx_intr_timer, BNX_INTR_CKINTVL, |
| 4059 | bnx_intr_check, sc, sc->bnx_intr_cpuid); |
| 4060 | } |
| 4061 | } |
| 4062 | |
| 4063 | static void |
| 4064 | bnx_disable_intr(struct bnx_softc *sc) |
| 4065 | { |
| 4066 | struct ifnet *ifp = &sc->arpcom.ac_if; |
| 4067 | |
| 4068 | /* |
| 4069 | * Mask the interrupt when we start polling. |
| 4070 | */ |
| 4071 | PCI_SETBIT(sc->bnx_dev, BGE_PCI_MISC_CTL, |
| 4072 | BGE_PCIMISCCTL_MASK_PCI_INTR, 4); |
| 4073 | |
| 4074 | /* |
| 4075 | * Acknowledge possible asserted interrupt. |
| 4076 | */ |
| 4077 | bnx_writembx(sc, BGE_MBX_IRQ0_LO, 1); |
| 4078 | |
| 4079 | callout_stop(&sc->bnx_intr_timer); |
| 4080 | sc->bnx_intr_maylose = FALSE; |
| 4081 | sc->bnx_rx_check_considx = 0; |
| 4082 | sc->bnx_tx_check_considx = 0; |
| 4083 | |
| 4084 | lwkt_serialize_handler_disable(ifp->if_serializer); |
| 4085 | } |
| 4086 | |
| 4087 | static int |
| 4088 | bnx_get_eaddr_mem(struct bnx_softc *sc, uint8_t ether_addr[]) |
| 4089 | { |
| 4090 | uint32_t mac_addr; |
| 4091 | int ret = 1; |
| 4092 | |
| 4093 | mac_addr = bnx_readmem_ind(sc, 0x0c14); |
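	/*
	 * 0x484b is the ASCII signature "HK"; its presence in the upper
	 * 16 bits indicates that firmware has stored a valid MAC
	 * address in this memory window.
	 */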
| 4094 | if ((mac_addr >> 16) == 0x484b) { |
| 4095 | ether_addr[0] = (uint8_t)(mac_addr >> 8); |
| 4096 | ether_addr[1] = (uint8_t)mac_addr; |
| 4097 | mac_addr = bnx_readmem_ind(sc, 0x0c18); |
| 4098 | ether_addr[2] = (uint8_t)(mac_addr >> 24); |
| 4099 | ether_addr[3] = (uint8_t)(mac_addr >> 16); |
| 4100 | ether_addr[4] = (uint8_t)(mac_addr >> 8); |
| 4101 | ether_addr[5] = (uint8_t)mac_addr; |
| 4102 | ret = 0; |
| 4103 | } |
| 4104 | return ret; |
| 4105 | } |
| 4106 | |
| 4107 | static int |
| 4108 | bnx_get_eaddr_nvram(struct bnx_softc *sc, uint8_t ether_addr[]) |
| 4109 | { |
| 4110 | int mac_offset = BGE_EE_MAC_OFFSET; |
| 4111 | |
| 4112 | if (BNX_IS_5717_PLUS(sc)) { |
| 4113 | int f; |
| 4114 | |
| 4115 | f = pci_get_function(sc->bnx_dev); |
| 4116 | if (f & 1) |
| 4117 | mac_offset = BGE_EE_MAC_OFFSET_5717; |
| 4118 | if (f > 1) |
| 4119 | mac_offset += BGE_EE_MAC_OFFSET_5717_OFF; |
| 4120 | } else if (sc->bnx_asicrev == BGE_ASICREV_BCM5906) { |
| 4121 | mac_offset = BGE_EE_MAC_OFFSET_5906; |
| 4122 | } |
| 4123 | |
| 4124 | return bnx_read_nvram(sc, ether_addr, mac_offset + 2, ETHER_ADDR_LEN); |
| 4125 | } |
| 4126 | |
| 4127 | static int |
| 4128 | bnx_get_eaddr_eeprom(struct bnx_softc *sc, uint8_t ether_addr[]) |
| 4129 | { |
| 4130 | if (sc->bnx_flags & BNX_FLAG_NO_EEPROM) |
| 4131 | return 1; |
| 4132 | |
| 4133 | return bnx_read_eeprom(sc, ether_addr, BGE_EE_MAC_OFFSET + 2, |
| 4134 | ETHER_ADDR_LEN); |
| 4135 | } |
| 4136 | |
| 4137 | static int |
| 4138 | bnx_get_eaddr(struct bnx_softc *sc, uint8_t eaddr[]) |
| 4139 | { |
| 4140 | static const bnx_eaddr_fcn_t bnx_eaddr_funcs[] = { |
| 4141 | /* NOTE: Order is critical */ |
| 4142 | bnx_get_eaddr_mem, |
| 4143 | bnx_get_eaddr_nvram, |
| 4144 | bnx_get_eaddr_eeprom, |
| 4145 | NULL |
| 4146 | }; |
| 4147 | const bnx_eaddr_fcn_t *func; |
| 4148 | |
| 4149 | for (func = bnx_eaddr_funcs; *func != NULL; ++func) { |
| 4150 | if ((*func)(sc, eaddr) == 0) |
| 4151 | break; |
| 4152 | } |
| 4153 | return (*func == NULL ? ENXIO : 0); |
| 4154 | } |
| 4155 | |
| 4156 | /* |
| 4157 | * NOTE: 'm' is not freed upon failure |
| 4158 | */ |
| 4159 | struct mbuf * |
| 4160 | bnx_defrag_shortdma(struct mbuf *m) |
| 4161 | { |
| 4162 | struct mbuf *n; |
| 4163 | int found; |
| 4164 | |
| 4165 | /* |
	 * If the device receives two back-to-back send BDs with less
	 * than or equal to 8 total bytes, the device may hang.  The
	 * two back-to-back send BDs must be in the same frame for this
	 * failure to occur.  Scan the mbuf chain and see whether two
	 * such back-to-back send BDs are there; if so, defragment the
	 * frame to work around the silicon bug.
| 4172 | */ |
| 4173 | for (n = m, found = 0; n != NULL; n = n->m_next) { |
| 4174 | if (n->m_len < 8) { |
| 4175 | found++; |
| 4176 | if (found > 1) |
| 4177 | break; |
| 4178 | continue; |
| 4179 | } |
| 4180 | found = 0; |
| 4181 | } |
| 4182 | |
| 4183 | if (found > 1) |
| 4184 | n = m_defrag(m, MB_DONTWAIT); |
| 4185 | else |
| 4186 | n = m; |
| 4187 | return n; |
| 4188 | } |
| 4189 | |
| 4190 | static void |
| 4191 | bnx_stop_block(struct bnx_softc *sc, bus_size_t reg, uint32_t bit) |
| 4192 | { |
| 4193 | int i; |
| 4194 | |
| 4195 | BNX_CLRBIT(sc, reg, bit); |
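	/*
	 * Wait for the block to acknowledge the disable; give up
	 * silently after BNX_TIMEOUT polls of 100us each.
	 */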
| 4196 | for (i = 0; i < BNX_TIMEOUT; i++) { |
| 4197 | if ((CSR_READ_4(sc, reg) & bit) == 0) |
| 4198 | return; |
| 4199 | DELAY(100); |
| 4200 | } |
| 4201 | } |
| 4202 | |
| 4203 | static void |
| 4204 | bnx_link_poll(struct bnx_softc *sc) |
| 4205 | { |
| 4206 | uint32_t status; |
| 4207 | |
| 4208 | status = CSR_READ_4(sc, BGE_MAC_STS); |
| 4209 | if ((status & sc->bnx_link_chg) || sc->bnx_link_evt) { |
| 4210 | sc->bnx_link_evt = 0; |
| 4211 | sc->bnx_link_upd(sc, status); |
| 4212 | } |
| 4213 | } |
| 4214 | |
| 4215 | static void |
| 4216 | bnx_enable_msi(struct bnx_softc *sc) |
| 4217 | { |
| 4218 | uint32_t msi_mode; |
| 4219 | |
| 4220 | msi_mode = CSR_READ_4(sc, BGE_MSI_MODE); |
| 4221 | msi_mode |= BGE_MSIMODE_ENABLE; |
| 4222 | if (sc->bnx_flags & BNX_FLAG_ONESHOT_MSI) { |
| 4223 | /* |
| 4224 | * NOTE: |
| 4225 | * 5718-PG105-R says that "one shot" mode |
		 * does not work if MSI is used; however,
		 * it obviously works.
| 4228 | */ |
| 4229 | msi_mode &= ~BGE_MSIMODE_ONESHOT_DISABLE; |
| 4230 | } |
| 4231 | CSR_WRITE_4(sc, BGE_MSI_MODE, msi_mode); |
| 4232 | } |
| 4233 | |
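/*
 * Compute the byte/word swapping bits for BGE_MODE_CTL based on host
 * endianness; the BCM5720 additionally needs the B2HRX/HTX2B swap and
 * enable bits.
 */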
| 4234 | static uint32_t |
| 4235 | bnx_dma_swap_options(struct bnx_softc *sc) |
| 4236 | { |
| 4237 | uint32_t dma_options; |
| 4238 | |
| 4239 | dma_options = BGE_MODECTL_WORDSWAP_NONFRAME | |
| 4240 | BGE_MODECTL_BYTESWAP_DATA | BGE_MODECTL_WORDSWAP_DATA; |
| 4241 | #if BYTE_ORDER == BIG_ENDIAN |
| 4242 | dma_options |= BGE_MODECTL_BYTESWAP_NONFRAME; |
| 4243 | #endif |
| 4244 | if (sc->bnx_asicrev == BGE_ASICREV_BCM5720) { |
| 4245 | dma_options |= BGE_MODECTL_BYTESWAP_B2HRX_DATA | |
| 4246 | BGE_MODECTL_WORDSWAP_B2HRX_DATA | BGE_MODECTL_B2HRX_ENABLE | |
| 4247 | BGE_MODECTL_HTX2B_ENABLE; |
| 4248 | } |
| 4249 | return dma_options; |
| 4250 | } |
| 4251 | |
| 4252 | static int |
| 4253 | bnx_setup_tso(struct bnx_softc *sc, struct mbuf **mp, |
| 4254 | uint16_t *mss0, uint16_t *flags0) |
| 4255 | { |
| 4256 | struct mbuf *m; |
| 4257 | struct ip *ip; |
| 4258 | struct tcphdr *th; |
| 4259 | int thoff, iphlen, hoff, hlen; |
| 4260 | uint16_t flags, mss; |
| 4261 | |
| 4262 | m = *mp; |
| 4263 | KASSERT(M_WRITABLE(m), ("TSO mbuf not writable")); |
| 4264 | |
| 4265 | hoff = m->m_pkthdr.csum_lhlen; |
| 4266 | iphlen = m->m_pkthdr.csum_iphlen; |
| 4267 | thoff = m->m_pkthdr.csum_thlen; |
| 4268 | |
| 4269 | KASSERT(hoff > 0, ("invalid ether header len")); |
| 4270 | KASSERT(iphlen > 0, ("invalid ip header len")); |
| 4271 | KASSERT(thoff > 0, ("invalid tcp header len")); |
| 4272 | |
| 4273 | if (__predict_false(m->m_len < hoff + iphlen + thoff)) { |
| 4274 | m = m_pullup(m, hoff + iphlen + thoff); |
| 4275 | if (m == NULL) { |
| 4276 | *mp = NULL; |
| 4277 | return ENOBUFS; |
| 4278 | } |
| 4279 | *mp = m; |
| 4280 | } |
| 4281 | ip = mtodoff(m, struct ip *, hoff); |
| 4282 | th = mtodoff(m, struct tcphdr *, hoff + iphlen); |
| 4283 | |
| 4284 | mss = m->m_pkthdr.tso_segsz; |
| 4285 | flags = BGE_TXBDFLAG_CPU_PRE_DMA | BGE_TXBDFLAG_CPU_POST_DMA; |
| 4286 | |
| 4287 | ip->ip_len = htons(mss + iphlen + thoff); |
| 4288 | th->th_sum = 0; |
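	/*
	 * The IP total length is primed with the size of one segment
	 * and the TCP checksum is zeroed; the controller presumably
	 * rewrites both for each segment it emits.  The combined
	 * IP + TCP header length, in 32-bit words, is then split
	 * across the mss and flags fields: bits 0-1 into mss bits
	 * 14-15, bit 2 into flags bit 4 and bits 3-7 into flags
	 * bits 10-14.
	 */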
| 4289 | |
| 4290 | hlen = (iphlen + thoff) >> 2; |
| 4291 | mss |= ((hlen & 0x3) << 14); |
| 4292 | flags |= ((hlen & 0xf8) << 7) | ((hlen & 0x4) << 2); |
| 4293 | |
| 4294 | *mss0 = mss; |
| 4295 | *flags0 = flags; |
| 4296 | |
| 4297 | return 0; |
| 4298 | } |