bnx: Utilize BNX_IS_5717_PLUS
[dragonfly.git] / sys / dev / netif / bnx / if_bnx.c
1/*
2 * Copyright (c) 2001 Wind River Systems
3 * Copyright (c) 1997, 1998, 1999, 2001
4 * Bill Paul <wpaul@windriver.com>. All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 * must display the following acknowledgement:
16 * This product includes software developed by Bill Paul.
17 * 4. Neither the name of the author nor the names of any co-contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31 * THE POSSIBILITY OF SUCH DAMAGE.
32 *
33 * $FreeBSD: src/sys/dev/bge/if_bge.c,v 1.3.2.39 2005/07/03 03:41:18 silby Exp $
34 */
35
36
37#include "opt_polling.h"
38
39#include <sys/param.h>
40#include <sys/bus.h>
41#include <sys/endian.h>
42#include <sys/kernel.h>
43#include <sys/interrupt.h>
44#include <sys/mbuf.h>
45#include <sys/malloc.h>
46#include <sys/queue.h>
47#include <sys/rman.h>
48#include <sys/serialize.h>
49#include <sys/socket.h>
50#include <sys/sockio.h>
51#include <sys/sysctl.h>
52
53#include <net/bpf.h>
54#include <net/ethernet.h>
55#include <net/if.h>
56#include <net/if_arp.h>
57#include <net/if_dl.h>
58#include <net/if_media.h>
59#include <net/if_types.h>
60#include <net/ifq_var.h>
61#include <net/vlan/if_vlan_var.h>
62#include <net/vlan/if_vlan_ether.h>
63
64#include <dev/netif/mii_layer/mii.h>
65#include <dev/netif/mii_layer/miivar.h>
66#include <dev/netif/mii_layer/brgphyreg.h>
67
68#include <bus/pci/pcidevs.h>
69#include <bus/pci/pcireg.h>
70#include <bus/pci/pcivar.h>
71
72#include <dev/netif/bge/if_bgereg.h>
73#include <dev/netif/bnx/if_bnxvar.h>
74
75/* "device miibus" required. See GENERIC if you get errors here. */
76#include "miibus_if.h"
77
78#define BNX_CSUM_FEATURES (CSUM_IP | CSUM_TCP)
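/*
 * (These are the offloads a driver typically advertises via
 * ifp->if_hwassist once TX checksumming is enabled.)
 */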
79
80static const struct bnx_type {
81 uint16_t bnx_vid;
82 uint16_t bnx_did;
83 char *bnx_name;
84} bnx_devs[] = {
85 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5717,
86 "Broadcom BCM5717 Gigabit Ethernet" },
87 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5718,
88 "Broadcom BCM5718 Gigabit Ethernet" },
89 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5719,
90 "Broadcom BCM5719 Gigabit Ethernet" },
91 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5720_ALT,
92 "Broadcom BCM5720 Gigabit Ethernet" },
93
94 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57761,
95 "Broadcom BCM57761 Gigabit Ethernet" },
96 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57781,
97 "Broadcom BCM57781 Gigabit Ethernet" },
98 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57791,
99 "Broadcom BCM57791 Fast Ethernet" },
100 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57765,
101 "Broadcom BCM57765 Gigabit Ethernet" },
102 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57785,
103 "Broadcom BCM57785 Gigabit Ethernet" },
104 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57795,
105 "Broadcom BCM57795 Fast Ethernet" },
106
107 { 0, 0, NULL }
108};
109
110#define BNX_IS_JUMBO_CAPABLE(sc) ((sc)->bnx_flags & BNX_FLAG_JUMBO)
111#define BNX_IS_5717_PLUS(sc) ((sc)->bnx_flags & BNX_FLAG_5717_PLUS)
112#define BNX_IS_57765_PLUS(sc) ((sc)->bnx_flags & BNX_FLAG_57765_PLUS)
113#define BNX_IS_57765_FAMILY(sc) \
114 ((sc)->bnx_flags & BNX_FLAG_57765_FAMILY)
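/*
 * Usage sketch (illustrative, not part of the driver): each macro
 * collapses a per-ASIC comparison into a single flag test, e.g.
 *
 *	if (BNX_IS_5717_PLUS(sc))
 *		limit = 4;
 *	else
 *		limit = 1;
 *
 * The BNX_FLAG_* bits are derived from bnx_asicrev in bnx_attach().
 */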
115
116typedef int (*bnx_eaddr_fcn_t)(struct bnx_softc *, uint8_t[]);
117
118static int bnx_probe(device_t);
119static int bnx_attach(device_t);
120static int bnx_detach(device_t);
121static void bnx_shutdown(device_t);
122static int bnx_suspend(device_t);
123static int bnx_resume(device_t);
124static int bnx_miibus_readreg(device_t, int, int);
125static int bnx_miibus_writereg(device_t, int, int, int);
126static void bnx_miibus_statchg(device_t);
127
128#ifdef DEVICE_POLLING
129static void bnx_poll(struct ifnet *ifp, enum poll_cmd cmd, int count);
130#endif
131static void bnx_intr_legacy(void *);
132static void bnx_msi(void *);
133static void bnx_msi_oneshot(void *);
134static void bnx_intr(struct bnx_softc *);
135static void bnx_enable_intr(struct bnx_softc *);
136static void bnx_disable_intr(struct bnx_softc *);
137static void bnx_txeof(struct bnx_softc *, uint16_t);
138static void bnx_rxeof(struct bnx_softc *, uint16_t);
139
140static void bnx_start(struct ifnet *);
141static int bnx_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
142static void bnx_init(void *);
143static void bnx_stop(struct bnx_softc *);
144static void bnx_watchdog(struct ifnet *);
145static int bnx_ifmedia_upd(struct ifnet *);
146static void bnx_ifmedia_sts(struct ifnet *, struct ifmediareq *);
147static void bnx_tick(void *);
148
149static int bnx_alloc_jumbo_mem(struct bnx_softc *);
150static void bnx_free_jumbo_mem(struct bnx_softc *);
151static struct bnx_jslot
152 *bnx_jalloc(struct bnx_softc *);
153static void bnx_jfree(void *);
154static void bnx_jref(void *);
155static int bnx_newbuf_std(struct bnx_softc *, int, int);
156static int bnx_newbuf_jumbo(struct bnx_softc *, int, int);
157static void bnx_setup_rxdesc_std(struct bnx_softc *, int);
158static void bnx_setup_rxdesc_jumbo(struct bnx_softc *, int);
159static int bnx_init_rx_ring_std(struct bnx_softc *);
160static void bnx_free_rx_ring_std(struct bnx_softc *);
161static int bnx_init_rx_ring_jumbo(struct bnx_softc *);
162static void bnx_free_rx_ring_jumbo(struct bnx_softc *);
163static void bnx_free_tx_ring(struct bnx_softc *);
164static int bnx_init_tx_ring(struct bnx_softc *);
165static int bnx_dma_alloc(struct bnx_softc *);
166static void bnx_dma_free(struct bnx_softc *);
167static int bnx_dma_block_alloc(struct bnx_softc *, bus_size_t,
168 bus_dma_tag_t *, bus_dmamap_t *, void **, bus_addr_t *);
169static void bnx_dma_block_free(bus_dma_tag_t, bus_dmamap_t, void *);
170static struct mbuf *
171 bnx_defrag_shortdma(struct mbuf *);
172static int bnx_encap(struct bnx_softc *, struct mbuf **, uint32_t *);
173
174static void bnx_reset(struct bnx_softc *);
175static int bnx_chipinit(struct bnx_softc *);
176static int bnx_blockinit(struct bnx_softc *);
177static void bnx_stop_block(struct bnx_softc *, bus_size_t, uint32_t);
178static void bnx_enable_msi(struct bnx_softc *sc);
179static void bnx_setmulti(struct bnx_softc *);
180static void bnx_setpromisc(struct bnx_softc *);
181static void bnx_stats_update_regs(struct bnx_softc *);
182static uint32_t bnx_dma_swap_options(struct bnx_softc *);
183
184static uint32_t bnx_readmem_ind(struct bnx_softc *, uint32_t);
185static void bnx_writemem_ind(struct bnx_softc *, uint32_t, uint32_t);
186#ifdef notdef
187static uint32_t bnx_readreg_ind(struct bnx_softc *, uint32_t);
188#endif
189static void bnx_writereg_ind(struct bnx_softc *, uint32_t, uint32_t);
190static void bnx_writemem_direct(struct bnx_softc *, uint32_t, uint32_t);
191static void bnx_writembx(struct bnx_softc *, int, int);
192static uint8_t bnx_nvram_getbyte(struct bnx_softc *, int, uint8_t *);
193static int bnx_read_nvram(struct bnx_softc *, caddr_t, int, int);
194static uint8_t bnx_eeprom_getbyte(struct bnx_softc *, uint32_t, uint8_t *);
195static int bnx_read_eeprom(struct bnx_softc *, caddr_t, uint32_t, size_t);
196
197static void bnx_tbi_link_upd(struct bnx_softc *, uint32_t);
198static void bnx_copper_link_upd(struct bnx_softc *, uint32_t);
199static void bnx_autopoll_link_upd(struct bnx_softc *, uint32_t);
200static void bnx_link_poll(struct bnx_softc *);
201
202static int bnx_get_eaddr_mem(struct bnx_softc *, uint8_t[]);
203static int bnx_get_eaddr_nvram(struct bnx_softc *, uint8_t[]);
204static int bnx_get_eaddr_eeprom(struct bnx_softc *, uint8_t[]);
205static int bnx_get_eaddr(struct bnx_softc *, uint8_t[]);
206
207static void bnx_coal_change(struct bnx_softc *);
208static int bnx_sysctl_rx_coal_ticks(SYSCTL_HANDLER_ARGS);
209static int bnx_sysctl_tx_coal_ticks(SYSCTL_HANDLER_ARGS);
210static int bnx_sysctl_rx_coal_bds(SYSCTL_HANDLER_ARGS);
211static int bnx_sysctl_tx_coal_bds(SYSCTL_HANDLER_ARGS);
212static int bnx_sysctl_rx_coal_bds_int(SYSCTL_HANDLER_ARGS);
213static int bnx_sysctl_tx_coal_bds_int(SYSCTL_HANDLER_ARGS);
214static int bnx_sysctl_coal_chg(SYSCTL_HANDLER_ARGS, uint32_t *,
215 int, int, uint32_t);
216
217static int bnx_msi_enable = 1;
218TUNABLE_INT("hw.bnx.msi.enable", &bnx_msi_enable);
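/*
 * This tunable is read from the loader environment, e.g. in
 * /boot/loader.conf:
 *
 *	hw.bnx.msi.enable="0"	(force line interrupts instead of MSI)
 */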
219
220static device_method_t bnx_methods[] = {
221 /* Device interface */
222 DEVMETHOD(device_probe, bnx_probe),
223 DEVMETHOD(device_attach, bnx_attach),
224 DEVMETHOD(device_detach, bnx_detach),
225 DEVMETHOD(device_shutdown, bnx_shutdown),
226 DEVMETHOD(device_suspend, bnx_suspend),
227 DEVMETHOD(device_resume, bnx_resume),
228
229 /* bus interface */
230 DEVMETHOD(bus_print_child, bus_generic_print_child),
231 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
232
233 /* MII interface */
234 DEVMETHOD(miibus_readreg, bnx_miibus_readreg),
235 DEVMETHOD(miibus_writereg, bnx_miibus_writereg),
236 DEVMETHOD(miibus_statchg, bnx_miibus_statchg),
237
238 { 0, 0 }
239};
240
241static DEFINE_CLASS_0(bnx, bnx_driver, bnx_methods, sizeof(struct bnx_softc));
242static devclass_t bnx_devclass;
243
244DECLARE_DUMMY_MODULE(if_bnx);
245DRIVER_MODULE(if_bnx, pci, bnx_driver, bnx_devclass, NULL, NULL);
246DRIVER_MODULE(miibus, bnx, miibus_driver, miibus_devclass, NULL, NULL);
247
248static uint32_t
249bnx_readmem_ind(struct bnx_softc *sc, uint32_t off)
250{
251 device_t dev = sc->bnx_dev;
252 uint32_t val;
253
254 if (sc->bnx_asicrev == BGE_ASICREV_BCM5906 &&
255 off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4)
256 return 0;
257
258 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
259 val = pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4);
260 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
261 return (val);
262}
263
264static void
265bnx_writemem_ind(struct bnx_softc *sc, uint32_t off, uint32_t val)
266{
267 device_t dev = sc->bnx_dev;
268
269 if (sc->bnx_asicrev == BGE_ASICREV_BCM5906 &&
270 off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4)
271 return;
272
273 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
274 pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
275 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
276}
277
278#ifdef notdef
279static uint32_t
280bnx_readreg_ind(struct bnx_softc *sc, uint32_t off)
281{
282 device_t dev = sc->bnx_dev;
283
284 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
285 return(pci_read_config(dev, BGE_PCI_REG_DATA, 4));
286}
287#endif
288
289static void
290bnx_writereg_ind(struct bnx_softc *sc, uint32_t off, uint32_t val)
291{
292 device_t dev = sc->bnx_dev;
293
294 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
295 pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);
296}
297
298static void
299bnx_writemem_direct(struct bnx_softc *sc, uint32_t off, uint32_t val)
300{
301 CSR_WRITE_4(sc, off, val);
302}
303
304static void
305bnx_writembx(struct bnx_softc *sc, int off, int val)
306{
307 if (sc->bnx_asicrev == BGE_ASICREV_BCM5906)
308 off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI;
309
310 CSR_WRITE_4(sc, off, val);
311}
312
313static uint8_t
314bnx_nvram_getbyte(struct bnx_softc *sc, int addr, uint8_t *dest)
315{
316 uint32_t access, byte = 0;
317 int i;
318
319 /* Lock. */
320 CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
321 for (i = 0; i < 8000; i++) {
322 if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1)
323 break;
324 DELAY(20);
325 }
326 if (i == 8000)
327 return (1);
328
329 /* Enable access. */
330 access = CSR_READ_4(sc, BGE_NVRAM_ACCESS);
331 CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access | BGE_NVRAMACC_ENABLE);
332
333 CSR_WRITE_4(sc, BGE_NVRAM_ADDR, addr & 0xfffffffc);
334 CSR_WRITE_4(sc, BGE_NVRAM_CMD, BGE_NVRAM_READCMD);
335 for (i = 0; i < BNX_TIMEOUT * 10; i++) {
336 DELAY(10);
337 if (CSR_READ_4(sc, BGE_NVRAM_CMD) & BGE_NVRAMCMD_DONE) {
338 DELAY(10);
339 break;
340 }
341 }
342
343 if (i == BNX_TIMEOUT * 10) {
344 if_printf(&sc->arpcom.ac_if, "nvram read timed out\n");
345 return (1);
346 }
347
348 /* Get result. */
349 byte = CSR_READ_4(sc, BGE_NVRAM_RDDATA);
350
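	/*
	 * Worked example (illustrative): for addr = 6, the word at
	 * offset 4 was read above; after bswap32() the wanted byte
	 * sits in lane (6 % 4) = 2, i.e. bits 23:16, hence a shift
	 * of 16.
	 */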
351 *dest = (bswap32(byte) >> ((addr % 4) * 8)) & 0xFF;
352
353 /* Disable access. */
354 CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access);
355
356 /* Unlock. */
357 CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_CLR1);
358 CSR_READ_4(sc, BGE_NVRAM_SWARB);
359
360 return (0);
361}
362
363/*
364 * Read a sequence of bytes from NVRAM.
365 */
366static int
367bnx_read_nvram(struct bnx_softc *sc, caddr_t dest, int off, int cnt)
368{
369 int err = 0, i;
370 uint8_t byte = 0;
371
372 if (sc->bnx_asicrev != BGE_ASICREV_BCM5906)
373 return (1);
374
375 for (i = 0; i < cnt; i++) {
376 err = bnx_nvram_getbyte(sc, off + i, &byte);
377 if (err)
378 break;
379 *(dest + i) = byte;
380 }
381
382 return (err ? 1 : 0);
383}
384
385/*
386 * Read a byte of data stored in the EEPROM at address 'addr.' The
387 * BCM570x supports both the traditional bitbang interface and an
388 * auto access interface for reading the EEPROM. We use the auto
389 * access method.
390 */
391static uint8_t
392bnx_eeprom_getbyte(struct bnx_softc *sc, uint32_t addr, uint8_t *dest)
393{
394 int i;
395 uint32_t byte = 0;
396
397 /*
398 * Enable use of auto EEPROM access so we can avoid
399 * having to use the bitbang method.
400 */
401 BNX_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);
402
403 /* Reset the EEPROM, load the clock period. */
404 CSR_WRITE_4(sc, BGE_EE_ADDR,
405 BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
406 DELAY(20);
407
408 /* Issue the read EEPROM command. */
409 CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);
410
411 /* Wait for completion */
412 for (i = 0; i < BNX_TIMEOUT * 10; i++) {
413 DELAY(10);
414 if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
415 break;
416 }
417
418 if (i == BNX_TIMEOUT * 10) {
419 if_printf(&sc->arpcom.ac_if, "eeprom read timed out\n");
420 return(1);
421 }
422
423 /* Get result. */
424 byte = CSR_READ_4(sc, BGE_EE_DATA);
425
426 *dest = (byte >> ((addr % 4) * 8)) & 0xFF;
427
428 return(0);
429}
430
431/*
432 * Read a sequence of bytes from the EEPROM.
433 */
434static int
435bnx_read_eeprom(struct bnx_softc *sc, caddr_t dest, uint32_t off, size_t len)
436{
437 size_t i;
438 int err;
439 uint8_t byte;
440
441 for (byte = 0, err = 0, i = 0; i < len; i++) {
442 err = bnx_eeprom_getbyte(sc, off + i, &byte);
443 if (err)
444 break;
445 *(dest + i) = byte;
446 }
447
448 return(err ? 1 : 0);
449}
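/*
 * Typical use (sketch; BGE_EE_MAC_OFFSET is assumed to come from
 * if_bgereg.h): bnx_get_eaddr_eeprom() reads the station address as
 *
 *	bnx_read_eeprom(sc, ether_addr, BGE_EE_MAC_OFFSET + 2,
 *	    ETHER_ADDR_LEN);
 */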
450
451static int
452bnx_miibus_readreg(device_t dev, int phy, int reg)
453{
454 struct bnx_softc *sc = device_get_softc(dev);
455 uint32_t val;
456 int i;
457
458 KASSERT(phy == sc->bnx_phyno,
459 ("invalid phyno %d, should be %d", phy, sc->bnx_phyno));
460
461 /* Clear the autopoll bit if set, otherwise may trigger PCI errors. */
462 if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) {
463 CSR_WRITE_4(sc, BGE_MI_MODE,
464 sc->bnx_mi_mode & ~BGE_MIMODE_AUTOPOLL);
465 DELAY(80);
466 }
467
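	/*
	 * Kick off the read; BGE_MIPHY()/BGE_MIREG() (from
	 * if_bgereg.h) merely place the PHY address and register
	 * number into their MI_COMM bit fields.
	 */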
468 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ | BGE_MICOMM_BUSY |
469 BGE_MIPHY(phy) | BGE_MIREG(reg));
470
471 /* Poll for the PHY register access to complete. */
472 for (i = 0; i < BNX_TIMEOUT; i++) {
473 DELAY(10);
474 val = CSR_READ_4(sc, BGE_MI_COMM);
475 if ((val & BGE_MICOMM_BUSY) == 0) {
476 DELAY(5);
477 val = CSR_READ_4(sc, BGE_MI_COMM);
478 break;
479 }
480 }
481 if (i == BNX_TIMEOUT) {
482 if_printf(&sc->arpcom.ac_if, "PHY read timed out "
483 "(phy %d, reg %d, val 0x%08x)\n", phy, reg, val);
484 val = 0;
485 }
486
487 /* Restore the autopoll bit if necessary. */
488 if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) {
489 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bnx_mi_mode);
490 DELAY(80);
491 }
492
493 if (val & BGE_MICOMM_READFAIL)
494 return 0;
495
496 return (val & 0xFFFF);
497}
498
499static int
500bnx_miibus_writereg(device_t dev, int phy, int reg, int val)
501{
502 struct bnx_softc *sc = device_get_softc(dev);
503 int i;
504
505 KASSERT(phy == sc->bnx_phyno,
506 ("invalid phyno %d, should be %d", phy, sc->bnx_phyno));
507
508 if (sc->bnx_asicrev == BGE_ASICREV_BCM5906 &&
509 (reg == BRGPHY_MII_1000CTL || reg == BRGPHY_MII_AUXCTL))
510 return 0;
511
512 /* Clear the autopoll bit if set, otherwise may trigger PCI errors. */
513 if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) {
514 CSR_WRITE_4(sc, BGE_MI_MODE,
515 sc->bnx_mi_mode & ~BGE_MIMODE_AUTOPOLL);
516 DELAY(80);
517 }
518
519 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE | BGE_MICOMM_BUSY |
520 BGE_MIPHY(phy) | BGE_MIREG(reg) | val);
521
522 for (i = 0; i < BNX_TIMEOUT; i++) {
523 DELAY(10);
524 if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) {
525 DELAY(5);
526 CSR_READ_4(sc, BGE_MI_COMM); /* dummy read */
527 break;
528 }
529 }
530 if (i == BNX_TIMEOUT) {
531 if_printf(&sc->arpcom.ac_if, "PHY write timed out "
532 "(phy %d, reg %d, val %d)\n", phy, reg, val);
533 }
534
535 /* Restore the autopoll bit if necessary. */
536 if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) {
537 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bnx_mi_mode);
538 DELAY(80);
539 }
540
541 return 0;
542}
543
544static void
545bnx_miibus_statchg(device_t dev)
546{
547 struct bnx_softc *sc;
548 struct mii_data *mii;
549
550 sc = device_get_softc(dev);
551 mii = device_get_softc(sc->bnx_miibus);
552
553 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
554 (IFM_ACTIVE | IFM_AVALID)) {
555 switch (IFM_SUBTYPE(mii->mii_media_active)) {
556 case IFM_10_T:
557 case IFM_100_TX:
558 sc->bnx_link = 1;
559 break;
560 case IFM_1000_T:
561 case IFM_1000_SX:
562 case IFM_2500_SX:
563 if (sc->bnx_asicrev != BGE_ASICREV_BCM5906)
564 sc->bnx_link = 1;
565 else
566 sc->bnx_link = 0;
567 break;
568 default:
569 sc->bnx_link = 0;
570 break;
571 }
572 } else {
573 sc->bnx_link = 0;
574 }
575 if (sc->bnx_link == 0)
576 return;
577
578 BNX_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
579 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
580 IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) {
581 BNX_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
582 } else {
583 BNX_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
584 }
585
586 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
587 BNX_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
588 } else {
589 BNX_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
590 }
591}
592
593/*
594 * Memory management for jumbo frames.
595 */
596static int
597bnx_alloc_jumbo_mem(struct bnx_softc *sc)
598{
599 struct ifnet *ifp = &sc->arpcom.ac_if;
600 struct bnx_jslot *entry;
601 uint8_t *ptr;
602 bus_addr_t paddr;
603 int i, error;
604
605 /*
606 * Create tag for jumbo mbufs.
607 * This is really a bit of a kludge. We allocate a special
608 * jumbo buffer pool which (thanks to the way our DMA
609 * memory allocation works) will consist of contiguous
610 * pages. This means that even though a jumbo buffer might
611 * be larger than a page size, we don't really need to
612 * map it into more than one DMA segment. However, the
613 * default mbuf tag will result in multi-segment mappings,
614 * so we have to create a special jumbo mbuf tag that
615 * lets us get away with mapping the jumbo buffers as
616 * a single segment. I think eventually the driver should
617 * be changed so that it uses ordinary mbufs and cluster
618 * buffers, i.e. jumbo frames can span multiple DMA
619 * descriptors. But that's a project for another day.
620 */
621
622 /*
623 * Create DMA resources for the jumbo RX ring.
624 */
625 error = bnx_dma_block_alloc(sc, BGE_JUMBO_RX_RING_SZ,
626 &sc->bnx_cdata.bnx_rx_jumbo_ring_tag,
627 &sc->bnx_cdata.bnx_rx_jumbo_ring_map,
628 (void *)&sc->bnx_ldata.bnx_rx_jumbo_ring,
629 &sc->bnx_ldata.bnx_rx_jumbo_ring_paddr);
630 if (error) {
631 if_printf(ifp, "could not create jumbo RX ring\n");
632 return error;
633 }
634
635 /*
636 * Create DMA resources for the jumbo buffer block.
637 */
638 error = bnx_dma_block_alloc(sc, BNX_JMEM,
639 &sc->bnx_cdata.bnx_jumbo_tag,
640 &sc->bnx_cdata.bnx_jumbo_map,
641 (void **)&sc->bnx_ldata.bnx_jumbo_buf,
642 &paddr);
643 if (error) {
644 if_printf(ifp, "could not create jumbo buffer\n");
645 return error;
646 }
647
648 SLIST_INIT(&sc->bnx_jfree_listhead);
649
650 /*
651 * Now divide it up into 9K pieces and save the addresses
652 * in an array. Note that we play an evil trick here by using
653 * the first few bytes in the buffer to hold the address
654 * of the softc structure for this interface. This is because
655 * bnx_jfree() needs it, but it is called by the mbuf management
656 * code which will not pass it to us explicitly.
657 */
658 for (i = 0, ptr = sc->bnx_ldata.bnx_jumbo_buf; i < BNX_JSLOTS; i++) {
659 entry = &sc->bnx_cdata.bnx_jslots[i];
660 entry->bnx_sc = sc;
661 entry->bnx_buf = ptr;
662 entry->bnx_paddr = paddr;
663 entry->bnx_inuse = 0;
664 entry->bnx_slot = i;
665 SLIST_INSERT_HEAD(&sc->bnx_jfree_listhead, entry, jslot_link);
666
667 ptr += BNX_JLEN;
668 paddr += BNX_JLEN;
669 }
670 return 0;
671}
672
673static void
674bnx_free_jumbo_mem(struct bnx_softc *sc)
675{
676 /* Destroy jumbo RX ring. */
677 bnx_dma_block_free(sc->bnx_cdata.bnx_rx_jumbo_ring_tag,
678 sc->bnx_cdata.bnx_rx_jumbo_ring_map,
679 sc->bnx_ldata.bnx_rx_jumbo_ring);
680
681 /* Destroy jumbo buffer block. */
682 bnx_dma_block_free(sc->bnx_cdata.bnx_jumbo_tag,
683 sc->bnx_cdata.bnx_jumbo_map,
684 sc->bnx_ldata.bnx_jumbo_buf);
685}
686
687/*
688 * Allocate a jumbo buffer.
689 */
690static struct bnx_jslot *
691bnx_jalloc(struct bnx_softc *sc)
692{
693 struct bnx_jslot *entry;
694
695 lwkt_serialize_enter(&sc->bnx_jslot_serializer);
696 entry = SLIST_FIRST(&sc->bnx_jfree_listhead);
697 if (entry) {
698 SLIST_REMOVE_HEAD(&sc->bnx_jfree_listhead, jslot_link);
699 entry->bnx_inuse = 1;
700 } else {
701 if_printf(&sc->arpcom.ac_if, "no free jumbo buffers\n");
702 }
703 lwkt_serialize_exit(&sc->bnx_jslot_serializer);
704 return(entry);
705}
706
707/*
708 * Adjust usage count on a jumbo buffer.
709 */
710static void
711bnx_jref(void *arg)
712{
713 struct bnx_jslot *entry = (struct bnx_jslot *)arg;
714 struct bnx_softc *sc = entry->bnx_sc;
715
716 if (sc == NULL)
717 panic("bnx_jref: can't find softc pointer!");
718
719 if (&sc->bnx_cdata.bnx_jslots[entry->bnx_slot] != entry) {
720 panic("bnx_jref: asked to reference buffer "
721 "that we don't manage!");
722 } else if (entry->bnx_inuse == 0) {
723 panic("bnx_jref: buffer already free!");
724 } else {
725 atomic_add_int(&entry->bnx_inuse, 1);
726 }
727}
728
729/*
730 * Release a jumbo buffer.
731 */
732static void
733bnx_jfree(void *arg)
734{
735 struct bnx_jslot *entry = (struct bnx_jslot *)arg;
736 struct bnx_softc *sc = entry->bnx_sc;
737
738 if (sc == NULL)
739 panic("bnx_jfree: can't find softc pointer!");
740
741 if (&sc->bnx_cdata.bnx_jslots[entry->bnx_slot] != entry) {
742 panic("bnx_jfree: asked to free buffer that we don't manage!");
743 } else if (entry->bnx_inuse == 0) {
744 panic("bnx_jfree: buffer already free!");
745 } else {
746 /*
747 * Possible MP race to 0, use the serializer. The atomic insn
748 * is still needed for races against bnx_jref().
749 */
750 lwkt_serialize_enter(&sc->bnx_jslot_serializer);
751 atomic_subtract_int(&entry->bnx_inuse, 1);
752 if (entry->bnx_inuse == 0) {
753 SLIST_INSERT_HEAD(&sc->bnx_jfree_listhead,
754 entry, jslot_link);
755 }
756 lwkt_serialize_exit(&sc->bnx_jslot_serializer);
757 }
758}
759
760
761/*
762 * Initialize a standard receive ring descriptor.
763 */
764static int
765bnx_newbuf_std(struct bnx_softc *sc, int i, int init)
766{
767 struct mbuf *m_new = NULL;
768 bus_dma_segment_t seg;
769 bus_dmamap_t map;
770 int error, nsegs;
771
772 m_new = m_getcl(init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
773 if (m_new == NULL)
774 return ENOBUFS;
775 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
776 m_adj(m_new, ETHER_ALIGN);
777
778 error = bus_dmamap_load_mbuf_segment(sc->bnx_cdata.bnx_rx_mtag,
779 sc->bnx_cdata.bnx_rx_tmpmap, m_new,
780 &seg, 1, &nsegs, BUS_DMA_NOWAIT);
781 if (error) {
782 m_freem(m_new);
783 return error;
784 }
785
786 if (!init) {
787 bus_dmamap_sync(sc->bnx_cdata.bnx_rx_mtag,
788 sc->bnx_cdata.bnx_rx_std_dmamap[i],
789 BUS_DMASYNC_POSTREAD);
790 bus_dmamap_unload(sc->bnx_cdata.bnx_rx_mtag,
791 sc->bnx_cdata.bnx_rx_std_dmamap[i]);
792 }
793
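	/*
	 * Swap the spare map with this slot's map: the freshly loaded
	 * mbuf keeps its DMA mapping, and the old map becomes the
	 * spare for the next replenish.
	 */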
794 map = sc->bnx_cdata.bnx_rx_tmpmap;
795 sc->bnx_cdata.bnx_rx_tmpmap = sc->bnx_cdata.bnx_rx_std_dmamap[i];
796 sc->bnx_cdata.bnx_rx_std_dmamap[i] = map;
797
798 sc->bnx_cdata.bnx_rx_std_chain[i].bnx_mbuf = m_new;
799 sc->bnx_cdata.bnx_rx_std_chain[i].bnx_paddr = seg.ds_addr;
800
801 bnx_setup_rxdesc_std(sc, i);
802 return 0;
803}
804
805static void
806bnx_setup_rxdesc_std(struct bnx_softc *sc, int i)
807{
808 struct bnx_rxchain *rc;
809 struct bge_rx_bd *r;
810
811 rc = &sc->bnx_cdata.bnx_rx_std_chain[i];
812 r = &sc->bnx_ldata.bnx_rx_std_ring[i];
813
814 r->bge_addr.bge_addr_lo = BGE_ADDR_LO(rc->bnx_paddr);
815 r->bge_addr.bge_addr_hi = BGE_ADDR_HI(rc->bnx_paddr);
816 r->bge_len = rc->bnx_mbuf->m_len;
817 r->bge_idx = i;
818 r->bge_flags = BGE_RXBDFLAG_END;
819}
820
821/*
822 * Initialize a jumbo receive ring descriptor. This allocates
823 * a jumbo buffer from the pool managed internally by the driver.
824 */
825static int
826bnx_newbuf_jumbo(struct bnx_softc *sc, int i, int init)
827{
828 struct mbuf *m_new = NULL;
829 struct bnx_jslot *buf;
830 bus_addr_t paddr;
831
832 /* Allocate the mbuf. */
833 MGETHDR(m_new, init ? MB_WAIT : MB_DONTWAIT, MT_DATA);
834 if (m_new == NULL)
835 return ENOBUFS;
836
837 /* Allocate the jumbo buffer */
838 buf = bnx_jalloc(sc);
839 if (buf == NULL) {
840 m_freem(m_new);
841 return ENOBUFS;
842 }
843
844 /* Attach the buffer to the mbuf. */
845 m_new->m_ext.ext_arg = buf;
846 m_new->m_ext.ext_buf = buf->bnx_buf;
847 m_new->m_ext.ext_free = bnx_jfree;
848 m_new->m_ext.ext_ref = bnx_jref;
849 m_new->m_ext.ext_size = BNX_JUMBO_FRAMELEN;
850
851 m_new->m_flags |= M_EXT;
852
853 m_new->m_data = m_new->m_ext.ext_buf;
854 m_new->m_len = m_new->m_pkthdr.len = m_new->m_ext.ext_size;
855
856 paddr = buf->bnx_paddr;
857 m_adj(m_new, ETHER_ALIGN);
858 paddr += ETHER_ALIGN;
859
860 /* Save necessary information */
861 sc->bnx_cdata.bnx_rx_jumbo_chain[i].bnx_mbuf = m_new;
862 sc->bnx_cdata.bnx_rx_jumbo_chain[i].bnx_paddr = paddr;
863
864 /* Set up the descriptor. */
865 bnx_setup_rxdesc_jumbo(sc, i);
866 return 0;
867}
868
869static void
870bnx_setup_rxdesc_jumbo(struct bnx_softc *sc, int i)
871{
872 struct bge_rx_bd *r;
873 struct bnx_rxchain *rc;
874
875 r = &sc->bnx_ldata.bnx_rx_jumbo_ring[i];
876 rc = &sc->bnx_cdata.bnx_rx_jumbo_chain[i];
877
878 r->bge_addr.bge_addr_lo = BGE_ADDR_LO(rc->bnx_paddr);
879 r->bge_addr.bge_addr_hi = BGE_ADDR_HI(rc->bnx_paddr);
880 r->bge_len = rc->bnx_mbuf->m_len;
881 r->bge_idx = i;
882 r->bge_flags = BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING;
883}
884
885static int
886bnx_init_rx_ring_std(struct bnx_softc *sc)
887{
888 int i, error;
889
890 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
891 error = bnx_newbuf_std(sc, i, 1);
892 if (error)
893 return error;
894 }
895
896 sc->bnx_std = BGE_STD_RX_RING_CNT - 1;
897 bnx_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bnx_std);
898
899 return(0);
900}
901
902static void
903bnx_free_rx_ring_std(struct bnx_softc *sc)
904{
905 int i;
906
907 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
908 struct bnx_rxchain *rc = &sc->bnx_cdata.bnx_rx_std_chain[i];
909
910 if (rc->bnx_mbuf != NULL) {
911 bus_dmamap_unload(sc->bnx_cdata.bnx_rx_mtag,
912 sc->bnx_cdata.bnx_rx_std_dmamap[i]);
913 m_freem(rc->bnx_mbuf);
914 rc->bnx_mbuf = NULL;
915 }
916 bzero(&sc->bnx_ldata.bnx_rx_std_ring[i],
917 sizeof(struct bge_rx_bd));
918 }
919}
920
921static int
922bnx_init_rx_ring_jumbo(struct bnx_softc *sc)
923{
924 struct bge_rcb *rcb;
925 int i, error;
926
927 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
928 error = bnx_newbuf_jumbo(sc, i, 1);
929 if (error)
930 return error;
931 }
932
933 sc->bnx_jumbo = BGE_JUMBO_RX_RING_CNT - 1;
934
935 rcb = &sc->bnx_ldata.bnx_info.bnx_jumbo_rx_rcb;
936 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 0);
937 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
938
939 bnx_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bnx_jumbo);
940
941 return(0);
942}
943
944static void
945bnx_free_rx_ring_jumbo(struct bnx_softc *sc)
946{
947 int i;
948
949 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
950 struct bnx_rxchain *rc = &sc->bnx_cdata.bnx_rx_jumbo_chain[i];
951
952 if (rc->bnx_mbuf != NULL) {
953 m_freem(rc->bnx_mbuf);
954 rc->bnx_mbuf = NULL;
955 }
956 bzero(&sc->bnx_ldata.bnx_rx_jumbo_ring[i],
957 sizeof(struct bge_rx_bd));
958 }
959}
960
961static void
962bnx_free_tx_ring(struct bnx_softc *sc)
963{
964 int i;
965
966 for (i = 0; i < BGE_TX_RING_CNT; i++) {
967 if (sc->bnx_cdata.bnx_tx_chain[i] != NULL) {
968 bus_dmamap_unload(sc->bnx_cdata.bnx_tx_mtag,
969 sc->bnx_cdata.bnx_tx_dmamap[i]);
970 m_freem(sc->bnx_cdata.bnx_tx_chain[i]);
971 sc->bnx_cdata.bnx_tx_chain[i] = NULL;
972 }
973 bzero(&sc->bnx_ldata.bnx_tx_ring[i],
974 sizeof(struct bge_tx_bd));
975 }
976}
977
978static int
979bnx_init_tx_ring(struct bnx_softc *sc)
980{
981 sc->bnx_txcnt = 0;
982 sc->bnx_tx_saved_considx = 0;
983 sc->bnx_tx_prodidx = 0;
984
985 /* Initialize transmit producer index for host-memory send ring. */
986 bnx_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bnx_tx_prodidx);
987 bnx_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
988
989 return(0);
990}
991
992static void
993bnx_setmulti(struct bnx_softc *sc)
994{
995 struct ifnet *ifp;
996 struct ifmultiaddr *ifma;
997 uint32_t hashes[4] = { 0, 0, 0, 0 };
998 int h, i;
999
1000 ifp = &sc->arpcom.ac_if;
1001
1002 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
1003 for (i = 0; i < 4; i++)
1004 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
1005 return;
1006 }
1007
1008 /* First, zot all the existing filters. */
1009 for (i = 0; i < 4; i++)
1010 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);
1011
1012 /* Now program new ones. */
1013 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1014 if (ifma->ifma_addr->sa_family != AF_LINK)
1015 continue;
1016 h = ether_crc32_le(
1017 LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
1018 ETHER_ADDR_LEN) & 0x7f;
1019 hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
1020 }
1021
1022 for (i = 0; i < 4; i++)
1023 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
1024}
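/*
 * Sketch of the hash-to-filter mapping used above: the low 7 CRC bits
 * select one of 128 buckets; bits 6:5 pick one of the four BGE_MAR
 * registers and bits 4:0 the bit within it, i.e.
 *
 *	h   = ether_crc32_le(lladdr, ETHER_ADDR_LEN) & 0x7f;
 *	reg = BGE_MAR0 + ((h & 0x60) >> 5) * 4;
 *	bit = 1 << (h & 0x1f);
 */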
1025
1026/*
1027 * Do endian, PCI and DMA initialization. Also check the on-board ROM
1028 * self-test results.
1029 */
1030static int
1031bnx_chipinit(struct bnx_softc *sc)
1032{
1033 uint32_t dma_rw_ctl, mode_ctl;
1034 int i;
1035
1036 /* Set endian type before we access any non-PCI registers. */
1037 pci_write_config(sc->bnx_dev, BGE_PCI_MISC_CTL,
1038 BGE_INIT | BGE_PCIMISCCTL_TAGGED_STATUS, 4);
1039
1040 /* Clear the MAC control register */
1041 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
1042
1043 /*
1044 * Clear the MAC statistics block in the NIC's
1045 * internal memory.
1046 */
1047 for (i = BGE_STATS_BLOCK;
1048 i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t))
1049 BNX_MEMWIN_WRITE(sc, i, 0);
1050
1051 for (i = BGE_STATUS_BLOCK;
1052 i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t))
1053 BNX_MEMWIN_WRITE(sc, i, 0);
1054
1055 /* Set up the PCI DMA control register. */
1056 dma_rw_ctl = BGE_PCI_READ_CMD | BGE_PCI_WRITE_CMD |
1057 (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
1058
1059 if (BNX_IS_57765_PLUS(sc)) {
1060 dma_rw_ctl &= ~BGE_PCIDMARWCTL_DIS_CACHE_ALIGNMENT;
1061 if (sc->bnx_chipid == BGE_CHIPID_BCM57765_A0)
1062 dma_rw_ctl &= ~BGE_PCIDMARWCTL_CRDRDR_RDMA_MRRS_MSK;
1063 /*
1064 * Enable HW workaround for controllers that misinterpret
1065 * a status tag update and leave interrupts permanently
1066 * disabled.
1067 */
1068 if (sc->bnx_asicrev != BGE_ASICREV_BCM5717 &&
1069 sc->bnx_asicrev != BGE_ASICREV_BCM57765)
1070 dma_rw_ctl |= BGE_PCIDMARWCTL_TAGGED_STATUS_WA;
1071 }
1072 pci_write_config(sc->bnx_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);
1073
1074 /*
1075 * Set up general mode register.
1076 */
1077 mode_ctl = bnx_dma_swap_options(sc) | BGE_MODECTL_MAC_ATTN_INTR |
1078 BGE_MODECTL_HOST_SEND_BDS | BGE_MODECTL_TX_NO_PHDR_CSUM;
1079 CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl);
1080
1081 /*
1082 * Disable memory write invalidate. Apparently it is not supported
1083 * properly by these devices. Also ensure that INTx isn't disabled,
1084 * as these chips need it even when using MSI.
1085 */
1086 PCI_CLRBIT(sc->bnx_dev, BGE_PCI_CMD,
1087 (PCIM_CMD_MWRICEN | PCIM_CMD_INTxDIS), 4);
1088
1089 /* Set the timer prescaler (always 66MHz) */
1090 CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/);
1091
1092 if (sc->bnx_asicrev == BGE_ASICREV_BCM5906) {
1093 DELAY(40); /* XXX */
1094
1095 /* Put PHY into ready state */
1096 BNX_CLRBIT(sc, BGE_MISC_CFG, BGE_MISCCFG_EPHY_IDDQ);
1097 CSR_READ_4(sc, BGE_MISC_CFG); /* Flush */
1098 DELAY(40);
1099 }
1100
1101 return(0);
1102}
1103
1104static int
1105bnx_blockinit(struct bnx_softc *sc)
1106{
1107 struct bge_rcb *rcb;
1108 bus_size_t vrcb;
1109 bge_hostaddr taddr;
1110 uint32_t val;
1111 int i, limit;
1112
1113 /*
1114 * Initialize the memory window pointer register so that
1115 * we can access the first 32K of internal NIC RAM. This will
1116 * allow us to set up the TX send ring RCBs and the RX return
1117 * ring RCBs, plus other things which live in NIC memory.
1118 */
1119 CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);
1120
1121 /* Configure mbuf pool watermarks */
1122 if (BNX_IS_57765_PLUS(sc)) {
1123 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1124 if (sc->arpcom.ac_if.if_mtu > ETHERMTU) {
1125 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x7e);
1126 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xea);
1127 } else {
1128 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x2a);
1129 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xa0);
1130 }
1131 } else if (sc->bnx_asicrev == BGE_ASICREV_BCM5906) {
1132 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1133 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x04);
1134 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x10);
1135 } else {
1136 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1137 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
1138 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1139 }
1140
1141 /* Configure DMA resource watermarks */
1142 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
1143 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);
1144
1145 /* Enable buffer manager */
1146 val = BGE_BMANMODE_ENABLE | BGE_BMANMODE_LOMBUF_ATTN;
1147 /*
1148 * Change the arbitration algorithm of TXMBUF read request to
1149 * round-robin instead of priority based for BCM5719. When
1150 * TXFIFO is almost empty, RDMA will hold its request until
1151 * TXFIFO is not almost empty.
1152 */
1153 if (sc->bnx_asicrev == BGE_ASICREV_BCM5719)
1154 val |= BGE_BMANMODE_NO_TX_UNDERRUN;
1155 CSR_WRITE_4(sc, BGE_BMAN_MODE, val);
1156
1157 /* Poll for buffer manager start indication */
1158 for (i = 0; i < BNX_TIMEOUT; i++) {
1159 if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
1160 break;
1161 DELAY(10);
1162 }
1163
1164 if (i == BNX_TIMEOUT) {
1165 if_printf(&sc->arpcom.ac_if,
1166 "buffer manager failed to start\n");
1167 return(ENXIO);
1168 }
1169
1170 /* Enable flow-through queues */
1171 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
1172 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
1173
1174 /* Wait until queue initialization is complete */
1175 for (i = 0; i < BNX_TIMEOUT; i++) {
1176 if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
1177 break;
1178 DELAY(10);
1179 }
1180
1181 if (i == BNX_TIMEOUT) {
1182 if_printf(&sc->arpcom.ac_if,
1183 "flow-through queue init failed\n");
1184 return(ENXIO);
1185 }
1186
1187 /*
1188 * Summary of rings supported by the controller:
1189 *
1190 * Standard Receive Producer Ring
1191 * - This ring is used to feed receive buffers for "standard"
1192 * sized frames (typically 1536 bytes) to the controller.
1193 *
1194 * Jumbo Receive Producer Ring
1195 * - This ring is used to feed receive buffers for jumbo sized
1196 * frames (i.e. anything bigger than the "standard" frames)
1197 * to the controller.
1198 *
1199 * Mini Receive Producer Ring
1200 * - This ring is used to feed receive buffers for "mini"
1201 * sized frames to the controller.
1202 * - This feature required external memory for the controller
1203 * but was never used in a production system. Should always
1204 * be disabled.
1205 *
1206 * Receive Return Ring
1207 * - After the controller has placed an incoming frame into a
1208 * receive buffer that buffer is moved into a receive return
1209 * ring. The driver is then responsible for passing the
1210 * buffer up to the stack. Many versions of the controller
1211 * support multiple RR rings.
1212 *
1213 * Send Ring
1214 * - This ring is used for outgoing frames. Many versions of
1215 * the controller support multiple send rings.
1216 */
1217
1218 /* Initialize the standard receive producer ring control block. */
1219 rcb = &sc->bnx_ldata.bnx_info.bnx_std_rx_rcb;
1220 rcb->bge_hostaddr.bge_addr_lo =
1221 BGE_ADDR_LO(sc->bnx_ldata.bnx_rx_std_ring_paddr);
1222 rcb->bge_hostaddr.bge_addr_hi =
1223 BGE_ADDR_HI(sc->bnx_ldata.bnx_rx_std_ring_paddr);
1224 if (BNX_IS_57765_PLUS(sc)) {
1225 /*
1226 * Bits 31-16: Programmable ring size (2048, 1024, 512, .., 32)
1227 * Bits 15-2 : Maximum RX frame size
1228 * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled
1229 * Bit 0 : Reserved
1230 */
1231 rcb->bge_maxlen_flags =
1232 BGE_RCB_MAXLEN_FLAGS(512, BNX_MAX_FRAMELEN << 2);
1233 } else {
1234 /*
1235 * Bits 31-16: Programmable ring size (512, 256, 128, 64, 32)
1236 * Bits 15-2 : Reserved (should be 0)
1237 * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled
1238 * Bit 0 : Reserved
1239 */
1240 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
1241 }
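	/*
	 * (For reference, assuming the usual if_bgereg.h definition:
	 * BGE_RCB_MAXLEN_FLAGS(len, flags) packs to
	 * ((len) << 16 | (flags)), matching the bit layouts described
	 * above.)
	 */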
1242 if (sc->bnx_asicrev == BGE_ASICREV_BCM5717 ||
1243 sc->bnx_asicrev == BGE_ASICREV_BCM5719 ||
1244 sc->bnx_asicrev == BGE_ASICREV_BCM5720)
1245 rcb->bge_nicaddr = BGE_STD_RX_RINGS_5717;
1246 else
1247 rcb->bge_nicaddr = BGE_STD_RX_RINGS;
1248 /* Write the standard receive producer ring control block. */
1249 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
1250 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
1251 CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1252 CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
1253 /* Reset the standard receive producer ring producer index. */
1254 bnx_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0);
1255
1256 /*
1257 * Initialize the jumbo RX producer ring control
1258 * block. We set the 'ring disabled' bit in the
1259 * flags field until we're actually ready to start
1260 * using this ring (i.e. once we set the MTU
1261 * high enough to require it).
1262 */
1263 if (BNX_IS_JUMBO_CAPABLE(sc)) {
1264 rcb = &sc->bnx_ldata.bnx_info.bnx_jumbo_rx_rcb;
1265 /* Get the jumbo receive producer ring RCB parameters. */
1266 rcb->bge_hostaddr.bge_addr_lo =
1267 BGE_ADDR_LO(sc->bnx_ldata.bnx_rx_jumbo_ring_paddr);
1268 rcb->bge_hostaddr.bge_addr_hi =
1269 BGE_ADDR_HI(sc->bnx_ldata.bnx_rx_jumbo_ring_paddr);
1270 rcb->bge_maxlen_flags =
1271 BGE_RCB_MAXLEN_FLAGS(BNX_MAX_FRAMELEN,
1272 BGE_RCB_FLAG_RING_DISABLED);
1273 if (sc->bnx_asicrev == BGE_ASICREV_BCM5717 ||
1274 sc->bnx_asicrev == BGE_ASICREV_BCM5719 ||
1275 sc->bnx_asicrev == BGE_ASICREV_BCM5720)
1276 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS_5717;
1277 else
1278 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
1279 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
1280 rcb->bge_hostaddr.bge_addr_hi);
1281 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
1282 rcb->bge_hostaddr.bge_addr_lo);
1283 /* Program the jumbo receive producer ring RCB parameters. */
1284 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
1285 rcb->bge_maxlen_flags);
1286 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
1287 /* Reset the jumbo receive producer ring producer index. */
1288 bnx_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
1289 }
1290
1291 /* Choose de-pipeline mode for BCM5906 A0, A1 and A2. */
1292 if (sc->bnx_asicrev == BGE_ASICREV_BCM5906 &&
1293 (sc->bnx_chipid == BGE_CHIPID_BCM5906_A0 ||
1294 sc->bnx_chipid == BGE_CHIPID_BCM5906_A1 ||
1295 sc->bnx_chipid == BGE_CHIPID_BCM5906_A2)) {
1296 CSR_WRITE_4(sc, BGE_ISO_PKT_TX,
1297 (CSR_READ_4(sc, BGE_ISO_PKT_TX) & ~3) | 2);
1298 }
1299
1300 /*
1301 * The BD ring replenish thresholds control how often the
1302 * hardware fetches new BD's from the producer rings in host
1303 * memory. Setting the value too low on a busy system can
1304 * starve the hardware and reduce the throughput.
1305 *
1306 * Set the BD ring replenish thresholds. The recommended
1307 * values are 1/8th the number of descriptors allocated to
1308 * each ring.
1309 */
1310 val = 8;
1311 CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, val);
1312 if (BNX_IS_JUMBO_CAPABLE(sc)) {
1313 CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH,
1314 BGE_JUMBO_RX_RING_CNT/8);
1315 }
1316 if (BNX_IS_57765_PLUS(sc)) {
1317 CSR_WRITE_4(sc, BGE_STD_REPLENISH_LWM, 32);
1318 CSR_WRITE_4(sc, BGE_JMB_REPLENISH_LWM, 16);
1319 }
1320
1321 /*
1322 * Disable all send rings by setting the 'ring disabled' bit
1323 * in the flags field of all the TX send ring control blocks,
1324 * located in NIC memory.
1325 */
1326 if (BNX_IS_5717_PLUS(sc))
1327 limit = 4;
1328 else
1329 limit = 1;
1330 vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1331 for (i = 0; i < limit; i++) {
1332 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1333 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
1334 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1335 vrcb += sizeof(struct bge_rcb);
1336 }
1337
1338 /* Configure send ring RCB 0 (we use only the first ring) */
1339 vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1340 BGE_HOSTADDR(taddr, sc->bnx_ldata.bnx_tx_ring_paddr);
1341 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1342 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1343 if (sc->bnx_asicrev == BGE_ASICREV_BCM5717 ||
1344 sc->bnx_asicrev == BGE_ASICREV_BCM5719 ||
1345 sc->bnx_asicrev == BGE_ASICREV_BCM5720) {
1346 RCB_WRITE_4(sc, vrcb, bge_nicaddr, BGE_SEND_RING_5717);
1347 } else {
1348 RCB_WRITE_4(sc, vrcb, bge_nicaddr,
1349 BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
1350 }
1351 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1352 BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
1353
1354 /*
1355 * Disable all receive return rings by setting the
1356 * 'ring disabled' bit in the flags field of all the receive
1357 * return ring control blocks, located in NIC memory.
1358 */
1359 if (BNX_IS_5717_PLUS(sc)) {
1360 /* Should be 17, use 16 until we get an SRAM map. */
1361 limit = 16;
1362 } else if (sc->bnx_asicrev == BGE_ASICREV_BCM57765) {
1363 limit = 4;
1364 } else {
1365 limit = 1;
1366 }
1367 /* Disable all receive return rings. */
1368 vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1369 for (i = 0; i < limit; i++) {
1370 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0);
1371 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0);
1372 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1373 BGE_RCB_FLAG_RING_DISABLED);
1374 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1375 bnx_writembx(sc, BGE_MBX_RX_CONS0_LO +
1376 (i * (sizeof(uint64_t))), 0);
1377 vrcb += sizeof(struct bge_rcb);
1378 }
1379
1380 /*
1381 * Set up receive return ring 0. Note that the NIC address
1382 * for RX return rings is 0x0. The return rings live entirely
1383 * within the host, so the nicaddr field in the RCB isn't used.
1384 */
1385 vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1386 BGE_HOSTADDR(taddr, sc->bnx_ldata.bnx_rx_return_ring_paddr);
1387 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1388 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1389 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1390 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1391 BGE_RCB_MAXLEN_FLAGS(sc->bnx_return_ring_cnt, 0));
1392
1393 /* Set random backoff seed for TX */
1394 CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
1395 sc->arpcom.ac_enaddr[0] + sc->arpcom.ac_enaddr[1] +
1396 sc->arpcom.ac_enaddr[2] + sc->arpcom.ac_enaddr[3] +
1397 sc->arpcom.ac_enaddr[4] + sc->arpcom.ac_enaddr[5] +
1398 BGE_TX_BACKOFF_SEED_MASK);
1399
1400 /* Set inter-packet gap */
1401 val = 0x2620;
1402 if (sc->bnx_asicrev == BGE_ASICREV_BCM5720) {
1403 val |= CSR_READ_4(sc, BGE_TX_LENGTHS) &
1404 (BGE_TXLEN_JMB_FRM_LEN_MSK | BGE_TXLEN_CNT_DN_VAL_MSK);
1405 }
1406 CSR_WRITE_4(sc, BGE_TX_LENGTHS, val);
1407
1408 /*
1409 * Specify which ring to use for packets that don't match
1410 * any RX rules.
1411 */
1412 CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
1413
1414 /*
1415 * Configure number of RX lists. One interrupt distribution
1416 * list, sixteen active lists, one bad frames class.
1417 */
1418 CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
1419
1420 /* Initialize RX list placement stats mask. */
1421 CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
1422 CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
1423
1424 /* Disable host coalescing until we get it set up */
1425 CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
1426
1427 /* Poll to make sure it's shut down. */
1428 for (i = 0; i < BNX_TIMEOUT; i++) {
1429 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
1430 break;
1431 DELAY(10);
1432 }
1433
1434 if (i == BNX_TIMEOUT) {
1435 if_printf(&sc->arpcom.ac_if,
1436 "host coalescing engine failed to idle\n");
1437 return(ENXIO);
1438 }
1439
1440 /* Set up host coalescing defaults */
1441 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bnx_rx_coal_ticks);
1442 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bnx_tx_coal_ticks);
1443 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bnx_rx_coal_bds);
1444 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bnx_tx_coal_bds);
1445 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, sc->bnx_rx_coal_bds_int);
1446 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, sc->bnx_tx_coal_bds_int);
1447
1448 /* Set up address of status block */
1449 bzero(sc->bnx_ldata.bnx_status_block, BGE_STATUS_BLK_SZ);
1450 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI,
1451 BGE_ADDR_HI(sc->bnx_ldata.bnx_status_block_paddr));
1452 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
1453 BGE_ADDR_LO(sc->bnx_ldata.bnx_status_block_paddr));
1454
1455 /* Set up status block partial update size. */
1456 val = BGE_STATBLKSZ_32BYTE;
1457#if 0
1458 /*
1459 * Does not seem to have visible effect in both
1460 * bulk data (1472B UDP datagram) and tiny data
1461 * (18B UDP datagram) TX tests.
1462 */
1463 val |= BGE_HCCMODE_CLRTICK_TX;
1464#endif
1465 /* Turn on host coalescing state machine */
1466 CSR_WRITE_4(sc, BGE_HCC_MODE, val | BGE_HCCMODE_ENABLE);
1467
1468 /* Turn on RX BD completion state machine and enable attentions */
1469 CSR_WRITE_4(sc, BGE_RBDC_MODE,
1470 BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);
1471
1472 /* Turn on RX list placement state machine */
1473 CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
1474
1475 val = BGE_MACMODE_TXDMA_ENB | BGE_MACMODE_RXDMA_ENB |
1476 BGE_MACMODE_RX_STATS_CLEAR | BGE_MACMODE_TX_STATS_CLEAR |
1477 BGE_MACMODE_RX_STATS_ENB | BGE_MACMODE_TX_STATS_ENB |
1478 BGE_MACMODE_FRMHDR_DMA_ENB;
1479
1480 if (sc->bnx_flags & BNX_FLAG_TBI)
1481 val |= BGE_PORTMODE_TBI;
1482 else if (sc->bnx_flags & BNX_FLAG_MII_SERDES)
1483 val |= BGE_PORTMODE_GMII;
1484 else
1485 val |= BGE_PORTMODE_MII;
1486
1487 /* Turn on DMA, clear stats */
1488 CSR_WRITE_4(sc, BGE_MAC_MODE, val);
1489
1490 /* Set misc. local control, enable interrupts on attentions */
1491 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);
1492
1493#ifdef notdef
1494 /* Assert GPIO pins for PHY reset */
1495 BNX_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
1496 BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
1497 BNX_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
1498 BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
1499#endif
1500
1501 /* Turn on write DMA state machine */
1502 val = BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS;
1503 /* Enable host coalescing bug fix. */
1504 val |= BGE_WDMAMODE_STATUS_TAG_FIX;
1505 if (sc->bnx_asicrev == BGE_ASICREV_BCM5785) {
1506 /* Request larger DMA burst size to get better performance. */
1507 val |= BGE_WDMAMODE_BURST_ALL_DATA;
1508 }
1509 CSR_WRITE_4(sc, BGE_WDMA_MODE, val);
1510 DELAY(40);
1511
1512 if (sc->bnx_asicrev == BGE_ASICREV_BCM5761 ||
1513 sc->bnx_asicrev == BGE_ASICREV_BCM5784 ||
1514 sc->bnx_asicrev == BGE_ASICREV_BCM5785 ||
1515 sc->bnx_asicrev == BGE_ASICREV_BCM57780 ||
1516 BNX_IS_57765_PLUS(sc)) {
1517 uint32_t dmactl;
1518
1519 dmactl = CSR_READ_4(sc, BGE_RDMA_RSRVCTRL);
1520 /*
1521 * Adjust tx margin to prevent TX data corruption and
1522 * fix internal FIFO overflow.
1523 */
1524 if (sc->bnx_asicrev == BGE_ASICREV_BCM5719 ||
1525 sc->bnx_asicrev == BGE_ASICREV_BCM5720) {
1526 dmactl &= ~(BGE_RDMA_RSRVCTRL_FIFO_LWM_MASK |
1527 BGE_RDMA_RSRVCTRL_FIFO_HWM_MASK |
1528 BGE_RDMA_RSRVCTRL_TXMRGN_MASK);
1529 dmactl |= BGE_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
1530 BGE_RDMA_RSRVCTRL_FIFO_HWM_1_5K |
1531 BGE_RDMA_RSRVCTRL_TXMRGN_320B;
1532 }
1533 /*
1534 * Enable fix for read DMA FIFO overruns.
1535 * The fix is to limit the number of RX BDs
1536 * the hardware would fetch at a time.
1537 */
1538 CSR_WRITE_4(sc, BGE_RDMA_RSRVCTRL,
1539 dmactl | BGE_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
1540 }
1541
1542 if (sc->bnx_asicrev == BGE_ASICREV_BCM5719) {
1543 CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL,
1544 CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) |
1545 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_4K |
1546 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K);
1547 } else if (sc->bnx_asicrev == BGE_ASICREV_BCM5720) {
1548 /*
1549 * Allow 4KB burst length reads for non-LSO frames.
1550 * Enable 512B burst length reads for buffer descriptors.
1551 */
1552 CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL,
1553 CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) |
1554 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_512 |
1555 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K);
1556 }
1557
1558 /* Turn on read DMA state machine */
1559 val = BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS;
1560 if (sc->bnx_asicrev == BGE_ASICREV_BCM5717)
1561 val |= BGE_RDMAMODE_MULT_DMA_RD_DIS;
1562 if (sc->bnx_asicrev == BGE_ASICREV_BCM5784 ||
1563 sc->bnx_asicrev == BGE_ASICREV_BCM5785 ||
1564 sc->bnx_asicrev == BGE_ASICREV_BCM57780) {
1565 val |= BGE_RDMAMODE_BD_SBD_CRPT_ATTN |
1566 BGE_RDMAMODE_MBUF_RBD_CRPT_ATTN |
1567 BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN;
1568 }
1569 if (sc->bnx_asicrev == BGE_ASICREV_BCM5720) {
1570 val |= CSR_READ_4(sc, BGE_RDMA_MODE) &
1571 BGE_RDMAMODE_H2BNC_VLAN_DET;
1572 /*
1573 * Allow multiple outstanding read requests from
1574 * non-LSO read DMA engine.
1575 */
1576 val &= ~BGE_RDMAMODE_MULT_DMA_RD_DIS;
1577 }
1578 val |= BGE_RDMAMODE_FIFO_LONG_BURST;
1579 CSR_WRITE_4(sc, BGE_RDMA_MODE, val);
1580 DELAY(40);
1581
1582 /* Turn on RX data completion state machine */
1583 CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
1584
1585 /* Turn on RX BD initiator state machine */
1586 CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
1587
1588 /* Turn on RX data and RX BD initiator state machine */
1589 CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
1590
1591 /* Turn on send BD completion state machine */
1592 CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
1593
1594 /* Turn on send data completion state machine */
1595 val = BGE_SDCMODE_ENABLE;
1596 if (sc->bnx_asicrev == BGE_ASICREV_BCM5761)
1597 val |= BGE_SDCMODE_CDELAY;
1598 CSR_WRITE_4(sc, BGE_SDC_MODE, val);
1599
1600 /* Turn on send data initiator state machine */
1601 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
1602
1603 /* Turn on send BD initiator state machine */
1604 CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
1605
1606 /* Turn on send BD selector state machine */
1607 CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
1608
1609 CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
1610 CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
1611 BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);
1612
1613 /* ack/clear link change events */
1614 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
1615 BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
1616 BGE_MACSTAT_LINK_CHANGED);
1617 CSR_WRITE_4(sc, BGE_MI_STS, 0);
1618
1619 /*
1620 * Enable attention when the link has changed state for
1621 * devices that use auto polling.
1622 */
1623 if (sc->bnx_flags & BNX_FLAG_TBI) {
1624 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
1625 } else {
1626 if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) {
1627 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bnx_mi_mode);
1628 DELAY(80);
1629 }
1630 }
1631
1632 /*
1633 * Clear any pending link state attention.
1634 * Otherwise some link state change events may be lost until attention
1635 * is cleared by bnx_intr() -> bnx_softc.bnx_link_upd() sequence.
1636 * It's not necessary on newer BCM chips - perhaps enabling link
1637 * state change attentions implies clearing pending attention.
1638 */
1639 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
1640 BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
1641 BGE_MACSTAT_LINK_CHANGED);
1642
1643 /* Enable link state change attentions. */
1644 BNX_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
1645
1646 return(0);
1647}
1648
1649/*
1650 * Probe for a Broadcom chip. Check the PCI vendor and device IDs
1651 * against our list and return its name if we find a match. Note
1652 * that since the Broadcom controller contains VPD support, we
1653 * can get the device name string from the controller itself instead
1654 * of the compiled-in string. This is a little slow, but it guarantees
1655 * we'll always announce the right product name.
1656 */
1657static int
1658bnx_probe(device_t dev)
1659{
1660 const struct bnx_type *t;
1661 uint16_t product, vendor;
1662
1663 if (!pci_is_pcie(dev))
1664 return ENXIO;
1665
1666 product = pci_get_device(dev);
1667 vendor = pci_get_vendor(dev);
1668
1669 for (t = bnx_devs; t->bnx_name != NULL; t++) {
1670 if (vendor == t->bnx_vid && product == t->bnx_did)
1671 break;
1672 }
1673 if (t->bnx_name == NULL)
1674 return ENXIO;
1675
1676 device_set_desc(dev, t->bnx_name);
1677 return 0;
1678}
1679
1680static int
1681bnx_attach(device_t dev)
1682{
1683 struct ifnet *ifp;
1684 struct bnx_softc *sc;
1685 uint32_t hwcfg = 0, misccfg;
1686 int error = 0, rid, capmask;
1687 uint8_t ether_addr[ETHER_ADDR_LEN];
1688 uint16_t product, vendor;
1689 driver_intr_t *intr_func;
1690 uintptr_t mii_priv = 0;
1691 u_int intr_flags;
1692
1693 sc = device_get_softc(dev);
1694 sc->bnx_dev = dev;
1695 callout_init(&sc->bnx_stat_timer);
1696 lwkt_serialize_init(&sc->bnx_jslot_serializer);
1697
1698 product = pci_get_device(dev);
1699 vendor = pci_get_vendor(dev);
1700
1701#ifndef BURN_BRIDGES
1702 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
1703 uint32_t irq, mem;
1704
1705 irq = pci_read_config(dev, PCIR_INTLINE, 4);
1706 mem = pci_read_config(dev, BGE_PCI_BAR0, 4);
1707
1708 device_printf(dev, "chip is in D%d power mode "
1709 "-- setting to D0\n", pci_get_powerstate(dev));
1710
1711 pci_set_powerstate(dev, PCI_POWERSTATE_D0);
1712
1713 pci_write_config(dev, PCIR_INTLINE, irq, 4);
1714 pci_write_config(dev, BGE_PCI_BAR0, mem, 4);
1715 }
1716#endif /* !BURN_BRIDGES */
1717
1718 /*
1719 * Map control/status registers.
1720 */
1721 pci_enable_busmaster(dev);
1722
1723 rid = BGE_PCI_BAR0;
1724 sc->bnx_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
1725 RF_ACTIVE);
1726
1727 if (sc->bnx_res == NULL) {
1728 device_printf(dev, "couldn't map memory\n");
1729 return ENXIO;
1730 }
1731
1732 sc->bnx_btag = rman_get_bustag(sc->bnx_res);
1733 sc->bnx_bhandle = rman_get_bushandle(sc->bnx_res);
1734
1735 /* Save various chip information */
1736 sc->bnx_chipid =
1737 pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >>
1738 BGE_PCIMISCCTL_ASICREV_SHIFT;
1739 if (BGE_ASICREV(sc->bnx_chipid) == BGE_ASICREV_USE_PRODID_REG) {
 1740		/* All chips with a dedicated ASICREV register have CPMU */
1741 sc->bnx_flags |= BNX_FLAG_CPMU;
1742
1743 switch (product) {
1744 case PCI_PRODUCT_BROADCOM_BCM5717:
1745 case PCI_PRODUCT_BROADCOM_BCM5718:
1746 case PCI_PRODUCT_BROADCOM_BCM5719:
1747 case PCI_PRODUCT_BROADCOM_BCM5720_ALT:
1748 sc->bnx_chipid = pci_read_config(dev,
1749 BGE_PCI_GEN2_PRODID_ASICREV, 4);
1750 break;
1751
1752 case PCI_PRODUCT_BROADCOM_BCM57761:
1753 case PCI_PRODUCT_BROADCOM_BCM57765:
1754 case PCI_PRODUCT_BROADCOM_BCM57781:
1755 case PCI_PRODUCT_BROADCOM_BCM57785:
1756 case PCI_PRODUCT_BROADCOM_BCM57791:
1757 case PCI_PRODUCT_BROADCOM_BCM57795:
1758 sc->bnx_chipid = pci_read_config(dev,
1759 BGE_PCI_GEN15_PRODID_ASICREV, 4);
1760 break;
1761
1762 default:
1763 sc->bnx_chipid = pci_read_config(dev,
1764 BGE_PCI_PRODID_ASICREV, 4);
1765 break;
1766 }
1767 }
1768 sc->bnx_asicrev = BGE_ASICREV(sc->bnx_chipid);
1769 sc->bnx_chiprev = BGE_CHIPREV(sc->bnx_chipid);
1770
1771 switch (sc->bnx_asicrev) {
1772 case BGE_ASICREV_BCM5717:
1773 case BGE_ASICREV_BCM5719:
1774 case BGE_ASICREV_BCM5720:
1775 sc->bnx_flags |= BNX_FLAG_5717_PLUS | BNX_FLAG_57765_PLUS;
1776 break;
1777
 1778	case BGE_ASICREV_BCM57765:
 1779		sc->bnx_flags |= BNX_FLAG_57765_FAMILY | BNX_FLAG_57765_PLUS;
1780 break;
1781 }
1782 sc->bnx_flags |= BNX_FLAG_SHORTDMA;
1783
1784 if (sc->bnx_asicrev == BGE_ASICREV_BCM5906)
1785 sc->bnx_flags |= BNX_FLAG_NO_EEPROM;
1786
1787 misccfg = CSR_READ_4(sc, BGE_MISC_CFG) & BGE_MISCCFG_BOARD_ID_MASK;
1788
1789 sc->bnx_pciecap = pci_get_pciecap_ptr(sc->bnx_dev);
1790 if (sc->bnx_asicrev == BGE_ASICREV_BCM5719 ||
1791 sc->bnx_asicrev == BGE_ASICREV_BCM5720)
1792 pcie_set_max_readrq(dev, PCIEM_DEVCTL_MAX_READRQ_2048);
1793 else
1794 pcie_set_max_readrq(dev, PCIEM_DEVCTL_MAX_READRQ_4096);
1795 device_printf(dev, "CHIP ID 0x%08x; "
1796 "ASIC REV 0x%02x; CHIP REV 0x%02x\n",
1797 sc->bnx_chipid, sc->bnx_asicrev, sc->bnx_chiprev);
1798
1799 /*
1800 * Set various PHY quirk flags.
1801 */
1802
1803 capmask = MII_CAPMASK_DEFAULT;
1804 if ((sc->bnx_asicrev == BGE_ASICREV_BCM5703 &&
1805 (misccfg == 0x4000 || misccfg == 0x8000)) ||
1806 (sc->bnx_asicrev == BGE_ASICREV_BCM5705 &&
1807 vendor == PCI_VENDOR_BROADCOM &&
1808 (product == PCI_PRODUCT_BROADCOM_BCM5901 ||
1809 product == PCI_PRODUCT_BROADCOM_BCM5901A2 ||
1810 product == PCI_PRODUCT_BROADCOM_BCM5705F)) ||
1811 (vendor == PCI_VENDOR_BROADCOM &&
1812 (product == PCI_PRODUCT_BROADCOM_BCM5751F ||
1813 product == PCI_PRODUCT_BROADCOM_BCM5753F ||
1814 product == PCI_PRODUCT_BROADCOM_BCM5787F)) ||
1815 product == PCI_PRODUCT_BROADCOM_BCM57790 ||
1816 sc->bnx_asicrev == BGE_ASICREV_BCM5906) {
1817 /* 10/100 only */
1818 capmask &= ~BMSR_EXTSTAT;
1819 }
1820
1821 mii_priv |= BRGPHY_FLAG_WIRESPEED;
1822
1823 if (sc->bnx_asicrev != BGE_ASICREV_BCM5906 &&
1824 sc->bnx_asicrev != BGE_ASICREV_BCM5717 &&
1825 sc->bnx_asicrev != BGE_ASICREV_BCM5719 &&
1826 sc->bnx_asicrev != BGE_ASICREV_BCM5720 &&
1827 sc->bnx_asicrev != BGE_ASICREV_BCM5785 &&
1828 sc->bnx_asicrev != BGE_ASICREV_BCM57765 &&
1829 sc->bnx_asicrev != BGE_ASICREV_BCM57780) {
1830 if (sc->bnx_asicrev == BGE_ASICREV_BCM5755 ||
1831 sc->bnx_asicrev == BGE_ASICREV_BCM5761 ||
1832 sc->bnx_asicrev == BGE_ASICREV_BCM5784 ||
1833 sc->bnx_asicrev == BGE_ASICREV_BCM5787) {
1834 if (product != PCI_PRODUCT_BROADCOM_BCM5722 &&
1835 product != PCI_PRODUCT_BROADCOM_BCM5756)
1836 mii_priv |= BRGPHY_FLAG_JITTER_BUG;
1837 if (product == PCI_PRODUCT_BROADCOM_BCM5755M)
1838 mii_priv |= BRGPHY_FLAG_ADJUST_TRIM;
1839 } else {
1840 mii_priv |= BRGPHY_FLAG_BER_BUG;
1841 }
1842 }
1843
1844 /*
1845 * Allocate interrupt
1846 */
1847 sc->bnx_irq_type = pci_alloc_1intr(dev, bnx_msi_enable, &sc->bnx_irq_rid,
1848 &intr_flags);
1849
1850 sc->bnx_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->bnx_irq_rid,
1851 intr_flags);
1852 if (sc->bnx_irq == NULL) {
1853 device_printf(dev, "couldn't map interrupt\n");
1854 error = ENXIO;
1855 goto fail;
1856 }
1857
1858 if (sc->bnx_irq_type == PCI_INTR_TYPE_MSI) {
1859 sc->bnx_flags |= BNX_FLAG_ONESHOT_MSI;
1860 bnx_enable_msi(sc);
1861 }
1862
 1863	/* Initialize if_name early, so if_printf can be used */
1864 ifp = &sc->arpcom.ac_if;
1865 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1866
1867 /* Try to reset the chip. */
1868 bnx_reset(sc);
1869
1870 if (bnx_chipinit(sc)) {
1871 device_printf(dev, "chip initialization failed\n");
1872 error = ENXIO;
1873 goto fail;
1874 }
1875
1876 /*
1877 * Get station address
1878 */
1879 error = bnx_get_eaddr(sc, ether_addr);
1880 if (error) {
1881 device_printf(dev, "failed to read station address\n");
1882 goto fail;
1883 }
1884
 1885	if (BNX_IS_57765_PLUS(sc)) {
1886 sc->bnx_return_ring_cnt = BGE_RETURN_RING_CNT;
1887 } else {
1888 /* 5705/5750 limits RX return ring to 512 entries. */
1889 sc->bnx_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
1890 }
1891
1892 error = bnx_dma_alloc(sc);
1893 if (error)
1894 goto fail;
1895
1896 /* Set default tuneable values. */
1897 sc->bnx_rx_coal_ticks = BNX_RX_COAL_TICKS_DEF;
1898 sc->bnx_tx_coal_ticks = BNX_TX_COAL_TICKS_DEF;
1899 sc->bnx_rx_coal_bds = BNX_RX_COAL_BDS_DEF;
1900 sc->bnx_tx_coal_bds = BNX_TX_COAL_BDS_DEF;
1901 sc->bnx_rx_coal_bds_int = BNX_RX_COAL_BDS_DEF;
1902 sc->bnx_tx_coal_bds_int = BNX_TX_COAL_BDS_DEF;
1903
1904 /* Set up ifnet structure */
1905 ifp->if_softc = sc;
1906 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1907 ifp->if_ioctl = bnx_ioctl;
1908 ifp->if_start = bnx_start;
1909#ifdef DEVICE_POLLING
1910 ifp->if_poll = bnx_poll;
1911#endif
1912 ifp->if_watchdog = bnx_watchdog;
1913 ifp->if_init = bnx_init;
1914 ifp->if_mtu = ETHERMTU;
1915 ifp->if_capabilities = IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
1916 ifq_set_maxlen(&ifp->if_snd, BGE_TX_RING_CNT - 1);
1917 ifq_set_ready(&ifp->if_snd);
1918
1919 ifp->if_capabilities |= IFCAP_HWCSUM;
1920 ifp->if_hwassist = BNX_CSUM_FEATURES;
1921 ifp->if_capenable = ifp->if_capabilities;
1922
1923 /*
1924 * Figure out what sort of media we have by checking the
1925 * hardware config word in the first 32k of NIC internal memory,
1926 * or fall back to examining the EEPROM if necessary.
1927 * Note: on some BCM5700 cards, this value appears to be unset.
1928 * If that's the case, we have to rely on identifying the NIC
1929 * by its PCI subsystem ID, as we do below for the SysKonnect
1930 * SK-9D41.
1931 */
1932 if (bnx_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER) {
1933 hwcfg = bnx_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
1934 } else {
1935 if (bnx_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
1936 sizeof(hwcfg))) {
1937 device_printf(dev, "failed to read EEPROM\n");
1938 error = ENXIO;
1939 goto fail;
1940 }
1941 hwcfg = ntohl(hwcfg);
1942 }
1943
1944 /* The SysKonnect SK-9D41 is a 1000baseSX card. */
1945 if (pci_get_subvendor(dev) == PCI_PRODUCT_SCHNEIDERKOCH_SK_9D41 ||
1946 (hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER)
1947 sc->bnx_flags |= BNX_FLAG_TBI;
1948
1949 /* Setup MI MODE */
1950 if (sc->bnx_flags & BNX_FLAG_CPMU)
1951 sc->bnx_mi_mode = BGE_MIMODE_500KHZ_CONST;
1952 else
1953 sc->bnx_mi_mode = BGE_MIMODE_BASE;
1954
1955 /* Setup link status update stuffs */
1956 if (sc->bnx_flags & BNX_FLAG_TBI) {
1957 sc->bnx_link_upd = bnx_tbi_link_upd;
1958 sc->bnx_link_chg = BGE_MACSTAT_LINK_CHANGED;
1959 } else if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) {
1960 sc->bnx_link_upd = bnx_autopoll_link_upd;
1961 sc->bnx_link_chg = BGE_MACSTAT_LINK_CHANGED;
1962 } else {
1963 sc->bnx_link_upd = bnx_copper_link_upd;
1964 sc->bnx_link_chg = BGE_MACSTAT_LINK_CHANGED;
1965 }
1966
1967 /* Set default PHY address */
1968 sc->bnx_phyno = 1;
1969
1970 /*
1971 * PHY address mapping for various devices.
1972 *
1973 * | F0 Cu | F0 Sr | F1 Cu | F1 Sr |
1974 * ---------+-------+-------+-------+-------+
1975 * BCM57XX | 1 | X | X | X |
1976 * BCM5704 | 1 | X | 1 | X |
1977 * BCM5717 | 1 | 8 | 2 | 9 |
1978 * BCM5719 | 1 | 8 | 2 | 9 |
1979 * BCM5720 | 1 | 8 | 2 | 9 |
1980 *
1981 * Other addresses may respond but they are not
1982 * IEEE compliant PHYs and should be ignored.
1983 */
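	/*
	 * Worked example (hypothetical board): on a BCM5719, PCI
	 * function 1 with the SerDes strap set maps to PHY address
	 * 1 + 8 = 9, and to 1 + 1 = 2 with the strap clear, matching
	 * the table above and the code below.
	 */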
 1984	if (BNX_IS_5717_PLUS(sc)) {
1985 int f;
1986
1987 f = pci_get_function(dev);
1988 if (sc->bnx_chipid == BGE_CHIPID_BCM5717_A0) {
1989 if (CSR_READ_4(sc, BGE_SGDIG_STS) &
1990 BGE_SGDIGSTS_IS_SERDES)
1991 sc->bnx_phyno = f + 8;
1992 else
1993 sc->bnx_phyno = f + 1;
1994 } else {
1995 if (CSR_READ_4(sc, BGE_CPMU_PHY_STRAP) &
1996 BGE_CPMU_PHY_STRAP_IS_SERDES)
1997 sc->bnx_phyno = f + 8;
1998 else
1999 sc->bnx_phyno = f + 1;
2000 }
2001 }
2002
2003 if (sc->bnx_flags & BNX_FLAG_TBI) {
2004 ifmedia_init(&sc->bnx_ifmedia, IFM_IMASK,
2005 bnx_ifmedia_upd, bnx_ifmedia_sts);
2006 ifmedia_add(&sc->bnx_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
2007 ifmedia_add(&sc->bnx_ifmedia,
2008 IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL);
2009 ifmedia_add(&sc->bnx_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
2010 ifmedia_set(&sc->bnx_ifmedia, IFM_ETHER|IFM_AUTO);
2011 sc->bnx_ifmedia.ifm_media = sc->bnx_ifmedia.ifm_cur->ifm_media;
2012 } else {
2013 struct mii_probe_args mii_args;
2014
2015 mii_probe_args_init(&mii_args, bnx_ifmedia_upd, bnx_ifmedia_sts);
2016 mii_args.mii_probemask = 1 << sc->bnx_phyno;
2017 mii_args.mii_capmask = capmask;
2018 mii_args.mii_privtag = MII_PRIVTAG_BRGPHY;
2019 mii_args.mii_priv = mii_priv;
2020
2021 error = mii_probe(dev, &sc->bnx_miibus, &mii_args);
2022 if (error) {
2023 device_printf(dev, "MII without any PHY!\n");
2024 goto fail;
2025 }
2026 }
2027
2028 /*
2029 * Create sysctl nodes.
2030 */
2031 sysctl_ctx_init(&sc->bnx_sysctl_ctx);
2032 sc->bnx_sysctl_tree = SYSCTL_ADD_NODE(&sc->bnx_sysctl_ctx,
2033 SYSCTL_STATIC_CHILDREN(_hw),
2034 OID_AUTO,
2035 device_get_nameunit(dev),
2036 CTLFLAG_RD, 0, "");
2037 if (sc->bnx_sysctl_tree == NULL) {
2038 device_printf(dev, "can't add sysctl node\n");
2039 error = ENXIO;
2040 goto fail;
2041 }
2042
2043 SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
2044 SYSCTL_CHILDREN(sc->bnx_sysctl_tree),
2045 OID_AUTO, "rx_coal_ticks",
2046 CTLTYPE_INT | CTLFLAG_RW,
2047 sc, 0, bnx_sysctl_rx_coal_ticks, "I",
2048 "Receive coalescing ticks (usec).");
2049 SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
2050 SYSCTL_CHILDREN(sc->bnx_sysctl_tree),
2051 OID_AUTO, "tx_coal_ticks",
2052 CTLTYPE_INT | CTLFLAG_RW,
2053 sc, 0, bnx_sysctl_tx_coal_ticks, "I",
2054 "Transmit coalescing ticks (usec).");
2055 SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
2056 SYSCTL_CHILDREN(sc->bnx_sysctl_tree),
2057 OID_AUTO, "rx_coal_bds",
2058 CTLTYPE_INT | CTLFLAG_RW,
2059 sc, 0, bnx_sysctl_rx_coal_bds, "I",
2060 "Receive max coalesced BD count.");
2061 SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
2062 SYSCTL_CHILDREN(sc->bnx_sysctl_tree),
2063 OID_AUTO, "tx_coal_bds",
2064 CTLTYPE_INT | CTLFLAG_RW,
2065 sc, 0, bnx_sysctl_tx_coal_bds, "I",
2066 "Transmit max coalesced BD count.");
2067 /*
2068 * A common design characteristic for many Broadcom
2069 * client controllers is that they only support a
2070 * single outstanding DMA read operation on the PCIe
2071 * bus. This means that it will take twice as long to
2072 * fetch a TX frame that is split into header and
2073 * payload buffers as it does to fetch a single,
2074 * contiguous TX frame (2 reads vs. 1 read). For these
2075 * controllers, coalescing buffers to reduce the number
 2076	 * of memory reads is an effective way to get maximum
 2077	 * performance (about 940Mbps). Without collapsing TX
 2078	 * buffers the maximum TCP bulk transfer performance
 2079	 * is about 850Mbps. However, forcibly coalescing mbufs
 2080	 * consumes a lot of CPU cycles, so leave it off by
2081 * default.
2082 */
2083 SYSCTL_ADD_INT(&sc->bnx_sysctl_ctx,
2084 SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
2085 "force_defrag", CTLFLAG_RW, &sc->bnx_force_defrag, 0,
2086 "Force defragment on TX path");
2087
2088 SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
2089 SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
2090 "rx_coal_bds_int", CTLTYPE_INT | CTLFLAG_RW,
2091 sc, 0, bnx_sysctl_rx_coal_bds_int, "I",
2092 "Receive max coalesced BD count during interrupt.");
2093 SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
2094 SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
2095 "tx_coal_bds_int", CTLTYPE_INT | CTLFLAG_RW,
2096 sc, 0, bnx_sysctl_tx_coal_bds_int, "I",
2097 "Transmit max coalesced BD count during interrupt.");
2098
2099 /*
2100 * Call MI attach routine.
2101 */
2102 ether_ifattach(ifp, ether_addr, NULL);
2103
2104 if (sc->bnx_irq_type == PCI_INTR_TYPE_MSI) {
2105 if (sc->bnx_flags & BNX_FLAG_ONESHOT_MSI) {
2106 intr_func = bnx_msi_oneshot;
2107 if (bootverbose)
2108 device_printf(dev, "oneshot MSI\n");
2109 } else {
2110 intr_func = bnx_msi;
2111 }
2112 } else {
2113 intr_func = bnx_intr_legacy;
2114 }
2115 error = bus_setup_intr(dev, sc->bnx_irq, INTR_MPSAFE, intr_func, sc,
2116 &sc->bnx_intrhand, ifp->if_serializer);
2117 if (error) {
2118 ether_ifdetach(ifp);
2119 device_printf(dev, "couldn't set up irq\n");
2120 goto fail;
2121 }
2122
2123 ifp->if_cpuid = rman_get_cpuid(sc->bnx_irq);
2124 KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus);
2125
2126 return(0);
2127fail:
2128 bnx_detach(dev);
2129 return(error);
2130}
2131
2132static int
2133bnx_detach(device_t dev)
2134{
2135 struct bnx_softc *sc = device_get_softc(dev);
2136
2137 if (device_is_attached(dev)) {
2138 struct ifnet *ifp = &sc->arpcom.ac_if;
2139
2140 lwkt_serialize_enter(ifp->if_serializer);
2141 bnx_stop(sc);
2142 bnx_reset(sc);
2143 bus_teardown_intr(dev, sc->bnx_irq, sc->bnx_intrhand);
2144 lwkt_serialize_exit(ifp->if_serializer);
2145
2146 ether_ifdetach(ifp);
2147 }
2148
2149 if (sc->bnx_flags & BNX_FLAG_TBI)
2150 ifmedia_removeall(&sc->bnx_ifmedia);
2151 if (sc->bnx_miibus)
2152 device_delete_child(dev, sc->bnx_miibus);
2153 bus_generic_detach(dev);
2154
2155 if (sc->bnx_irq != NULL) {
2156 bus_release_resource(dev, SYS_RES_IRQ, sc->bnx_irq_rid,
2157 sc->bnx_irq);
2158 }
2159 if (sc->bnx_irq_type == PCI_INTR_TYPE_MSI)
2160 pci_release_msi(dev);
2161
2162 if (sc->bnx_res != NULL) {
2163 bus_release_resource(dev, SYS_RES_MEMORY,
2164 BGE_PCI_BAR0, sc->bnx_res);
2165 }
2166
2167 if (sc->bnx_sysctl_tree != NULL)
2168 sysctl_ctx_free(&sc->bnx_sysctl_ctx);
2169
2170 bnx_dma_free(sc);
2171
2172 return 0;
2173}
2174
2175static void
2176bnx_reset(struct bnx_softc *sc)
2177{
2178 device_t dev;
2179 uint32_t cachesize, command, pcistate, reset;
2180 void (*write_op)(struct bnx_softc *, uint32_t, uint32_t);
2181 int i, val = 0;
2182 uint16_t devctl;
2183
2184 dev = sc->bnx_dev;
2185
2186 if (sc->bnx_asicrev != BGE_ASICREV_BCM5906)
2187 write_op = bnx_writemem_direct;
2188 else
2189 write_op = bnx_writereg_ind;
2190
2191 /* Save some important PCI state. */
2192 cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
2193 command = pci_read_config(dev, BGE_PCI_CMD, 4);
2194 pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);
2195
2196 pci_write_config(dev, BGE_PCI_MISC_CTL,
2197 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2198 BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW|
2199 BGE_PCIMISCCTL_TAGGED_STATUS, 4);
2200
2201 /* Disable fastboot on controllers that support it. */
2202 if (bootverbose)
2203 if_printf(&sc->arpcom.ac_if, "Disabling fastboot\n");
2204 CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0x0);
2205
2206 /*
2207 * Write the magic number to SRAM at offset 0xB50.
2208 * When firmware finishes its initialization it will
2209 * write ~BGE_MAGIC_NUMBER to the same location.
2210 */
2211 bnx_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
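	/*
	 * Concretely, assuming the stock BGE_MAGIC_NUMBER value of
	 * 0x4B657654: the handshake loop further down waits to read
	 * back its one's complement, 0xB49A89AB, before trusting the
	 * device.
	 */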
2212
2213 reset = BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1);
2214
2215 /* XXX: Broadcom Linux driver. */
2216 /* Force PCI-E 1.0a mode */
2217 if (sc->bnx_asicrev != BGE_ASICREV_BCM5785 &&
 2218	    !BNX_IS_57765_PLUS(sc) &&
2219 CSR_READ_4(sc, BGE_PCIE_PHY_TSTCTL) ==
2220 (BGE_PCIE_PHY_TSTCTL_PSCRAM |
2221 BGE_PCIE_PHY_TSTCTL_PCIE10)) {
2222 CSR_WRITE_4(sc, BGE_PCIE_PHY_TSTCTL,
2223 BGE_PCIE_PHY_TSTCTL_PSCRAM);
2224 }
2225 if (sc->bnx_chipid != BGE_CHIPID_BCM5750_A0) {
2226 /* Prevent PCIE link training during global reset */
2227 CSR_WRITE_4(sc, BGE_MISC_CFG, (1<<29));
2228 reset |= (1<<29);
2229 }
2230
2231 /*
2232 * Set GPHY Power Down Override to leave GPHY
2233 * powered up in D0 uninitialized.
2234 */
2235 if ((sc->bnx_flags & BNX_FLAG_CPMU) == 0)
2236 reset |= BGE_MISCCFG_GPHY_PD_OVERRIDE;
2237
2238 /* Issue global reset */
2239 write_op(sc, BGE_MISC_CFG, reset);
2240
2241 if (sc->bnx_asicrev == BGE_ASICREV_BCM5906) {
2242 uint32_t status, ctrl;
2243
2244 status = CSR_READ_4(sc, BGE_VCPU_STATUS);
2245 CSR_WRITE_4(sc, BGE_VCPU_STATUS,
2246 status | BGE_VCPU_STATUS_DRV_RESET);
2247 ctrl = CSR_READ_4(sc, BGE_VCPU_EXT_CTRL);
2248 CSR_WRITE_4(sc, BGE_VCPU_EXT_CTRL,
2249 ctrl & ~BGE_VCPU_EXT_CTRL_HALT_CPU);
2250 }
2251
2252 DELAY(1000);
2253
2254 /* XXX: Broadcom Linux driver. */
2255 if (sc->bnx_chipid == BGE_CHIPID_BCM5750_A0) {
2256 uint32_t v;
2257
2258 DELAY(500000); /* wait for link training to complete */
2259 v = pci_read_config(dev, 0xc4, 4);
2260 pci_write_config(dev, 0xc4, v | (1<<15), 4);
2261 }
2262
2263 devctl = pci_read_config(dev, sc->bnx_pciecap + PCIER_DEVCTRL, 2);
2264
2265 /* Disable no snoop and disable relaxed ordering. */
2266 devctl &= ~(PCIEM_DEVCTL_RELAX_ORDER | PCIEM_DEVCTL_NOSNOOP);
2267
 2268	/* Old PCI-E chips only support a 128-byte Max Payload Size. */
2269 if ((sc->bnx_flags & BNX_FLAG_CPMU) == 0) {
2270 devctl &= ~PCIEM_DEVCTL_MAX_PAYLOAD_MASK;
2271 devctl |= PCIEM_DEVCTL_MAX_PAYLOAD_128;
2272 }
2273
2274 pci_write_config(dev, sc->bnx_pciecap + PCIER_DEVCTRL,
2275 devctl, 2);
2276
2277 /* Clear error status. */
2278 pci_write_config(dev, sc->bnx_pciecap + PCIER_DEVSTS,
2279 PCIEM_DEVSTS_CORR_ERR |
2280 PCIEM_DEVSTS_NFATAL_ERR |
2281 PCIEM_DEVSTS_FATAL_ERR |
2282 PCIEM_DEVSTS_UNSUPP_REQ, 2);
2283
2284 /* Reset some of the PCI state that got zapped by reset */
2285 pci_write_config(dev, BGE_PCI_MISC_CTL,
2286 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2287 BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW|
2288 BGE_PCIMISCCTL_TAGGED_STATUS, 4);
2289 pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
2290 pci_write_config(dev, BGE_PCI_CMD, command, 4);
2291 write_op(sc, BGE_MISC_CFG, (65 << 1));
2292
2293 /* Enable memory arbiter */
2294 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
2295
2296 if (sc->bnx_asicrev == BGE_ASICREV_BCM5906) {
2297 for (i = 0; i < BNX_TIMEOUT; i++) {
2298 val = CSR_READ_4(sc, BGE_VCPU_STATUS);
2299 if (val & BGE_VCPU_STATUS_INIT_DONE)
2300 break;
2301 DELAY(100);
2302 }
2303 if (i == BNX_TIMEOUT) {
2304 if_printf(&sc->arpcom.ac_if, "reset timed out\n");
2305 return;
2306 }
2307 } else {
2308 /*
2309 * Poll until we see the 1's complement of the magic number.
2310 * This indicates that the firmware initialization
2311 * is complete.
2312 */
2313 for (i = 0; i < BNX_FIRMWARE_TIMEOUT; i++) {
2314 val = bnx_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
2315 if (val == ~BGE_MAGIC_NUMBER)
2316 break;
2317 DELAY(10);
2318 }
2319 if (i == BNX_FIRMWARE_TIMEOUT) {
2320 if_printf(&sc->arpcom.ac_if, "firmware handshake "
2321 "timed out, found 0x%08x\n", val);
2322 }
2323
2324 /* BCM57765 A0 needs additional time before accessing. */
2325 if (sc->bnx_chipid == BGE_CHIPID_BCM57765_A0)
2326 DELAY(10 * 1000);
2327 }
2328
2329 /*
2330 * XXX Wait for the value of the PCISTATE register to
2331 * return to its original pre-reset state. This is a
2332 * fairly good indicator of reset completion. If we don't
2333 * wait for the reset to fully complete, trying to read
2334 * from the device's non-PCI registers may yield garbage
2335 * results.
2336 */
2337 for (i = 0; i < BNX_TIMEOUT; i++) {
2338 if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate)
2339 break;
2340 DELAY(10);
2341 }
2342
2343 /* Fix up byte swapping */
2344 CSR_WRITE_4(sc, BGE_MODE_CTL, bnx_dma_swap_options(sc));
2345
2346 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
2347
2348 /*
2349 * The 5704 in TBI mode apparently needs some special
 2350	 * adjustment to ensure the SERDES drive level is set
2351 * to 1.2V.
2352 */
2353 if (sc->bnx_asicrev == BGE_ASICREV_BCM5704 &&
2354 (sc->bnx_flags & BNX_FLAG_TBI)) {
2355 uint32_t serdescfg;
2356
2357 serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG);
2358 serdescfg = (serdescfg & ~0xFFF) | 0x880;
2359 CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg);
2360 }
2361
2362 /* XXX: Broadcom Linux driver. */
 2363	if (!BNX_IS_57765_PLUS(sc) &&
2364 sc->bnx_chipid != BGE_CHIPID_BCM5750_A0 &&
2365 sc->bnx_asicrev != BGE_ASICREV_BCM5785) {
2366 uint32_t v;
2367
2368 /* Enable Data FIFO protection. */
2369 v = CSR_READ_4(sc, 0x7c00);
2370 CSR_WRITE_4(sc, 0x7c00, v | (1<<25));
2371 }
2372
2373 DELAY(10000);
2374
2375 if (sc->bnx_asicrev == BGE_ASICREV_BCM5720) {
2376 BNX_CLRBIT(sc, BGE_CPMU_CLCK_ORIDE,
2377 CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
2378 }
2379}
2380
2381/*
2382 * Frame reception handling. This is called if there's a frame
2383 * on the receive return list.
2384 *
2385 * Note: we have to be able to handle two possibilities here:
 2386 * 1) the frame is from the jumbo receive ring
2387 * 2) the frame is from the standard receive ring
2388 */
2389
2390static void
2391bnx_rxeof(struct bnx_softc *sc, uint16_t rx_prod)
2392{
2393 struct ifnet *ifp;
2394 int stdcnt = 0, jumbocnt = 0;
2395
2396 ifp = &sc->arpcom.ac_if;
2397
2398 while (sc->bnx_rx_saved_considx != rx_prod) {
2399 struct bge_rx_bd *cur_rx;
2400 uint32_t rxidx;
2401 struct mbuf *m = NULL;
2402 uint16_t vlan_tag = 0;
2403 int have_tag = 0;
2404
2405 cur_rx =
2406 &sc->bnx_ldata.bnx_rx_return_ring[sc->bnx_rx_saved_considx];
2407
2408 rxidx = cur_rx->bge_idx;
2409 BNX_INC(sc->bnx_rx_saved_considx, sc->bnx_return_ring_cnt);
2410
2411 if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
2412 have_tag = 1;
2413 vlan_tag = cur_rx->bge_vlan_tag;
2414 }
2415
2416 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
2417 BNX_INC(sc->bnx_jumbo, BGE_JUMBO_RX_RING_CNT);
2418 jumbocnt++;
2419
2420 if (rxidx != sc->bnx_jumbo) {
2421 ifp->if_ierrors++;
2422 if_printf(ifp, "sw jumbo index(%d) "
2423 "and hw jumbo index(%d) mismatch, drop!\n",
2424 sc->bnx_jumbo, rxidx);
2425 bnx_setup_rxdesc_jumbo(sc, rxidx);
2426 continue;
2427 }
2428
2429 m = sc->bnx_cdata.bnx_rx_jumbo_chain[rxidx].bnx_mbuf;
2430 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2431 ifp->if_ierrors++;
2432 bnx_setup_rxdesc_jumbo(sc, sc->bnx_jumbo);
2433 continue;
2434 }
2435 if (bnx_newbuf_jumbo(sc, sc->bnx_jumbo, 0)) {
2436 ifp->if_ierrors++;
2437 bnx_setup_rxdesc_jumbo(sc, sc->bnx_jumbo);
2438 continue;
2439 }
2440 } else {
2441 BNX_INC(sc->bnx_std, BGE_STD_RX_RING_CNT);
2442 stdcnt++;
2443
2444 if (rxidx != sc->bnx_std) {
2445 ifp->if_ierrors++;
2446 if_printf(ifp, "sw std index(%d) "
2447 "and hw std index(%d) mismatch, drop!\n",
2448 sc->bnx_std, rxidx);
2449 bnx_setup_rxdesc_std(sc, rxidx);
2450 continue;
2451 }
2452
2453 m = sc->bnx_cdata.bnx_rx_std_chain[rxidx].bnx_mbuf;
2454 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2455 ifp->if_ierrors++;
2456 bnx_setup_rxdesc_std(sc, sc->bnx_std);
2457 continue;
2458 }
2459 if (bnx_newbuf_std(sc, sc->bnx_std, 0)) {
2460 ifp->if_ierrors++;
2461 bnx_setup_rxdesc_std(sc, sc->bnx_std);
2462 continue;
2463 }
2464 }
2465
2466 ifp->if_ipackets++;
2467 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
2468 m->m_pkthdr.rcvif = ifp;
2469
2470 if ((ifp->if_capenable & IFCAP_RXCSUM) &&
2471 (cur_rx->bge_flags & BGE_RXBDFLAG_IPV6) == 0) {
2472 if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
2473 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2474 if ((cur_rx->bge_error_flag &
2475 BGE_RXERRFLAG_IP_CSUM_NOK) == 0)
2476 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2477 }
2478 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) {
2479 m->m_pkthdr.csum_data =
2480 cur_rx->bge_tcp_udp_csum;
2481 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
2482 CSUM_PSEUDO_HDR;
2483 }
2484 }
2485
2486 /*
2487 * If we received a packet with a vlan tag, pass it
2488 * to vlan_input() instead of ether_input().
2489 */
2490 if (have_tag) {
2491 m->m_flags |= M_VLANTAG;
2492 m->m_pkthdr.ether_vlantag = vlan_tag;
2493 have_tag = vlan_tag = 0;
2494 }
2495 ifp->if_input(ifp, m);
2496 }
2497
2498 bnx_writembx(sc, BGE_MBX_RX_CONS0_LO, sc->bnx_rx_saved_considx);
2499 if (stdcnt)
2500 bnx_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bnx_std);
2501 if (jumbocnt)
2502 bnx_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bnx_jumbo);
2503}
2504
2505static void
2506bnx_txeof(struct bnx_softc *sc, uint16_t tx_cons)
2507{
2508 struct bge_tx_bd *cur_tx = NULL;
2509 struct ifnet *ifp;
2510
2511 ifp = &sc->arpcom.ac_if;
2512
2513 /*
2514 * Go through our tx ring and free mbufs for those
2515 * frames that have been sent.
2516 */
2517 while (sc->bnx_tx_saved_considx != tx_cons) {
2518 uint32_t idx = 0;
2519
2520 idx = sc->bnx_tx_saved_considx;
2521 cur_tx = &sc->bnx_ldata.bnx_tx_ring[idx];
2522 if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
2523 ifp->if_opackets++;
2524 if (sc->bnx_cdata.bnx_tx_chain[idx] != NULL) {
2525 bus_dmamap_unload(sc->bnx_cdata.bnx_tx_mtag,
2526 sc->bnx_cdata.bnx_tx_dmamap[idx]);
2527 m_freem(sc->bnx_cdata.bnx_tx_chain[idx]);
2528 sc->bnx_cdata.bnx_tx_chain[idx] = NULL;
2529 }
2530 sc->bnx_txcnt--;
2531 BNX_INC(sc->bnx_tx_saved_considx, BGE_TX_RING_CNT);
2532 }
2533
2534 if (cur_tx != NULL &&
2535 (BGE_TX_RING_CNT - sc->bnx_txcnt) >=
2536 (BNX_NSEG_RSVD + BNX_NSEG_SPARE))
2537 ifp->if_flags &= ~IFF_OACTIVE;
2538
2539 if (sc->bnx_txcnt == 0)
2540 ifp->if_timer = 0;
2541
2542 if (!ifq_is_empty(&ifp->if_snd))
2543 if_devstart(ifp);
2544}
2545
2546#ifdef DEVICE_POLLING
2547
2548static void
2549bnx_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
2550{
2551 struct bnx_softc *sc = ifp->if_softc;
2552 struct bge_status_block *sblk = sc->bnx_ldata.bnx_status_block;
2553 uint16_t rx_prod, tx_cons;
2554
2555 switch(cmd) {
2556 case POLL_REGISTER:
2557 bnx_disable_intr(sc);
2558 break;
2559 case POLL_DEREGISTER:
2560 bnx_enable_intr(sc);
2561 break;
2562 case POLL_AND_CHECK_STATUS:
2563 /*
2564 * Process link state changes.
2565 */
2566 bnx_link_poll(sc);
2567 /* Fall through */
2568 case POLL_ONLY:
2569 sc->bnx_status_tag = sblk->bge_status_tag;
2570 /*
2571 * Use a load fence to ensure that status_tag
2572 * is saved before rx_prod and tx_cons.
2573 */
2574 cpu_lfence();
2575
2576 rx_prod = sblk->bge_idx[0].bge_rx_prod_idx;
2577 tx_cons = sblk->bge_idx[0].bge_tx_cons_idx;
2578 if (ifp->if_flags & IFF_RUNNING) {
2580 if (sc->bnx_rx_saved_considx != rx_prod)
2581 bnx_rxeof(sc, rx_prod);
2582
2584 if (sc->bnx_tx_saved_considx != tx_cons)
2585 bnx_txeof(sc, tx_cons);
2586 }
2587 break;
2588 }
2589}
2590
2591#endif
2592
2593static void
2594bnx_intr_legacy(void *xsc)
2595{
2596 struct bnx_softc *sc = xsc;
2597 struct bge_status_block *sblk = sc->bnx_ldata.bnx_status_block;
2598
2599 if (sc->bnx_status_tag == sblk->bge_status_tag) {
2600 uint32_t val;
2601
2602 val = pci_read_config(sc->bnx_dev, BGE_PCI_PCISTATE, 4);
2603 if (val & BGE_PCISTAT_INTR_NOTACT)
2604 return;
2605 }
2606
2607 /*
2608 * NOTE:
2609 * Interrupt will have to be disabled if tagged status
2610 * is used, else interrupt will always be asserted on
2611 * certain chips (at least on BCM5750 AX/BX).
2612 */
2613 bnx_writembx(sc, BGE_MBX_IRQ0_LO, 1);
2614
2615 bnx_intr(sc);
2616}
2617
2618static void
2619bnx_msi(void *xsc)
2620{
2621 struct bnx_softc *sc = xsc;
2622
2623 /* Disable interrupt first */
2624 bnx_writembx(sc, BGE_MBX_IRQ0_LO, 1);
2625 bnx_intr(sc);
2626}
2627
2628static void
2629bnx_msi_oneshot(void *xsc)
2630{
2631 bnx_intr(xsc);
2632}
2633
2634static void
2635bnx_intr(struct bnx_softc *sc)
2636{
2637 struct ifnet *ifp = &sc->arpcom.ac_if;
2638 struct bge_status_block *sblk = sc->bnx_ldata.bnx_status_block;
2639 uint16_t rx_prod, tx_cons;
2640 uint32_t status;
2641
2642 sc->bnx_status_tag = sblk->bge_status_tag;
2643 /*
2644 * Use a load fence to ensure that status_tag is saved
2645 * before rx_prod, tx_cons and status.
2646 */
2647 cpu_lfence();
2648
2649 rx_prod = sblk->bge_idx[0].bge_rx_prod_idx;
2650 tx_cons = sblk->bge_idx[0].bge_tx_cons_idx;
2651 status = sblk->bge_status;
2652
2653 if ((status & BGE_STATFLAG_LINKSTATE_CHANGED) || sc->bnx_link_evt)
2654 bnx_link_poll(sc);
2655
2656 if (ifp->if_flags & IFF_RUNNING) {
2657 if (sc->bnx_rx_saved_considx != rx_prod)
2658 bnx_rxeof(sc, rx_prod);
2659
2660 if (sc->bnx_tx_saved_considx != tx_cons)
2661 bnx_txeof(sc, tx_cons);
2662 }
2663
2664 bnx_writembx(sc, BGE_MBX_IRQ0_LO, sc->bnx_status_tag << 24);
2665
2666 if (sc->bnx_coal_chg)
2667 bnx_coal_change(sc);
2668}
2669
2670static void
2671bnx_tick(void *xsc)
2672{
2673 struct bnx_softc *sc = xsc;
2674 struct ifnet *ifp = &sc->arpcom.ac_if;
2675
2676 lwkt_serialize_enter(ifp->if_serializer);
2677
2678 bnx_stats_update_regs(sc);
2679
2680 if (sc->bnx_flags & BNX_FLAG_TBI) {
2681 /*
 2682		 * Since auto-polling can't be used in TBI mode, we poll
 2683		 * link status manually. Here we register a pending link
 2684		 * event and trigger an interrupt.
2685 */
2686 sc->bnx_link_evt++;
2687 BNX_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
2688 } else if (!sc->bnx_link) {
2689 mii_tick(device_get_softc(sc->bnx_miibus));
2690 }
2691
2692 callout_reset(&sc->bnx_stat_timer, hz, bnx_tick, sc);
2693
2694 lwkt_serialize_exit(ifp->if_serializer);
2695}
2696
2697static void
2698bnx_stats_update_regs(struct bnx_softc *sc)
2699{
2700 struct ifnet *ifp = &sc->arpcom.ac_if;
2701 struct bge_mac_stats_regs stats;
2702 uint32_t *s;
2703 int i;
2704
2705 s = (uint32_t *)&stats;
2706 for (i = 0; i < sizeof(struct bge_mac_stats_regs); i += 4) {
2707 *s = CSR_READ_4(sc, BGE_RX_STATS + i);
2708 s++;
2709 }
2710
2711 ifp->if_collisions +=
2712 (stats.dot3StatsSingleCollisionFrames +
2713 stats.dot3StatsMultipleCollisionFrames +
2714 stats.dot3StatsExcessiveCollisions +
2715 stats.dot3StatsLateCollisions) -
2716 ifp->if_collisions;
2717}
2718
2719/*
2720 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
2721 * pointers to descriptors.
2722 */
2723static int
2724bnx_encap(struct bnx_softc *sc, struct mbuf **m_head0, uint32_t *txidx)
2725{
2726 struct bge_tx_bd *d = NULL;
2727 uint16_t csum_flags = 0;
2728 bus_dma_segment_t segs[BNX_NSEG_NEW];
2729 bus_dmamap_t map;
2730 int error, maxsegs, nsegs, idx, i;
2731 struct mbuf *m_head = *m_head0, *m_new;
2732
2733 if (m_head->m_pkthdr.csum_flags) {
2734 if (m_head->m_pkthdr.csum_flags & CSUM_IP)
2735 csum_flags |= BGE_TXBDFLAG_IP_CSUM;
2736 if (m_head->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
2737 csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
2738 if (m_head->m_flags & M_LASTFRAG)
2739 csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
2740 else if (m_head->m_flags & M_FRAG)
2741 csum_flags |= BGE_TXBDFLAG_IP_FRAG;
2742 }
2743
2744 idx = *txidx;
2745 map = sc->bnx_cdata.bnx_tx_dmamap[idx];
2746
2747 maxsegs = (BGE_TX_RING_CNT - sc->bnx_txcnt) - BNX_NSEG_RSVD;
2748 KASSERT(maxsegs >= BNX_NSEG_SPARE,
2749 ("not enough segments %d", maxsegs));
2750
2751 if (maxsegs > BNX_NSEG_NEW)
2752 maxsegs = BNX_NSEG_NEW;
2753
2754 /*
2755 * Pad outbound frame to BGE_MIN_FRAMELEN for an unusual reason.
2756 * The bge hardware will pad out Tx runts to BGE_MIN_FRAMELEN,
2757 * but when such padded frames employ the bge IP/TCP checksum
2758 * offload, the hardware checksum assist gives incorrect results
2759 * (possibly from incorporating its own padding into the UDP/TCP
2760 * checksum; who knows). If we pad such runts with zeros, the
2761 * onboard checksum comes out correct.
2762 */
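	/*
	 * For example, a hypothetical 42-byte UDP runt is zero-padded
	 * up to BNX_MIN_FRAMELEN below, so the checksum engine sees
	 * deterministic pad bytes rather than whatever the hardware
	 * would have appended.
	 */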
2763 if ((csum_flags & BGE_TXBDFLAG_TCP_UDP_CSUM) &&
2764 m_head->m_pkthdr.len < BNX_MIN_FRAMELEN) {
2765 error = m_devpad(m_head, BNX_MIN_FRAMELEN);
2766 if (error)
2767 goto back;
2768 }
2769
2770 if ((sc->bnx_flags & BNX_FLAG_SHORTDMA) && m_head->m_next != NULL) {
2771 m_new = bnx_defrag_shortdma(m_head);
2772 if (m_new == NULL) {
2773 error = ENOBUFS;
2774 goto back;
2775 }
2776 *m_head0 = m_head = m_new;
2777 }
2778 if (sc->bnx_force_defrag && m_head->m_next != NULL) {
2779 /*
2780 * Forcefully defragment mbuf chain to overcome hardware
 2781		 * limitation which only supports a single outstanding
2782 * DMA read operation. If it fails, keep moving on using
2783 * the original mbuf chain.
2784 */
2785 m_new = m_defrag(m_head, MB_DONTWAIT);
2786 if (m_new != NULL)
2787 *m_head0 = m_head = m_new;
2788 }
2789
2790 error = bus_dmamap_load_mbuf_defrag(sc->bnx_cdata.bnx_tx_mtag, map,
2791 m_head0, segs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
2792 if (error)
2793 goto back;
2794
2795 m_head = *m_head0;
2796 bus_dmamap_sync(sc->bnx_cdata.bnx_tx_mtag, map, BUS_DMASYNC_PREWRITE);
2797
2798 for (i = 0; ; i++) {
2799 d = &sc->bnx_ldata.bnx_tx_ring[idx];
2800
2801 d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr);
2802 d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr);
2803 d->bge_len = segs[i].ds_len;
2804 d->bge_flags = csum_flags;
2805
2806 if (i == nsegs - 1)
2807 break;
2808 BNX_INC(idx, BGE_TX_RING_CNT);
2809 }
2810 /* Mark the last segment as end of packet... */
2811 d->bge_flags |= BGE_TXBDFLAG_END;
2812
2813 /* Set vlan tag to the first segment of the packet. */
2814 d = &sc->bnx_ldata.bnx_tx_ring[*txidx];
2815 if (m_head->m_flags & M_VLANTAG) {
2816 d->bge_flags |= BGE_TXBDFLAG_VLAN_TAG;
2817 d->bge_vlan_tag = m_head->m_pkthdr.ether_vlantag;
2818 } else {
2819 d->bge_vlan_tag = 0;
2820 }
2821
2822 /*
 2823	 * Ensure that the map for this transmission is placed at
2824 * the array index of the last descriptor in this chain.
2825 */
2826 sc->bnx_cdata.bnx_tx_dmamap[*txidx] = sc->bnx_cdata.bnx_tx_dmamap[idx];
2827 sc->bnx_cdata.bnx_tx_dmamap[idx] = map;
2828 sc->bnx_cdata.bnx_tx_chain[idx] = m_head;
2829 sc->bnx_txcnt += nsegs;
2830
2831 BNX_INC(idx, BGE_TX_RING_CNT);
2832 *txidx = idx;
2833back:
2834 if (error) {
2835 m_freem(*m_head0);
2836 *m_head0 = NULL;
2837 }
2838 return error;
2839}
2840
2841/*
2842 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
2843 * to the mbuf data regions directly in the transmit descriptors.
2844 */
2845static void
2846bnx_start(struct ifnet *ifp)
2847{
2848 struct bnx_softc *sc = ifp->if_softc;
2849 struct mbuf *m_head = NULL;
2850 uint32_t prodidx;
2851 int need_trans;
2852
2853 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
2854 return;
2855
2856 prodidx = sc->bnx_tx_prodidx;
2857
2858 need_trans = 0;
2859 while (sc->bnx_cdata.bnx_tx_chain[prodidx] == NULL) {
2860 m_head = ifq_dequeue(&ifp->if_snd, NULL);
2861 if (m_head == NULL)
2862 break;
2863
2864 /*
2865 * XXX
2866 * The code inside the if() block is never reached since we
2867 * must mark CSUM_IP_FRAGS in our if_hwassist to start getting
2868 * requests to checksum TCP/UDP in a fragmented packet.
2869 *
2870 * XXX
2871 * safety overkill. If this is a fragmented packet chain
2872 * with delayed TCP/UDP checksums, then only encapsulate
2873 * it if we have enough descriptors to handle the entire
2874 * chain at once.
2875 * (paranoia -- may not actually be needed)
2876 */
2877 if ((m_head->m_flags & M_FIRSTFRAG) &&
2878 (m_head->m_pkthdr.csum_flags & CSUM_DELAY_DATA)) {
2879 if ((BGE_TX_RING_CNT - sc->bnx_txcnt) <
2880 m_head->m_pkthdr.csum_data + BNX_NSEG_RSVD) {
2881 ifp->if_flags |= IFF_OACTIVE;
2882 ifq_prepend(&ifp->if_snd, m_head);
2883 break;
2884 }
2885 }
2886
2887 /*
 2888		 * Sanity check: avoid coming within BNX_NSEG_RSVD
 2889		 * descriptors of the end of the ring. Also make
 2890		 * sure there are BNX_NSEG_SPARE descriptors for
2891 * jumbo buffers' defragmentation.
2892 */
2893 if ((BGE_TX_RING_CNT - sc->bnx_txcnt) <
2894 (BNX_NSEG_RSVD + BNX_NSEG_SPARE)) {
2895 ifp->if_flags |= IFF_OACTIVE;
2896 ifq_prepend(&ifp->if_snd, m_head);
2897 break;
2898 }
2899
2900 /*
2901 * Pack the data into the transmit ring. If we
2902 * don't have room, set the OACTIVE flag and wait
2903 * for the NIC to drain the ring.
2904 */
2905 if (bnx_encap(sc, &m_head, &prodidx)) {
2906 ifp->if_flags |= IFF_OACTIVE;
2907 ifp->if_oerrors++;
2908 break;
2909 }
2910 need_trans = 1;
2911
2912 ETHER_BPF_MTAP(ifp, m_head);
2913 }
2914
2915 if (!need_trans)
2916 return;
2917
2918 /* Transmit */
2919 bnx_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
2920
2921 sc->bnx_tx_prodidx = prodidx;
2922
2923 /*
2924 * Set a timeout in case the chip goes out to lunch.
2925 */
2926 ifp->if_timer = 5;
2927}
2928
2929static void
2930bnx_init(void *xsc)
2931{
2932 struct bnx_softc *sc = xsc;
2933 struct ifnet *ifp = &sc->arpcom.ac_if;
2934 uint16_t *m;
2935 uint32_t mode;
2936
2937 ASSERT_SERIALIZED(ifp->if_serializer);
2938
2939 /* Cancel pending I/O and flush buffers. */
2940 bnx_stop(sc);
2941 bnx_reset(sc);
2942 bnx_chipinit(sc);
2943
2944 /*
2945 * Init the various state machines, ring
2946 * control blocks and firmware.
2947 */
2948 if (bnx_blockinit(sc)) {
2949 if_printf(ifp, "initialization failure\n");
2950 bnx_stop(sc);
2951 return;
2952 }
2953
2954 /* Specify MTU. */
2955 CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
2956 ETHER_HDR_LEN + ETHER_CRC_LEN + EVL_ENCAPLEN);
2957
2958 /* Load our MAC address. */
2959 m = (uint16_t *)&sc->arpcom.ac_enaddr[0];
2960 CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
2961 CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
2962
2963 /* Enable or disable promiscuous mode as needed. */
2964 bnx_setpromisc(sc);
2965
2966 /* Program multicast filter. */
2967 bnx_setmulti(sc);
2968
2969 /* Init RX ring. */
2970 if (bnx_init_rx_ring_std(sc)) {
2971 if_printf(ifp, "RX ring initialization failed\n");
2972 bnx_stop(sc);
2973 return;
2974 }
2975
2976 /* Init jumbo RX ring. */
2977 if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN)) {
2978 if (bnx_init_rx_ring_jumbo(sc)) {
2979 if_printf(ifp, "Jumbo RX ring initialization failed\n");
2980 bnx_stop(sc);
2981 return;
2982 }
2983 }
2984
2985 /* Init our RX return ring index */
2986 sc->bnx_rx_saved_considx = 0;
2987
2988 /* Init TX ring. */
2989 bnx_init_tx_ring(sc);
2990
2991 /* Enable TX MAC state machine lockup fix. */
2992 mode = CSR_READ_4(sc, BGE_TX_MODE);
2993 mode |= BGE_TXMODE_MBUF_LOCKUP_FIX;
2994 if (sc->bnx_asicrev == BGE_ASICREV_BCM5720) {
2995 mode &= ~(BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE);
2996 mode |= CSR_READ_4(sc, BGE_TX_MODE) &
2997 (BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE);
2998 }
2999 /* Turn on transmitter */
3000 CSR_WRITE_4(sc, BGE_TX_MODE, mode | BGE_TXMODE_ENABLE);
3001
3002 /* Turn on receiver */
3003 BNX_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
3004
3005 /*
3006 * Set the number of good frames to receive after RX MBUF
3007 * Low Watermark has been reached. After the RX MAC receives
3008 * this number of frames, it will drop subsequent incoming
3009 * frames until the MBUF High Watermark is reached.
3010 */
3011 if (sc->bnx_asicrev == BGE_ASICREV_BCM57765)
3012 CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 1);
3013 else
3014 CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 2);
3015
3016 if (sc->bnx_irq_type == PCI_INTR_TYPE_MSI) {
3017 if (bootverbose) {
3018 if_printf(ifp, "MSI_MODE: %#x\n",
3019 CSR_READ_4(sc, BGE_MSI_MODE));
3020 }
3021 }
3022
3023 /* Tell firmware we're alive. */
3024 BNX_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3025
3026 /* Enable host interrupts if polling(4) is not enabled. */
3027 PCI_SETBIT(sc->bnx_dev, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA, 4);
3028#ifdef DEVICE_POLLING
3029 if (ifp->if_flags & IFF_POLLING)
3030 bnx_disable_intr(sc);
3031 else
3032#endif
3033 bnx_enable_intr(sc);
3034
3035 bnx_ifmedia_upd(ifp);
3036
3037 ifp->if_flags |= IFF_RUNNING;
3038 ifp->if_flags &= ~IFF_OACTIVE;
3039
3040 callout_reset(&sc->bnx_stat_timer, hz, bnx_tick, sc);
3041}
3042
3043/*
3044 * Set media options.
3045 */
3046static int
3047bnx_ifmedia_upd(struct ifnet *ifp)
3048{
3049 struct bnx_softc *sc = ifp->if_softc;
3050
3051 /* If this is a 1000baseX NIC, enable the TBI port. */
3052 if (sc->bnx_flags & BNX_FLAG_TBI) {
3053 struct ifmedia *ifm = &sc->bnx_ifmedia;
3054
3055 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
3056 return(EINVAL);
3057
3058 switch(IFM_SUBTYPE(ifm->ifm_media)) {
3059 case IFM_AUTO:
3060 break;
3061
3062 case IFM_1000_SX:
3063 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
3064 BNX_CLRBIT(sc, BGE_MAC_MODE,
3065 BGE_MACMODE_HALF_DUPLEX);
3066 } else {
3067 BNX_SETBIT(sc, BGE_MAC_MODE,
3068 BGE_MACMODE_HALF_DUPLEX);
3069 }
3070 break;
3071 default:
3072 return(EINVAL);
3073 }
3074 } else {
3075 struct mii_data *mii = device_get_softc(sc->bnx_miibus);
3076
3077 sc->bnx_link_evt++;
3078 sc->bnx_link = 0;
3079 if (mii->mii_instance) {
3080 struct mii_softc *miisc;
3081
3082 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
3083 mii_phy_reset(miisc);
3084 }
3085 mii_mediachg(mii);
3086
3087 /*
3088 * Force an interrupt so that we will call bnx_link_upd
3089 * if needed and clear any pending link state attention.
 3090		 * Without this we would not get any further interrupts
 3091		 * for link state changes, would never bring the link UP,
 3092		 * and could not send in bnx_start. The only other way to
 3093		 * get things going again would be to receive a packet and
 3094		 * take an RX interrupt.
 3095		 *
 3096		 * bnx_tick should help for fiber cards, so we might not
 3097		 * need this here when BNX_FLAG_TBI is set, but since we
 3098		 * poll for fiber anyway it does no harm.
3099 */
3100 BNX_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
3101 }
3102 return(0);
3103}
3104
3105/*
3106 * Report current media status.
3107 */
3108static void
3109bnx_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
3110{
3111 struct bnx_softc *sc = ifp->if_softc;
3112
3113 if (sc->bnx_flags & BNX_FLAG_TBI) {
3114 ifmr->ifm_status = IFM_AVALID;
3115 ifmr->ifm_active = IFM_ETHER;
3116 if (CSR_READ_4(sc, BGE_MAC_STS) &
3117 BGE_MACSTAT_TBI_PCS_SYNCHED) {
3118 ifmr->ifm_status |= IFM_ACTIVE;
3119 } else {
3120 ifmr->ifm_active |= IFM_NONE;
3121 return;
3122 }
3123
3124 ifmr->ifm_active |= IFM_1000_SX;
3125 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
3126 ifmr->ifm_active |= IFM_HDX;
3127 else
3128 ifmr->ifm_active |= IFM_FDX;
3129 } else {
3130 struct mii_data *mii = device_get_softc(sc->bnx_miibus);
3131
3132 mii_pollstat(mii);
3133 ifmr->ifm_active = mii->mii_media_active;
3134 ifmr->ifm_status = mii->mii_media_status;
3135 }
3136}
3137
3138static int
3139bnx_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
3140{
3141 struct bnx_softc *sc = ifp->if_softc;
3142 struct ifreq *ifr = (struct ifreq *)data;
3143 int mask, error = 0;
3144
3145 ASSERT_SERIALIZED(ifp->if_serializer);
3146
3147 switch (command) {
3148 case SIOCSIFMTU:
3149 if ((!BNX_IS_JUMBO_CAPABLE(sc) && ifr->ifr_mtu > ETHERMTU) ||
3150 (BNX_IS_JUMBO_CAPABLE(sc) &&
3151 ifr->ifr_mtu > BNX_JUMBO_MTU)) {
3152 error = EINVAL;
3153 } else if (ifp->if_mtu != ifr->ifr_mtu) {
3154 ifp->if_mtu = ifr->ifr_mtu;
3155 if (ifp->if_flags & IFF_RUNNING)
3156 bnx_init(sc);
3157 }
3158 break;
3159 case SIOCSIFFLAGS:
3160 if (ifp->if_flags & IFF_UP) {
3161 if (ifp->if_flags & IFF_RUNNING) {
3162 mask = ifp->if_flags ^ sc->bnx_if_flags;
3163
3164 /*
3165 * If only the state of the PROMISC flag
3166 * changed, then just use the 'set promisc
3167 * mode' command instead of reinitializing
3168 * the entire NIC. Doing a full re-init
3169 * means reloading the firmware and waiting
3170 * for it to start up, which may take a
3171 * second or two. Similarly for ALLMULTI.
3172 */
3173 if (mask & IFF_PROMISC)
3174 bnx_setpromisc(sc);
3175 if (mask & IFF_ALLMULTI)
3176 bnx_setmulti(sc);
3177 } else {
3178 bnx_init(sc);
3179 }
3180 } else if (ifp->if_flags & IFF_RUNNING) {
3181 bnx_stop(sc);
3182 }
3183 sc->bnx_if_flags = ifp->if_flags;
3184 break;
3185 case SIOCADDMULTI:
3186 case SIOCDELMULTI:
3187 if (ifp->if_flags & IFF_RUNNING)
3188 bnx_setmulti(sc);
3189 break;
3190 case SIOCSIFMEDIA:
3191 case SIOCGIFMEDIA:
3192 if (sc->bnx_flags & BNX_FLAG_TBI) {
3193 error = ifmedia_ioctl(ifp, ifr,
3194 &sc->bnx_ifmedia, command);
3195 } else {
3196 struct mii_data *mii;
3197
3198 mii = device_get_softc(sc->bnx_miibus);
3199 error = ifmedia_ioctl(ifp, ifr,
3200 &mii->mii_media, command);
3201 }
3202 break;
3203 case SIOCSIFCAP:
3204 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
3205 if (mask & IFCAP_HWCSUM) {
3206 ifp->if_capenable ^= (mask & IFCAP_HWCSUM);
3207 if (IFCAP_HWCSUM & ifp->if_capenable)
3208 ifp->if_hwassist = BNX_CSUM_FEATURES;
3209 else
3210 ifp->if_hwassist = 0;
3211 }
3212 break;
3213 default:
3214 error = ether_ioctl(ifp, command, data);
3215 break;
3216 }
3217 return error;
3218}
3219
3220static void
3221bnx_watchdog(struct ifnet *ifp)
3222{
3223 struct bnx_softc *sc = ifp->if_softc;
3224
3225 if_printf(ifp, "watchdog timeout -- resetting\n");
3226
3227 bnx_init(sc);
3228
3229 ifp->if_oerrors++;
3230
3231 if (!ifq_is_empty(&ifp->if_snd))
3232 if_devstart(ifp);
3233}
3234
3235/*
3236 * Stop the adapter and free any mbufs allocated to the
3237 * RX and TX lists.
3238 */
3239static void
3240bnx_stop(struct bnx_softc *sc)
3241{
3242 struct ifnet *ifp = &sc->arpcom.ac_if;
3243
3244 ASSERT_SERIALIZED(ifp->if_serializer);
3245
3246 callout_stop(&sc->bnx_stat_timer);
3247
3248 /*
3249 * Disable all of the receiver blocks
3250 */
3251 bnx_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
3252 bnx_stop_block(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
3253 bnx_stop_block(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
3254 bnx_stop_block(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
3255 bnx_stop_block(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
3256 bnx_stop_block(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
3257
3258 /*
3259 * Disable all of the transmit blocks
3260 */
3261 bnx_stop_block(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
3262 bnx_stop_block(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
3263 bnx_stop_block(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
3264 bnx_stop_block(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
3265 bnx_stop_block(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
3266 bnx_stop_block(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
3267
3268 /*
3269 * Shut down all of the memory managers and related
3270 * state machines.
3271 */
3272 bnx_stop_block(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
3273 bnx_stop_block(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
3274 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
3275 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
3276
3277 /* Disable host interrupts. */
3278 bnx_disable_intr(sc);
3279
3280 /*
3281 * Tell firmware we're shutting down.
3282 */
3283 BNX_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3284
3285 /* Free the RX lists. */
3286 bnx_free_rx_ring_std(sc);
3287
3288 /* Free jumbo RX list. */
3289 if (BNX_IS_JUMBO_CAPABLE(sc))
3290 bnx_free_rx_ring_jumbo(sc);
3291
3292 /* Free TX buffers. */
3293 bnx_free_tx_ring(sc);
3294
3295 sc->bnx_status_tag = 0;
3296 sc->bnx_link = 0;
3297 sc->bnx_coal_chg = 0;
3298
3299 sc->bnx_tx_saved_considx = BNX_TXCONS_UNSET;
3300
3301 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
3302 ifp->if_timer = 0;
3303}
3304
3305/*
3306 * Stop all chip I/O so that the kernel's probe routines don't
3307 * get confused by errant DMAs when rebooting.
3308 */
3309static void
3310bnx_shutdown(device_t dev)
3311{
3312 struct bnx_softc *sc = device_get_softc(dev);
3313 struct ifnet *ifp = &sc->arpcom.ac_if;
3314
3315 lwkt_serialize_enter(ifp->if_serializer);
3316 bnx_stop(sc);
3317 bnx_reset(sc);
3318 lwkt_serialize_exit(ifp->if_serializer);
3319}
3320
3321static int
3322bnx_suspend(device_t dev)
3323{
3324 struct bnx_softc *sc = device_get_softc(dev);
3325 struct ifnet *ifp = &sc->arpcom.ac_if;
3326
3327 lwkt_serialize_enter(ifp->if_serializer);
3328 bnx_stop(sc);
3329 lwkt_serialize_exit(ifp->if_serializer);
3330
3331 return 0;
3332}
3333
3334static int
3335bnx_resume(device_t dev)
3336{
3337 struct bnx_softc *sc = device_get_softc(dev);
3338 struct ifnet *ifp = &sc->arpcom.ac_if;
3339
3340 lwkt_serialize_enter(ifp->if_serializer);
3341
3342 if (ifp->if_flags & IFF_UP) {
3343 bnx_init(sc);
3344
3345 if (!ifq_is_empty(&ifp->if_snd))
3346 if_devstart(ifp);
3347 }
3348
3349 lwkt_serialize_exit(ifp->if_serializer);
3350
3351 return 0;
3352}
3353
3354static void
3355bnx_setpromisc(struct bnx_softc *sc)
3356{
3357 struct ifnet *ifp = &sc->arpcom.ac_if;
3358
3359 if (ifp->if_flags & IFF_PROMISC)
3360 BNX_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
3361 else
3362 BNX_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
3363}
3364
3365static void
3366bnx_dma_free(struct bnx_softc *sc)
3367{
3368 int i;
3369
 3370	/* Destroy RX mbuf DMA resources. */
3371 if (sc->bnx_cdata.bnx_rx_mtag != NULL) {
3372 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
3373 bus_dmamap_destroy(sc->bnx_cdata.bnx_rx_mtag,
3374 sc->bnx_cdata.bnx_rx_std_dmamap[i]);
3375 }
3376 bus_dmamap_destroy(sc->bnx_cdata.bnx_rx_mtag,
3377 sc->bnx_cdata.bnx_rx_tmpmap);
3378 bus_dma_tag_destroy(sc->bnx_cdata.bnx_rx_mtag);
3379 }
3380
 3381	/* Destroy TX mbuf DMA resources. */
3382 if (sc->bnx_cdata.bnx_tx_mtag != NULL) {
3383 for (i = 0; i < BGE_TX_RING_CNT; i++) {
3384 bus_dmamap_destroy(sc->bnx_cdata.bnx_tx_mtag,
3385 sc->bnx_cdata.bnx_tx_dmamap[i]);
3386 }
3387 bus_dma_tag_destroy(sc->bnx_cdata.bnx_tx_mtag);
3388 }
3389
3390 /* Destroy standard RX ring */
3391 bnx_dma_block_free(sc->bnx_cdata.bnx_rx_std_ring_tag,
3392 sc->bnx_cdata.bnx_rx_std_ring_map,
3393 sc->bnx_ldata.bnx_rx_std_ring);
3394
3395 if (BNX_IS_JUMBO_CAPABLE(sc))
3396 bnx_free_jumbo_mem(sc);
3397
3398 /* Destroy RX return ring */
3399 bnx_dma_block_free(sc->bnx_cdata.bnx_rx_return_ring_tag,
3400 sc->bnx_cdata.bnx_rx_return_ring_map,
3401 sc->bnx_ldata.bnx_rx_return_ring);
3402
3403 /* Destroy TX ring */
3404 bnx_dma_block_free(sc->bnx_cdata.bnx_tx_ring_tag,
3405 sc->bnx_cdata.bnx_tx_ring_map,
3406 sc->bnx_ldata.bnx_tx_ring);
3407
3408 /* Destroy status block */
3409 bnx_dma_block_free(sc->bnx_cdata.bnx_status_tag,
3410 sc->bnx_cdata.bnx_status_map,
3411 sc->bnx_ldata.bnx_status_block);
3412
3413 /* Destroy the parent tag */
3414 if (sc->bnx_cdata.bnx_parent_tag != NULL)
3415 bus_dma_tag_destroy(sc->bnx_cdata.bnx_parent_tag);
3416}
3417
3418static int
3419bnx_dma_alloc(struct bnx_softc *sc)
3420{
3421 struct ifnet *ifp = &sc->arpcom.ac_if;
3422 int i, error;
3423
3424 /*
3425 * Allocate the parent bus DMA tag appropriate for PCI.
3426 *
 3427	 * All of the NetExtreme/NetLink controllers have a 4GB
 3428	 * boundary DMA bug.
 3429	 * Whenever an address crosses a multiple of the 4GB boundary
 3430	 * (including 4GB, 8GB, 12GB, etc.) and makes the transition
 3431	 * from 0xX_FFFF_FFFF to 0x(X+1)_0000_0000, an internal DMA
 3432	 * state machine will lock up and cause the device to hang.
3433 */
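	/*
	 * For example, a 2KB segment starting at the hypothetical bus
	 * address 0x0_FFFF_FC00 would cross into 0x1_0000_0000
	 * mid-transfer and trip the bug; BGE_DMA_BOUNDARY_4G below
	 * makes busdma split segments so that no single transfer
	 * straddles the boundary.
	 */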
3434 error = bus_dma_tag_create(NULL, 1, BGE_DMA_BOUNDARY_4G,
3435 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3436 NULL, NULL,
3437 BUS_SPACE_MAXSIZE_32BIT, 0,
3438 BUS_SPACE_MAXSIZE_32BIT,
3439 0, &sc->bnx_cdata.bnx_parent_tag);
3440 if (error) {
3441 if_printf(ifp, "could not allocate parent dma tag\n");
3442 return error;
3443 }
3444
3445 /*
3446 * Create DMA tag and maps for RX mbufs.
3447 */
3448 error = bus_dma_tag_create(sc->bnx_cdata.bnx_parent_tag, 1, 0,
3449 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3450 NULL, NULL, MCLBYTES, 1, MCLBYTES,
3451 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK,
3452 &sc->bnx_cdata.bnx_rx_mtag);
3453 if (error) {
3454 if_printf(ifp, "could not allocate RX mbuf dma tag\n");
3455 return error;
3456 }
3457
3458 error = bus_dmamap_create(sc->bnx_cdata.bnx_rx_mtag,
3459 BUS_DMA_WAITOK, &sc->bnx_cdata.bnx_rx_tmpmap);
3460 if (error) {
3461 bus_dma_tag_destroy(sc->bnx_cdata.bnx_rx_mtag);
3462 sc->bnx_cdata.bnx_rx_mtag = NULL;
3463 return error;
3464 }
3465
3466 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
3467 error = bus_dmamap_create(sc->bnx_cdata.bnx_rx_mtag,
3468 BUS_DMA_WAITOK,
3469 &sc->bnx_cdata.bnx_rx_std_dmamap[i]);
3470 if (error) {
3471 int j;
3472
3473 for (j = 0; j < i; ++j) {
3474 bus_dmamap_destroy(sc->bnx_cdata.bnx_rx_mtag,
3475 sc->bnx_cdata.bnx_rx_std_dmamap[j]);
3476 }
3477 bus_dma_tag_destroy(sc->bnx_cdata.bnx_rx_mtag);
3478 sc->bnx_cdata.bnx_rx_mtag = NULL;
3479
3480 if_printf(ifp, "could not create DMA map for RX\n");
3481 return error;
3482 }
3483 }
3484
3485 /*
3486 * Create DMA tag and maps for TX mbufs.
3487 */
3488 error = bus_dma_tag_create(sc->bnx_cdata.bnx_parent_tag, 1, 0,
3489 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3490 NULL, NULL,
3491 BNX_JUMBO_FRAMELEN, BNX_NSEG_NEW, MCLBYTES,
3492 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK |
3493 BUS_DMA_ONEBPAGE,
3494 &sc->bnx_cdata.bnx_tx_mtag);
3495 if (error) {
3496 if_printf(ifp, "could not allocate TX mbuf dma tag\n");
3497 return error;
3498 }
3499
3500 for (i = 0; i < BGE_TX_RING_CNT; i++) {
3501 error = bus_dmamap_create(sc->bnx_cdata.bnx_tx_mtag,
3502 BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
3503 &sc->bnx_cdata.bnx_tx_dmamap[i]);
3504 if (error) {
3505 int j;
3506
3507 for (j = 0; j < i; ++j) {
3508 bus_dmamap_destroy(sc->bnx_cdata.bnx_tx_mtag,
3509 sc->bnx_cdata.bnx_tx_dmamap[j]);
3510 }
3511 bus_dma_tag_destroy(sc->bnx_cdata.bnx_tx_mtag);
3512 sc->bnx_cdata.bnx_tx_mtag = NULL;
3513
3514 if_printf(ifp, "could not create DMA map for TX\n");
3515 return error;
3516 }
3517 }
3518
3519 /*
 3520	 * Create DMA resources for the standard RX ring.
3521 */
3522 error = bnx_dma_block_alloc(sc, BGE_STD_RX_RING_SZ,
3523 &sc->bnx_cdata.bnx_rx_std_ring_tag,
3524 &sc->bnx_cdata.bnx_rx_std_ring_map,
3525 (void *)&sc->bnx_ldata.bnx_rx_std_ring,
3526 &sc->bnx_ldata.bnx_rx_std_ring_paddr);
3527 if (error) {
3528 if_printf(ifp, "could not create std RX ring\n");
3529 return error;
3530 }
3531
3532 /*
3533 * Create jumbo buffer pool.
3534 */
3535 if (BNX_IS_JUMBO_CAPABLE(sc)) {
3536 error = bnx_alloc_jumbo_mem(sc);
3537 if (error) {
3538 if_printf(ifp, "could not create jumbo buffer pool\n");
3539 return error;
3540 }
3541 }
3542
3543 /*
 3544	 * Create DMA resources for the RX return ring.
3545 */
3546 error = bnx_dma_block_alloc(sc,
3547 BGE_RX_RTN_RING_SZ(sc->bnx_return_ring_cnt),
3548 &sc->bnx_cdata.bnx_rx_return_ring_tag,
3549 &sc->bnx_cdata.bnx_rx_return_ring_map,
3550 (void *)&sc->bnx_ldata.bnx_rx_return_ring,
3551 &sc->bnx_ldata.bnx_rx_return_ring_paddr);
3552 if (error) {
3553 if_printf(ifp, "could not create RX ret ring\n");
3554 return error;
3555 }
3556
3557 /*
 3558	 * Create DMA resources for the TX ring.
3559 */
3560 error = bnx_dma_block_alloc(sc, BGE_TX_RING_SZ,
3561 &sc->bnx_cdata.bnx_tx_ring_tag,
3562 &sc->bnx_cdata.bnx_tx_ring_map,
3563 (void *)&sc->bnx_ldata.bnx_tx_ring,
3564 &sc->bnx_ldata.bnx_tx_ring_paddr);
3565 if (error) {
3566 if_printf(ifp, "could not create TX ring\n");
3567 return error;
3568 }
3569
3570 /*
 3571	 * Create DMA resources for the status block.
3572 */
3573 error = bnx_dma_block_alloc(sc, BGE_STATUS_BLK_SZ,
3574 &sc->bnx_cdata.bnx_status_tag,
3575 &sc->bnx_cdata.bnx_status_map,
3576 (void *)&sc->bnx_ldata.bnx_status_block,
3577 &sc->bnx_ldata.bnx_status_block_paddr);
3578 if (error) {
3579 if_printf(ifp, "could not create status block\n");
3580 return error;
3581 }
3582
3583 return 0;
3584}
3585
3586static int
3587bnx_dma_block_alloc(struct bnx_softc *sc, bus_size_t size, bus_dma_tag_t *tag,
3588 bus_dmamap_t *map, void **addr, bus_addr_t *paddr)
3589{
3590 bus_dmamem_t dmem;
3591 int error;
3592
3593 error = bus_dmamem_coherent(sc->bnx_cdata.bnx_parent_tag, PAGE_SIZE, 0,
3594 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3595 size, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
3596 if (error)
3597 return error;
3598
3599 *tag = dmem.dmem_tag;
3600 *map = dmem.dmem_map;
3601 *addr = dmem.dmem_addr;
3602 *paddr = dmem.dmem_busaddr;
3603
3604 return 0;
3605}
3606
3607static void
3608bnx_dma_block_free(bus_dma_tag_t tag, bus_dmamap_t map, void *addr)
3609{
3610 if (tag != NULL) {
3611 bus_dmamap_unload(tag, map);
3612 bus_dmamem_free(tag, addr, map);
3613 bus_dma_tag_destroy(tag);
3614 }
3615}
3616
3617static void
3618bnx_tbi_link_upd(struct bnx_softc *sc, uint32_t status)
3619{
3620 struct ifnet *ifp = &sc->arpcom.ac_if;
3621
3622#define PCS_ENCODE_ERR (BGE_MACSTAT_PORT_DECODE_ERROR|BGE_MACSTAT_MI_COMPLETE)
3623
3624 /*
3625 * Sometimes PCS encoding errors are detected in
3626 * TBI mode (on fiber NICs), and for some reason
3627 * the chip will signal them as link changes.
3628 * If we get a link change event, but the 'PCS
3629 * encoding error' bit in the MAC status register
3630 * is set, don't bother doing a link check.
3631 * This avoids spurious "gigabit link up" messages
3632 * that sometimes appear on fiber NICs during
3633 * periods of heavy traffic.
3634 */
3635 if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
3636 if (!sc->bnx_link) {
3637 sc->bnx_link++;
3638 if (sc->bnx_asicrev == BGE_ASICREV_BCM5704) {
3639 BNX_CLRBIT(sc, BGE_MAC_MODE,
3640 BGE_MACMODE_TBI_SEND_CFGS);
3641 }
3642 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
3643
3644 if (bootverbose)
3645 if_printf(ifp, "link UP\n");
3646
3647 ifp->if_link_state = LINK_STATE_UP;
3648 if_link_state_change(ifp);
3649 }
3650 } else if ((status & PCS_ENCODE_ERR) != PCS_ENCODE_ERR) {
3651 if (sc->bnx_link) {
3652 sc->bnx_link = 0;
3653
3654 if (bootverbose)
3655 if_printf(ifp, "link DOWN\n");
3656
3657 ifp->if_link_state = LINK_STATE_DOWN;
3658 if_link_state_change(ifp);
3659 }
3660 }
3661
3662#undef PCS_ENCODE_ERR
3663
3664 /* Clear the attention. */
3665 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
3666 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
3667 BGE_MACSTAT_LINK_CHANGED);
3668}
3669
3670static void
3671bnx_copper_link_upd(struct bnx_softc *sc, uint32_t status __unused)
3672{
3673 struct ifnet *ifp = &sc->arpcom.ac_if;
3674 struct mii_data *mii = device_get_softc(sc->bnx_miibus);
3675
3676 mii_pollstat(mii);
3677 bnx_miibus_statchg(sc->bnx_dev);
3678
3679 if (bootverbose) {
3680 if (sc->bnx_link)
3681 if_printf(ifp, "link UP\n");
3682 else
3683 if_printf(ifp, "link DOWN\n");
3684 }
3685
3686 /* Clear the attention. */
3687 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
3688 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
3689 BGE_MACSTAT_LINK_CHANGED);
3690}
3691
3692static void
3693bnx_autopoll_link_upd(struct bnx_softc *sc, uint32_t status __unused)
3694{
3695 struct ifnet *ifp = &sc->arpcom.ac_if;
3696 struct mii_data *mii = device_get_softc(sc->bnx_miibus);
3697
3698 mii_pollstat(mii);
3699
3700 if (!sc->bnx_link &&
3701 (mii->mii_media_status & IFM_ACTIVE) &&
3702 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
3703 sc->bnx_link++;
3704 if (bootverbose)
3705 if_printf(ifp, "link UP\n");
3706 } else if (sc->bnx_link &&
3707 (!(mii->mii_media_status & IFM_ACTIVE) ||
3708 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
3709 sc->bnx_link = 0;
3710 if (bootverbose)
3711 if_printf(ifp, "link DOWN\n");
3712 }
3713
3714 /* Clear the attention. */
3715 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
3716 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
3717 BGE_MACSTAT_LINK_CHANGED);
3718}
3719
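/*
 * The six sysctl handlers below are thin wrappers that funnel into
 * bnx_sysctl_coal_chg() with a per-knob value pointer, bounds and
 * change mask.
 */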
3720static int
3721bnx_sysctl_rx_coal_ticks(SYSCTL_HANDLER_ARGS)
3722{
3723 struct bnx_softc *sc = arg1;
3724
3725 return bnx_sysctl_coal_chg(oidp, arg1, arg2, req,
3726 &sc->bnx_rx_coal_ticks,
3727 BNX_RX_COAL_TICKS_MIN, BNX_RX_COAL_TICKS_MAX,
3728 BNX_RX_COAL_TICKS_CHG);
3729}
3730
3731static int
3732bnx_sysctl_tx_coal_ticks(SYSCTL_HANDLER_ARGS)
3733{
3734 struct bnx_softc *sc = arg1;
3735
3736 return bnx_sysctl_coal_chg(oidp, arg1, arg2, req,
3737 &sc->bnx_tx_coal_ticks,
3738 BNX_TX_COAL_TICKS_MIN, BNX_TX_COAL_TICKS_MAX,
3739 BNX_TX_COAL_TICKS_CHG);
3740}
3741
3742static int
3743bnx_sysctl_rx_coal_bds(SYSCTL_HANDLER_ARGS)
3744{
3745 struct bnx_softc *sc = arg1;
3746
3747 return bnx_sysctl_coal_chg(oidp, arg1, arg2, req,
3748 &sc->bnx_rx_coal_bds,
3749 BNX_RX_COAL_BDS_MIN, BNX_RX_COAL_BDS_MAX,
3750 BNX_RX_COAL_BDS_CHG);
3751}
3752
3753static int
3754bnx_sysctl_tx_coal_bds(SYSCTL_HANDLER_ARGS)
3755{
3756 struct bnx_softc *sc = arg1;
3757
3758 return bnx_sysctl_coal_chg(oidp, arg1, arg2, req,
3759 &sc->bnx_tx_coal_bds,
3760 BNX_TX_COAL_BDS_MIN, BNX_TX_COAL_BDS_MAX,
3761 BNX_TX_COAL_BDS_CHG);
3762}
3763
3764static int
3765bnx_sysctl_rx_coal_bds_int(SYSCTL_HANDLER_ARGS)
3766{
3767 struct bnx_softc *sc = arg1;
3768
3769 return bnx_sysctl_coal_chg(oidp, arg1, arg2, req,
3770 &sc->bnx_rx_coal_bds_int,
3771 BNX_RX_COAL_BDS_MIN, BNX_RX_COAL_BDS_MAX,
3772 BNX_RX_COAL_BDS_INT_CHG);
3773}
3774
3775static int
3776bnx_sysctl_tx_coal_bds_int(SYSCTL_HANDLER_ARGS)
3777{
3778 struct bnx_softc *sc = arg1;
3779
3780 return bnx_sysctl_coal_chg(oidp, arg1, arg2, req,
3781 &sc->bnx_tx_coal_bds_int,
3782 BNX_TX_COAL_BDS_MIN, BNX_TX_COAL_BDS_MAX,
3783 BNX_TX_COAL_BDS_INT_CHG);
3784}
3785
3786static int
3787bnx_sysctl_coal_chg(SYSCTL_HANDLER_ARGS, uint32_t *coal,
3788 int coal_min, int coal_max, uint32_t coal_chg_mask)
3789{
3790 struct bnx_softc *sc = arg1;
3791 struct ifnet *ifp = &sc->arpcom.ac_if;
3792 int error = 0, v;
3793
3794 lwkt_serialize_enter(ifp->if_serializer);
3795
3796 v = *coal;
3797 error = sysctl_handle_int(oidp, &v, 0, req);
3798 if (!error && req->newptr != NULL) {
3799 if (v < coal_min || v > coal_max) {
3800 error = EINVAL;
3801 } else {
3802 *coal = v;
3803 sc->bnx_coal_chg |= coal_chg_mask;
3804 }
3805 }
3806
3807 lwkt_serialize_exit(ifp->if_serializer);
3808 return error;
3809}
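/*
 * Tuning sketch (the sysctl node path below is an assumption; it
 * depends on where the driver registers its sysctl tree):
 *
 *	# sysctl hw.bnx0.rx_coal_ticks=150
 *
 * The handler validates the new value against [coal_min, coal_max]
 * while holding the interface serializer, stores it, and records the
 * change in bnx_coal_chg; bnx_coal_change() later programs the chip.
 */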
3810
3811static void
3812bnx_coal_change(struct bnx_softc *sc)
3813{
3814 struct ifnet *ifp = &sc->arpcom.ac_if;
3815 uint32_t val;
3816
3817 ASSERT_SERIALIZED(ifp->if_serializer);
3818
3819 if (sc->bnx_coal_chg & BNX_RX_COAL_TICKS_CHG) {
3820 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS,
3821 sc->bnx_rx_coal_ticks);
3822 DELAY(10);
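		/*
		 * Read-back (value unused) apparently flushes the
		 * preceding write; the same write/DELAY/read-back
		 * pattern repeats for each coalescing register below.
		 */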
3823 val = CSR_READ_4(sc, BGE_HCC_RX_COAL_TICKS);
3824
3825 if (bootverbose) {
3826 if_printf(ifp, "rx_coal_ticks -> %u\n",
3827 sc->bnx_rx_coal_ticks);
3828 }
3829 }
3830
3831 if (sc->bnx_coal_chg & BNX_TX_COAL_TICKS_CHG) {
3832 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS,
3833 sc->bnx_tx_coal_ticks);
3834 DELAY(10);
3835 val = CSR_READ_4(sc, BGE_HCC_TX_COAL_TICKS);
3836
3837 if (bootverbose) {
3838 if_printf(ifp, "tx_coal_ticks -> %u\n",
3839 sc->bnx_tx_coal_ticks);
3840 }
3841 }
3842
3843 if (sc->bnx_coal_chg & BNX_RX_COAL_BDS_CHG) {
3844 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS,
3845 sc->bnx_rx_coal_bds);
3846 DELAY(10);
3847 val = CSR_READ_4(sc, BGE_HCC_RX_MAX_COAL_BDS);
3848
3849 if (bootverbose) {
3850 if_printf(ifp, "rx_coal_bds -> %u\n",
3851 sc->bnx_rx_coal_bds);
3852 }
3853 }
3854
3855 if (sc->bnx_coal_chg & BNX_TX_COAL_BDS_CHG) {
3856 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS,
3857 sc->bnx_tx_coal_bds);
3858 DELAY(10);
3859 val = CSR_READ_4(sc, BGE_HCC_TX_MAX_COAL_BDS);
3860
3861 if (bootverbose) {
3862 if_printf(ifp, "tx_max_coal_bds -> %u\n",
3863 sc->bnx_tx_coal_bds);
3864 }
3865 }
3866
3867 if (sc->bnx_coal_chg & BNX_RX_COAL_BDS_INT_CHG) {
3868 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT,
3869 sc->bnx_rx_coal_bds_int);
3870 DELAY(10);
3871 val = CSR_READ_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT);
3872
3873 if (bootverbose) {
3874 if_printf(ifp, "rx_coal_bds_int -> %u\n",
3875 sc->bnx_rx_coal_bds_int);
3876 }
3877 }
3878
3879 if (sc->bnx_coal_chg & BNX_TX_COAL_BDS_INT_CHG) {
3880 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT,
3881 sc->bnx_tx_coal_bds_int);
3882 DELAY(10);
3883 val = CSR_READ_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT);
3884
3885 if (bootverbose) {
3886 if_printf(ifp, "tx_coal_bds_int -> %u\n",
3887 sc->bnx_tx_coal_bds_int);
3888 }
3889 }
3890
3891 sc->bnx_coal_chg = 0;
3892}
3893
3894static void
3895bnx_enable_intr(struct bnx_softc *sc)
3896{
3897 struct ifnet *ifp = &sc->arpcom.ac_if;
3898
3899 lwkt_serialize_handler_enable(ifp->if_serializer);
3900
3901 /*
3902 * Enable interrupt.
3903 */
3904 bnx_writembx(sc, BGE_MBX_IRQ0_LO, sc->bnx_status_tag << 24);
3905 if (sc->bnx_flags & BNX_FLAG_ONESHOT_MSI) {
3906 /* XXX Linux driver */
3907 bnx_writembx(sc, BGE_MBX_IRQ0_LO, sc->bnx_status_tag << 24);
3908 }
3909
3910 /*
3911 * Unmask the interrupt when we stop polling.
3912 */
3913 PCI_CLRBIT(sc->bnx_dev, BGE_PCI_MISC_CTL,
3914 BGE_PCIMISCCTL_MASK_PCI_INTR, 4);
3915
3916 /*
3917	 * Trigger another interrupt, since the above write
3918	 * to interrupt mailbox 0 may have acknowledged a
3919	 * pending interrupt.
3920 */
3921 BNX_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
3922}
3923
3924static void
3925bnx_disable_intr(struct bnx_softc *sc)
3926{
3927 struct ifnet *ifp = &sc->arpcom.ac_if;
3928
3929 /*
3930 * Mask the interrupt when we start polling.
3931 */
3932 PCI_SETBIT(sc->bnx_dev, BGE_PCI_MISC_CTL,
3933 BGE_PCIMISCCTL_MASK_PCI_INTR, 4);
3934
3935 /*
3936 * Acknowledge possible asserted interrupt.
3937 */
3938 bnx_writembx(sc, BGE_MBX_IRQ0_LO, 1);
3939
3940 lwkt_serialize_handler_disable(ifp->if_serializer);
3941}
3942
3943static int
3944bnx_get_eaddr_mem(struct bnx_softc *sc, uint8_t ether_addr[])
3945{
3946 uint32_t mac_addr;
3947 int ret = 1;
3948
3949 mac_addr = bnx_readmem_ind(sc, 0x0c14);
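	/* A valid address block starts with the ASCII signature "HK" (0x484b). */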
3950 if ((mac_addr >> 16) == 0x484b) {
3951 ether_addr[0] = (uint8_t)(mac_addr >> 8);
3952 ether_addr[1] = (uint8_t)mac_addr;
3953 mac_addr = bnx_readmem_ind(sc, 0x0c18);
3954 ether_addr[2] = (uint8_t)(mac_addr >> 24);
3955 ether_addr[3] = (uint8_t)(mac_addr >> 16);
3956 ether_addr[4] = (uint8_t)(mac_addr >> 8);
3957 ether_addr[5] = (uint8_t)mac_addr;
3958 ret = 0;
3959 }
3960 return ret;
3961}
3962
3963static int
3964bnx_get_eaddr_nvram(struct bnx_softc *sc, uint8_t ether_addr[])
3965{
3966 int mac_offset = BGE_EE_MAC_OFFSET;
3967
3968 if (BNX_IS_5717_PLUS(sc)) {
3969 int f;
3970
3971 f = pci_get_function(sc->bnx_dev);
3972 if (f & 1)
3973 mac_offset = BGE_EE_MAC_OFFSET_5717;
3974 if (f > 1)
3975 mac_offset += BGE_EE_MAC_OFFSET_5717_OFF;
3976 } else if (sc->bnx_asicrev == BGE_ASICREV_BCM5906) {
3977		mac_offset = BGE_EE_MAC_OFFSET_5906;
3978	}
3979
3980 return bnx_read_nvram(sc, ether_addr, mac_offset + 2, ETHER_ADDR_LEN);
3981}
3982
3983static int
3984bnx_get_eaddr_eeprom(struct bnx_softc *sc, uint8_t ether_addr[])
3985{
3986 if (sc->bnx_flags & BNX_FLAG_NO_EEPROM)
3987 return 1;
3988
3989 return bnx_read_eeprom(sc, ether_addr, BGE_EE_MAC_OFFSET + 2,
3990 ETHER_ADDR_LEN);
3991}
3992
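/*
 * MAC address discovery tries each backing store in turn; the first
 * helper to return 0 wins.  The order (chip memory, then NVRAM, then
 * EEPROM) is deliberate, as the NOTE in the table below says.
 */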
3993static int
3994bnx_get_eaddr(struct bnx_softc *sc, uint8_t eaddr[])
3995{
3996 static const bnx_eaddr_fcn_t bnx_eaddr_funcs[] = {
3997 /* NOTE: Order is critical */
3998 bnx_get_eaddr_mem,
3999 bnx_get_eaddr_nvram,
4000 bnx_get_eaddr_eeprom,
4001 NULL
4002 };
4003 const bnx_eaddr_fcn_t *func;
4004
4005 for (func = bnx_eaddr_funcs; *func != NULL; ++func) {
4006 if ((*func)(sc, eaddr) == 0)
4007 break;
4008 }
4009 return (*func == NULL ? ENXIO : 0);
4010}
4011
4012/*
4013 * NOTE: 'm' is not freed upon failure
4014 */
4015struct mbuf *
4016bnx_defrag_shortdma(struct mbuf *m)
4017{
4018 struct mbuf *n;
4019 int found;
4020
4021 /*
4022	 * If the device receives two back-to-back send BDs with less
4023	 * than or equal to 8 total bytes, it may hang.  The two
4024	 * back-to-back send BDs must be in the same frame for this
4025	 * failure to occur.  Scan the mbuf chain for two such
4026	 * back-to-back send BDs; if they are found, defragment the
4027	 * frame into a new mbuf chain to work around the silicon bug.
4028 */
4029 for (n = m, found = 0; n != NULL; n = n->m_next) {
4030 if (n->m_len < 8) {
4031 found++;
4032 if (found > 1)
4033 break;
4034 continue;
4035 }
4036 found = 0;
4037 }
4038
4039 if (found > 1)
4040 n = m_defrag(m, MB_DONTWAIT);
4041 else
4042 n = m;
4043 return n;
4044}
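/*
 * Call-site sketch (hypothetical; the real TX encapsulation path lives
 * elsewhere in this file).  Note the contract above: on failure the
 * original mbuf is NOT freed, so the caller must release it:
 *
 *	n = bnx_defrag_shortdma(m);
 *	if (n == NULL) {
 *		m_freem(m);
 *		return ENOBUFS;
 *	}
 *	m = n;
 */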
4045
4046static void
4047bnx_stop_block(struct bnx_softc *sc, bus_size_t reg, uint32_t bit)
4048{
4049 int i;
4050
4051 BNX_CLRBIT(sc, reg, bit);
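	/* Poll up to BNX_TIMEOUT times, 100us apart, for the block to stop. */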
4052 for (i = 0; i < BNX_TIMEOUT; i++) {
4053 if ((CSR_READ_4(sc, reg) & bit) == 0)
4054 return;
4055 DELAY(100);
4056 }
4057}
4058
4059static void
4060bnx_link_poll(struct bnx_softc *sc)
4061{
4062 uint32_t status;
4063
4064 status = CSR_READ_4(sc, BGE_MAC_STS);
4065 if ((status & sc->bnx_link_chg) || sc->bnx_link_evt) {
4066 sc->bnx_link_evt = 0;
4067 sc->bnx_link_upd(sc, status);
4068 }
4069}
4070
4071static void
4072bnx_enable_msi(struct bnx_softc *sc)
4073{
4074 uint32_t msi_mode;
4075
4076 msi_mode = CSR_READ_4(sc, BGE_MSI_MODE);
4077 msi_mode |= BGE_MSIMODE_ENABLE;
4078 if (sc->bnx_flags & BNX_FLAG_ONESHOT_MSI) {
4079 /*
4080 * NOTE:
4082	 * 5718-PG105-R says that "one shot" mode
4083	 * does not work if MSI is used; in practice,
4084	 * however, it does work.
4084 */
4085 msi_mode &= ~BGE_MSIMODE_ONESHOT_DISABLE;
4086 }
4087 CSR_WRITE_4(sc, BGE_MSI_MODE, msi_mode);
4088}
4089
4090static uint32_t
4091bnx_dma_swap_options(struct bnx_softc *sc)
4092{
4093 uint32_t dma_options;
4094
4095 dma_options = BGE_MODECTL_WORDSWAP_NONFRAME |
4096 BGE_MODECTL_BYTESWAP_DATA | BGE_MODECTL_WORDSWAP_DATA;
4097#if BYTE_ORDER == BIG_ENDIAN
4098 dma_options |= BGE_MODECTL_BYTESWAP_NONFRAME;
4099#endif
4100 if (sc->bnx_asicrev == BGE_ASICREV_BCM5720) {
4101 dma_options |= BGE_MODECTL_BYTESWAP_B2HRX_DATA |
4102 BGE_MODECTL_WORDSWAP_B2HRX_DATA | BGE_MODECTL_B2HRX_ENABLE |
4103 BGE_MODECTL_HTX2B_ENABLE;
4104 }
4105 return dma_options;
4106}