bnx: Use 57765_FAMILY to configure the Low Watermark Maximum Receive Frame register
sys/dev/netif/bnx/if_bnx.c
/*
 * Copyright (c) 2001 Wind River Systems
 * Copyright (c) 1997, 1998, 1999, 2001
 *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/bge/if_bge.c,v 1.3.2.39 2005/07/03 03:41:18 silby Exp $
 */


#include "opt_polling.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/interrupt.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/rman.h>
#include <sys/serialize.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/ifq_var.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>

#include <dev/netif/mii_layer/mii.h>
#include <dev/netif/mii_layer/miivar.h>
#include <dev/netif/mii_layer/brgphyreg.h>

#include <bus/pci/pcidevs.h>
#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>

#include <dev/netif/bge/if_bgereg.h>
#include <dev/netif/bnx/if_bnxvar.h>

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

#define BNX_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)

static const struct bnx_type {
	uint16_t	bnx_vid;
	uint16_t	bnx_did;
	char		*bnx_name;
} bnx_devs[] = {
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5717,
		"Broadcom BCM5717 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5718,
		"Broadcom BCM5718 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5719,
		"Broadcom BCM5719 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5720_ALT,
		"Broadcom BCM5720 Gigabit Ethernet" },

	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57761,
		"Broadcom BCM57761 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57781,
		"Broadcom BCM57781 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57791,
		"Broadcom BCM57791 Fast Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57765,
		"Broadcom BCM57765 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57785,
		"Broadcom BCM57785 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57795,
		"Broadcom BCM57795 Fast Ethernet" },

	{ 0, 0, NULL }
};

#define BNX_IS_JUMBO_CAPABLE(sc)	((sc)->bnx_flags & BNX_FLAG_JUMBO)
#define BNX_IS_5717_PLUS(sc)		((sc)->bnx_flags & BNX_FLAG_5717_PLUS)
#define BNX_IS_57765_PLUS(sc)		((sc)->bnx_flags & BNX_FLAG_57765_PLUS)
#define BNX_IS_57765_FAMILY(sc)	\
	((sc)->bnx_flags & BNX_FLAG_57765_FAMILY)

typedef int	(*bnx_eaddr_fcn_t)(struct bnx_softc *, uint8_t[]);

static int	bnx_probe(device_t);
static int	bnx_attach(device_t);
static int	bnx_detach(device_t);
static void	bnx_shutdown(device_t);
static int	bnx_suspend(device_t);
static int	bnx_resume(device_t);
static int	bnx_miibus_readreg(device_t, int, int);
static int	bnx_miibus_writereg(device_t, int, int, int);
static void	bnx_miibus_statchg(device_t);

#ifdef DEVICE_POLLING
static void	bnx_poll(struct ifnet *ifp, enum poll_cmd cmd, int count);
#endif
static void	bnx_intr_legacy(void *);
static void	bnx_msi(void *);
static void	bnx_msi_oneshot(void *);
static void	bnx_intr(struct bnx_softc *);
static void	bnx_enable_intr(struct bnx_softc *);
static void	bnx_disable_intr(struct bnx_softc *);
static void	bnx_txeof(struct bnx_softc *, uint16_t);
static void	bnx_rxeof(struct bnx_softc *, uint16_t);

static void	bnx_start(struct ifnet *);
static int	bnx_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	bnx_init(void *);
static void	bnx_stop(struct bnx_softc *);
static void	bnx_watchdog(struct ifnet *);
static int	bnx_ifmedia_upd(struct ifnet *);
static void	bnx_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static void	bnx_tick(void *);

static int	bnx_alloc_jumbo_mem(struct bnx_softc *);
static void	bnx_free_jumbo_mem(struct bnx_softc *);
static struct bnx_jslot
		*bnx_jalloc(struct bnx_softc *);
static void	bnx_jfree(void *);
static void	bnx_jref(void *);
static int	bnx_newbuf_std(struct bnx_softc *, int, int);
static int	bnx_newbuf_jumbo(struct bnx_softc *, int, int);
static void	bnx_setup_rxdesc_std(struct bnx_softc *, int);
static void	bnx_setup_rxdesc_jumbo(struct bnx_softc *, int);
static int	bnx_init_rx_ring_std(struct bnx_softc *);
static void	bnx_free_rx_ring_std(struct bnx_softc *);
static int	bnx_init_rx_ring_jumbo(struct bnx_softc *);
static void	bnx_free_rx_ring_jumbo(struct bnx_softc *);
static void	bnx_free_tx_ring(struct bnx_softc *);
static int	bnx_init_tx_ring(struct bnx_softc *);
static int	bnx_dma_alloc(struct bnx_softc *);
static void	bnx_dma_free(struct bnx_softc *);
static int	bnx_dma_block_alloc(struct bnx_softc *, bus_size_t,
		    bus_dma_tag_t *, bus_dmamap_t *, void **, bus_addr_t *);
static void	bnx_dma_block_free(bus_dma_tag_t, bus_dmamap_t, void *);
static struct mbuf *
		bnx_defrag_shortdma(struct mbuf *);
static int	bnx_encap(struct bnx_softc *, struct mbuf **, uint32_t *);

static void	bnx_reset(struct bnx_softc *);
static int	bnx_chipinit(struct bnx_softc *);
static int	bnx_blockinit(struct bnx_softc *);
static void	bnx_stop_block(struct bnx_softc *, bus_size_t, uint32_t);
static void	bnx_enable_msi(struct bnx_softc *sc);
static void	bnx_setmulti(struct bnx_softc *);
static void	bnx_setpromisc(struct bnx_softc *);
static void	bnx_stats_update_regs(struct bnx_softc *);
static uint32_t	bnx_dma_swap_options(struct bnx_softc *);

static uint32_t	bnx_readmem_ind(struct bnx_softc *, uint32_t);
static void	bnx_writemem_ind(struct bnx_softc *, uint32_t, uint32_t);
#ifdef notdef
static uint32_t	bnx_readreg_ind(struct bnx_softc *, uint32_t);
#endif
static void	bnx_writereg_ind(struct bnx_softc *, uint32_t, uint32_t);
static void	bnx_writemem_direct(struct bnx_softc *, uint32_t, uint32_t);
static void	bnx_writembx(struct bnx_softc *, int, int);
static uint8_t	bnx_nvram_getbyte(struct bnx_softc *, int, uint8_t *);
static int	bnx_read_nvram(struct bnx_softc *, caddr_t, int, int);
static uint8_t	bnx_eeprom_getbyte(struct bnx_softc *, uint32_t, uint8_t *);
static int	bnx_read_eeprom(struct bnx_softc *, caddr_t, uint32_t, size_t);

static void	bnx_tbi_link_upd(struct bnx_softc *, uint32_t);
static void	bnx_copper_link_upd(struct bnx_softc *, uint32_t);
static void	bnx_autopoll_link_upd(struct bnx_softc *, uint32_t);
static void	bnx_link_poll(struct bnx_softc *);

static int	bnx_get_eaddr_mem(struct bnx_softc *, uint8_t[]);
static int	bnx_get_eaddr_nvram(struct bnx_softc *, uint8_t[]);
static int	bnx_get_eaddr_eeprom(struct bnx_softc *, uint8_t[]);
static int	bnx_get_eaddr(struct bnx_softc *, uint8_t[]);

static void	bnx_coal_change(struct bnx_softc *);
static int	bnx_sysctl_rx_coal_ticks(SYSCTL_HANDLER_ARGS);
static int	bnx_sysctl_tx_coal_ticks(SYSCTL_HANDLER_ARGS);
static int	bnx_sysctl_rx_coal_bds(SYSCTL_HANDLER_ARGS);
static int	bnx_sysctl_tx_coal_bds(SYSCTL_HANDLER_ARGS);
static int	bnx_sysctl_rx_coal_bds_int(SYSCTL_HANDLER_ARGS);
static int	bnx_sysctl_tx_coal_bds_int(SYSCTL_HANDLER_ARGS);
static int	bnx_sysctl_coal_chg(SYSCTL_HANDLER_ARGS, uint32_t *,
		    int, int, uint32_t);

static int	bnx_msi_enable = 1;
TUNABLE_INT("hw.bnx.msi.enable", &bnx_msi_enable);

static device_method_t bnx_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		bnx_probe),
	DEVMETHOD(device_attach,	bnx_attach),
	DEVMETHOD(device_detach,	bnx_detach),
	DEVMETHOD(device_shutdown,	bnx_shutdown),
	DEVMETHOD(device_suspend,	bnx_suspend),
	DEVMETHOD(device_resume,	bnx_resume),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	bnx_miibus_readreg),
	DEVMETHOD(miibus_writereg,	bnx_miibus_writereg),
	DEVMETHOD(miibus_statchg,	bnx_miibus_statchg),

	{ 0, 0 }
};

static DEFINE_CLASS_0(bnx, bnx_driver, bnx_methods, sizeof(struct bnx_softc));
static devclass_t bnx_devclass;

DECLARE_DUMMY_MODULE(if_bnx);
DRIVER_MODULE(if_bnx, pci, bnx_driver, bnx_devclass, NULL, NULL);
DRIVER_MODULE(miibus, bnx, miibus_driver, miibus_devclass, NULL, NULL);

static uint32_t
bnx_readmem_ind(struct bnx_softc *sc, uint32_t off)
{
	device_t dev = sc->bnx_dev;
	uint32_t val;

	if (sc->bnx_asicrev == BGE_ASICREV_BCM5906 &&
	    off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4)
		return 0;

	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
	val = pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4);
	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
	return (val);
}

static void
bnx_writemem_ind(struct bnx_softc *sc, uint32_t off, uint32_t val)
{
	device_t dev = sc->bnx_dev;

	if (sc->bnx_asicrev == BGE_ASICREV_BCM5906 &&
	    off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4)
		return;

	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
	pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
}

#ifdef notdef
static uint32_t
bnx_readreg_ind(struct bnx_softc *sc, uint32_t off)
{
	device_t dev = sc->bnx_dev;

	pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
	return (pci_read_config(dev, BGE_PCI_REG_DATA, 4));
}
#endif

static void
bnx_writereg_ind(struct bnx_softc *sc, uint32_t off, uint32_t val)
{
	device_t dev = sc->bnx_dev;

	pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
	pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);
}

static void
bnx_writemem_direct(struct bnx_softc *sc, uint32_t off, uint32_t val)
{
	CSR_WRITE_4(sc, off, val);
}

static void
bnx_writembx(struct bnx_softc *sc, int off, int val)
{
	if (sc->bnx_asicrev == BGE_ASICREV_BCM5906)
		off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI;

	CSR_WRITE_4(sc, off, val);
}

static uint8_t
bnx_nvram_getbyte(struct bnx_softc *sc, int addr, uint8_t *dest)
{
	uint32_t access, byte = 0;
	int i;

	/* Lock. */
	CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
	for (i = 0; i < 8000; i++) {
		if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1)
			break;
		DELAY(20);
	}
	if (i == 8000)
		return (1);

	/* Enable access. */
	access = CSR_READ_4(sc, BGE_NVRAM_ACCESS);
	CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access | BGE_NVRAMACC_ENABLE);

	CSR_WRITE_4(sc, BGE_NVRAM_ADDR, addr & 0xfffffffc);
	CSR_WRITE_4(sc, BGE_NVRAM_CMD, BGE_NVRAM_READCMD);
	for (i = 0; i < BNX_TIMEOUT * 10; i++) {
		DELAY(10);
		if (CSR_READ_4(sc, BGE_NVRAM_CMD) & BGE_NVRAMCMD_DONE) {
			DELAY(10);
			break;
		}
	}

	if (i == BNX_TIMEOUT * 10) {
		if_printf(&sc->arpcom.ac_if, "nvram read timed out\n");
		return (1);
	}

	/* Get result. */
	byte = CSR_READ_4(sc, BGE_NVRAM_RDDATA);

	*dest = (bswap32(byte) >> ((addr % 4) * 8)) & 0xFF;

	/* Disable access. */
	CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access);

	/* Unlock. */
	CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_CLR1);
	CSR_READ_4(sc, BGE_NVRAM_SWARB);

	return (0);
}
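
/*
 * Worked example of the byte-lane extraction above (illustration only):
 * the NVRAM data register returns a whole 32-bit word, which is
 * byte-swapped because the NVRAM stores data big-endian.  Assuming a
 * read at addr = 0x7e (so addr % 4 == 2) that returns 0x11223344 from
 * BGE_NVRAM_RDDATA:
 *
 *	bswap32(0x11223344) == 0x44332211
 *	(0x44332211 >> (2 * 8)) & 0xFF == 0x33
 *
 * i.e. the third byte lane of the swapped word is the byte at NVRAM
 * offset 0x7e.  The EEPROM path below uses the same lane math, minus
 * the swap.
 */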

/*
 * Read a sequence of bytes from NVRAM.
 */
static int
bnx_read_nvram(struct bnx_softc *sc, caddr_t dest, int off, int cnt)
{
	int err = 0, i;
	uint8_t byte = 0;

	if (sc->bnx_asicrev != BGE_ASICREV_BCM5906)
		return (1);

	for (i = 0; i < cnt; i++) {
		err = bnx_nvram_getbyte(sc, off + i, &byte);
		if (err)
			break;
		*(dest + i) = byte;
	}

	return (err ? 1 : 0);
}

/*
 * Read a byte of data stored in the EEPROM at address 'addr.'  The
 * BCM570x supports both the traditional bitbang interface and an
 * auto access interface for reading the EEPROM.  We use the auto
 * access method.
 */
static uint8_t
bnx_eeprom_getbyte(struct bnx_softc *sc, uint32_t addr, uint8_t *dest)
{
	int i;
	uint32_t byte = 0;

	/*
	 * Enable use of auto EEPROM access so we can avoid
	 * having to use the bitbang method.
	 */
	BNX_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);

	/* Reset the EEPROM, load the clock period. */
	CSR_WRITE_4(sc, BGE_EE_ADDR,
	    BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
	DELAY(20);

	/* Issue the read EEPROM command. */
	CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);

	/* Wait for completion */
	for (i = 0; i < BNX_TIMEOUT * 10; i++) {
		DELAY(10);
		if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
			break;
	}

	if (i == BNX_TIMEOUT * 10) {
		if_printf(&sc->arpcom.ac_if, "eeprom read timed out\n");
		return (1);
	}

	/* Get result. */
	byte = CSR_READ_4(sc, BGE_EE_DATA);

	*dest = (byte >> ((addr % 4) * 8)) & 0xFF;

	return (0);
}

/*
 * Read a sequence of bytes from the EEPROM.
 */
static int
bnx_read_eeprom(struct bnx_softc *sc, caddr_t dest, uint32_t off, size_t len)
{
	size_t i;
	int err;
	uint8_t byte;

	for (byte = 0, err = 0, i = 0; i < len; i++) {
		err = bnx_eeprom_getbyte(sc, off + i, &byte);
		if (err)
			break;
		*(dest + i) = byte;
	}

	return (err ? 1 : 0);
}
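
/*
 * Usage sketch (illustrative, not compiled): this is roughly how
 * bnx_get_eaddr_eeprom() further below uses the helper to pull the
 * station address out of the EEPROM.  The offset constant is assumed
 * to come from the bge register header.
 */
#ifdef notdef
static int
bnx_eeprom_eaddr_sketch(struct bnx_softc *sc, uint8_t ether_addr[])
{
	/* Any non-zero return makes the caller try the next source. */
	return (bnx_read_eeprom(sc, ether_addr, BGE_EE_MAC_OFFSET + 2,
	    ETHER_ADDR_LEN));
}
#endif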

static int
bnx_miibus_readreg(device_t dev, int phy, int reg)
{
	struct bnx_softc *sc = device_get_softc(dev);
	uint32_t val;
	int i;

	KASSERT(phy == sc->bnx_phyno,
	    ("invalid phyno %d, should be %d", phy, sc->bnx_phyno));

	/* Clear the autopoll bit if set, otherwise may trigger PCI errors. */
	if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) {
		CSR_WRITE_4(sc, BGE_MI_MODE,
		    sc->bnx_mi_mode & ~BGE_MIMODE_AUTOPOLL);
		DELAY(80);
	}

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ | BGE_MICOMM_BUSY |
	    BGE_MIPHY(phy) | BGE_MIREG(reg));

	/* Poll for the PHY register access to complete. */
	for (i = 0; i < BNX_TIMEOUT; i++) {
		DELAY(10);
		val = CSR_READ_4(sc, BGE_MI_COMM);
		if ((val & BGE_MICOMM_BUSY) == 0) {
			DELAY(5);
			val = CSR_READ_4(sc, BGE_MI_COMM);
			break;
		}
	}
	if (i == BNX_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if, "PHY read timed out "
		    "(phy %d, reg %d, val 0x%08x)\n", phy, reg, val);
		val = 0;
	}

	/* Restore the autopoll bit if necessary. */
	if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) {
		CSR_WRITE_4(sc, BGE_MI_MODE, sc->bnx_mi_mode);
		DELAY(80);
	}

	if (val & BGE_MICOMM_READFAIL)
		return 0;

	return (val & 0xFFFF);
}

static int
bnx_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct bnx_softc *sc = device_get_softc(dev);
	int i;

	KASSERT(phy == sc->bnx_phyno,
	    ("invalid phyno %d, should be %d", phy, sc->bnx_phyno));

	if (sc->bnx_asicrev == BGE_ASICREV_BCM5906 &&
	    (reg == BRGPHY_MII_1000CTL || reg == BRGPHY_MII_AUXCTL))
		return 0;

	/* Clear the autopoll bit if set, otherwise may trigger PCI errors. */
	if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) {
		CSR_WRITE_4(sc, BGE_MI_MODE,
		    sc->bnx_mi_mode & ~BGE_MIMODE_AUTOPOLL);
		DELAY(80);
	}

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE | BGE_MICOMM_BUSY |
	    BGE_MIPHY(phy) | BGE_MIREG(reg) | val);

	for (i = 0; i < BNX_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) {
			DELAY(5);
			CSR_READ_4(sc, BGE_MI_COMM); /* dummy read */
			break;
		}
	}
	if (i == BNX_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if, "PHY write timed out "
		    "(phy %d, reg %d, val %d)\n", phy, reg, val);
	}

	/* Restore the autopoll bit if necessary. */
	if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) {
		CSR_WRITE_4(sc, BGE_MI_MODE, sc->bnx_mi_mode);
		DELAY(80);
	}

	return 0;
}

static void
bnx_miibus_statchg(device_t dev)
{
	struct bnx_softc *sc;
	struct mii_data *mii;

	sc = device_get_softc(dev);
	mii = device_get_softc(sc->bnx_miibus);

	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->bnx_link = 1;
			break;
		case IFM_1000_T:
		case IFM_1000_SX:
		case IFM_2500_SX:
			if (sc->bnx_asicrev != BGE_ASICREV_BCM5906)
				sc->bnx_link = 1;
			else
				sc->bnx_link = 0;
			break;
		default:
			sc->bnx_link = 0;
			break;
		}
	} else {
		sc->bnx_link = 0;
	}
	if (sc->bnx_link == 0)
		return;

	BNX_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
	    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) {
		BNX_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
	} else {
		BNX_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
	}

	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
		BNX_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
	} else {
		BNX_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
	}
}

/*
 * Memory management for jumbo frames.
 */
static int
bnx_alloc_jumbo_mem(struct bnx_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct bnx_jslot *entry;
	uint8_t *ptr;
	bus_addr_t paddr;
	int i, error;

	/*
	 * Create tag for jumbo mbufs.
	 * This is really a bit of a kludge. We allocate a special
	 * jumbo buffer pool which (thanks to the way our DMA
	 * memory allocation works) will consist of contiguous
	 * pages. This means that even though a jumbo buffer might
	 * be larger than a page size, we don't really need to
	 * map it into more than one DMA segment. However, the
	 * default mbuf tag will result in multi-segment mappings,
	 * so we have to create a special jumbo mbuf tag that
	 * lets us get away with mapping the jumbo buffers as
	 * a single segment. I think eventually the driver should
	 * be changed so that it uses ordinary mbufs and cluster
	 * buffers, i.e. jumbo frames can span multiple DMA
	 * descriptors. But that's a project for another day.
	 */

	/*
	 * Create DMA stuffs for jumbo RX ring.
	 */
	error = bnx_dma_block_alloc(sc, BGE_JUMBO_RX_RING_SZ,
	    &sc->bnx_cdata.bnx_rx_jumbo_ring_tag,
	    &sc->bnx_cdata.bnx_rx_jumbo_ring_map,
	    (void *)&sc->bnx_ldata.bnx_rx_jumbo_ring,
	    &sc->bnx_ldata.bnx_rx_jumbo_ring_paddr);
	if (error) {
		if_printf(ifp, "could not create jumbo RX ring\n");
		return error;
	}

	/*
	 * Create DMA stuffs for jumbo buffer block.
	 */
	error = bnx_dma_block_alloc(sc, BNX_JMEM,
	    &sc->bnx_cdata.bnx_jumbo_tag,
	    &sc->bnx_cdata.bnx_jumbo_map,
	    (void **)&sc->bnx_ldata.bnx_jumbo_buf,
	    &paddr);
	if (error) {
		if_printf(ifp, "could not create jumbo buffer\n");
		return error;
	}

	SLIST_INIT(&sc->bnx_jfree_listhead);

	/*
	 * Now divide it up into 9K pieces and save the addresses
	 * in an array. Note that we play an evil trick here by using
	 * the first few bytes in the buffer to hold the address
	 * of the softc structure for this interface. This is because
	 * bnx_jfree() needs it, but it is called by the mbuf management
	 * code which will not pass it to us explicitly.
	 */
	for (i = 0, ptr = sc->bnx_ldata.bnx_jumbo_buf; i < BNX_JSLOTS; i++) {
		entry = &sc->bnx_cdata.bnx_jslots[i];
		entry->bnx_sc = sc;
		entry->bnx_buf = ptr;
		entry->bnx_paddr = paddr;
		entry->bnx_inuse = 0;
		entry->bnx_slot = i;
		SLIST_INSERT_HEAD(&sc->bnx_jfree_listhead, entry, jslot_link);

		ptr += BNX_JLEN;
		paddr += BNX_JLEN;
	}
	return 0;
}
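
/*
 * Layout sketch for the block carved up above (the exact constants live
 * in if_bnxvar.h; BNX_JMEM is assumed to be BNX_JSLOTS * BNX_JLEN):
 *
 *	bnx_jumbo_buf: [ slot 0 ][ slot 1 ] ... [ slot BNX_JSLOTS - 1 ]
 *	               |<---------------- BNX_JMEM ------------------>|
 *
 * Slot i starts at bnx_jumbo_buf + i * BNX_JLEN, with bnx_paddr holding
 * the matching bus address, which is why a single DMA segment per jumbo
 * buffer suffices.
 */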

static void
bnx_free_jumbo_mem(struct bnx_softc *sc)
{
	/* Destroy jumbo RX ring. */
	bnx_dma_block_free(sc->bnx_cdata.bnx_rx_jumbo_ring_tag,
	    sc->bnx_cdata.bnx_rx_jumbo_ring_map,
	    sc->bnx_ldata.bnx_rx_jumbo_ring);

	/* Destroy jumbo buffer block. */
	bnx_dma_block_free(sc->bnx_cdata.bnx_jumbo_tag,
	    sc->bnx_cdata.bnx_jumbo_map,
	    sc->bnx_ldata.bnx_jumbo_buf);
}

/*
 * Allocate a jumbo buffer.
 */
static struct bnx_jslot *
bnx_jalloc(struct bnx_softc *sc)
{
	struct bnx_jslot *entry;

	lwkt_serialize_enter(&sc->bnx_jslot_serializer);
	entry = SLIST_FIRST(&sc->bnx_jfree_listhead);
	if (entry) {
		SLIST_REMOVE_HEAD(&sc->bnx_jfree_listhead, jslot_link);
		entry->bnx_inuse = 1;
	} else {
		if_printf(&sc->arpcom.ac_if, "no free jumbo buffers\n");
	}
	lwkt_serialize_exit(&sc->bnx_jslot_serializer);
	return (entry);
}

/*
 * Adjust usage count on a jumbo buffer.
 */
static void
bnx_jref(void *arg)
{
	struct bnx_jslot *entry = (struct bnx_jslot *)arg;
	struct bnx_softc *sc = entry->bnx_sc;

	if (sc == NULL)
		panic("bnx_jref: can't find softc pointer!");

	if (&sc->bnx_cdata.bnx_jslots[entry->bnx_slot] != entry) {
		panic("bnx_jref: asked to reference buffer "
		    "that we don't manage!");
	} else if (entry->bnx_inuse == 0) {
		panic("bnx_jref: buffer already free!");
	} else {
		atomic_add_int(&entry->bnx_inuse, 1);
	}
}

/*
 * Release a jumbo buffer.
 */
static void
bnx_jfree(void *arg)
{
	struct bnx_jslot *entry = (struct bnx_jslot *)arg;
	struct bnx_softc *sc = entry->bnx_sc;

	if (sc == NULL)
		panic("bnx_jfree: can't find softc pointer!");

	if (&sc->bnx_cdata.bnx_jslots[entry->bnx_slot] != entry) {
		panic("bnx_jfree: asked to free buffer that we don't manage!");
	} else if (entry->bnx_inuse == 0) {
		panic("bnx_jfree: buffer already free!");
	} else {
		/*
		 * Possible MP race to 0, use the serializer.  The atomic insn
		 * is still needed for races against bnx_jref().
		 */
		lwkt_serialize_enter(&sc->bnx_jslot_serializer);
		atomic_subtract_int(&entry->bnx_inuse, 1);
		if (entry->bnx_inuse == 0) {
			SLIST_INSERT_HEAD(&sc->bnx_jfree_listhead,
			    entry, jslot_link);
		}
		lwkt_serialize_exit(&sc->bnx_jslot_serializer);
	}
}


/*
 * Initialize a standard receive ring descriptor.
 */
static int
bnx_newbuf_std(struct bnx_softc *sc, int i, int init)
{
	struct mbuf *m_new = NULL;
	bus_dma_segment_t seg;
	bus_dmamap_t map;
	int error, nsegs;

	m_new = m_getcl(init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m_new == NULL)
		return ENOBUFS;
	m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
	m_adj(m_new, ETHER_ALIGN);

	error = bus_dmamap_load_mbuf_segment(sc->bnx_cdata.bnx_rx_mtag,
	    sc->bnx_cdata.bnx_rx_tmpmap, m_new,
	    &seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m_new);
		return error;
	}

	if (!init) {
		bus_dmamap_sync(sc->bnx_cdata.bnx_rx_mtag,
		    sc->bnx_cdata.bnx_rx_std_dmamap[i],
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->bnx_cdata.bnx_rx_mtag,
		    sc->bnx_cdata.bnx_rx_std_dmamap[i]);
	}

	map = sc->bnx_cdata.bnx_rx_tmpmap;
	sc->bnx_cdata.bnx_rx_tmpmap = sc->bnx_cdata.bnx_rx_std_dmamap[i];
	sc->bnx_cdata.bnx_rx_std_dmamap[i] = map;

	sc->bnx_cdata.bnx_rx_std_chain[i].bnx_mbuf = m_new;
	sc->bnx_cdata.bnx_rx_std_chain[i].bnx_paddr = seg.ds_addr;

	bnx_setup_rxdesc_std(sc, i);
	return 0;
}

static void
bnx_setup_rxdesc_std(struct bnx_softc *sc, int i)
{
	struct bnx_rxchain *rc;
	struct bge_rx_bd *r;

	rc = &sc->bnx_cdata.bnx_rx_std_chain[i];
	r = &sc->bnx_ldata.bnx_rx_std_ring[i];

	r->bge_addr.bge_addr_lo = BGE_ADDR_LO(rc->bnx_paddr);
	r->bge_addr.bge_addr_hi = BGE_ADDR_HI(rc->bnx_paddr);
	r->bge_len = rc->bnx_mbuf->m_len;
	r->bge_idx = i;
	r->bge_flags = BGE_RXBDFLAG_END;
}

/*
 * Initialize a jumbo receive ring descriptor. This allocates
 * a jumbo buffer from the pool managed internally by the driver.
 */
static int
bnx_newbuf_jumbo(struct bnx_softc *sc, int i, int init)
{
	struct mbuf *m_new = NULL;
	struct bnx_jslot *buf;
	bus_addr_t paddr;

	/* Allocate the mbuf. */
	MGETHDR(m_new, init ? MB_WAIT : MB_DONTWAIT, MT_DATA);
	if (m_new == NULL)
		return ENOBUFS;

	/* Allocate the jumbo buffer */
	buf = bnx_jalloc(sc);
	if (buf == NULL) {
		m_freem(m_new);
		return ENOBUFS;
	}

	/* Attach the buffer to the mbuf. */
	m_new->m_ext.ext_arg = buf;
	m_new->m_ext.ext_buf = buf->bnx_buf;
	m_new->m_ext.ext_free = bnx_jfree;
	m_new->m_ext.ext_ref = bnx_jref;
	m_new->m_ext.ext_size = BNX_JUMBO_FRAMELEN;

	m_new->m_flags |= M_EXT;

	m_new->m_data = m_new->m_ext.ext_buf;
	m_new->m_len = m_new->m_pkthdr.len = m_new->m_ext.ext_size;

	paddr = buf->bnx_paddr;
	m_adj(m_new, ETHER_ALIGN);
	paddr += ETHER_ALIGN;

	/* Save necessary information */
	sc->bnx_cdata.bnx_rx_jumbo_chain[i].bnx_mbuf = m_new;
	sc->bnx_cdata.bnx_rx_jumbo_chain[i].bnx_paddr = paddr;

	/* Set up the descriptor. */
	bnx_setup_rxdesc_jumbo(sc, i);
	return 0;
}

static void
bnx_setup_rxdesc_jumbo(struct bnx_softc *sc, int i)
{
	struct bge_rx_bd *r;
	struct bnx_rxchain *rc;

	r = &sc->bnx_ldata.bnx_rx_jumbo_ring[i];
	rc = &sc->bnx_cdata.bnx_rx_jumbo_chain[i];

	r->bge_addr.bge_addr_lo = BGE_ADDR_LO(rc->bnx_paddr);
	r->bge_addr.bge_addr_hi = BGE_ADDR_HI(rc->bnx_paddr);
	r->bge_len = rc->bnx_mbuf->m_len;
	r->bge_idx = i;
	r->bge_flags = BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING;
}

static int
bnx_init_rx_ring_std(struct bnx_softc *sc)
{
	int i, error;

	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
		error = bnx_newbuf_std(sc, i, 1);
		if (error)
			return error;
	}

	sc->bnx_std = BGE_STD_RX_RING_CNT - 1;
	bnx_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bnx_std);

	return (0);
}

static void
bnx_free_rx_ring_std(struct bnx_softc *sc)
{
	int i;

	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
		struct bnx_rxchain *rc = &sc->bnx_cdata.bnx_rx_std_chain[i];

		if (rc->bnx_mbuf != NULL) {
			bus_dmamap_unload(sc->bnx_cdata.bnx_rx_mtag,
			    sc->bnx_cdata.bnx_rx_std_dmamap[i]);
			m_freem(rc->bnx_mbuf);
			rc->bnx_mbuf = NULL;
		}
		bzero(&sc->bnx_ldata.bnx_rx_std_ring[i],
		    sizeof(struct bge_rx_bd));
	}
}

static int
bnx_init_rx_ring_jumbo(struct bnx_softc *sc)
{
	struct bge_rcb *rcb;
	int i, error;

	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
		error = bnx_newbuf_jumbo(sc, i, 1);
		if (error)
			return error;
	}

	sc->bnx_jumbo = BGE_JUMBO_RX_RING_CNT - 1;

	rcb = &sc->bnx_ldata.bnx_info.bnx_jumbo_rx_rcb;
	rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 0);
	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);

	bnx_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bnx_jumbo);

	return (0);
}

static void
bnx_free_rx_ring_jumbo(struct bnx_softc *sc)
{
	int i;

	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
		struct bnx_rxchain *rc = &sc->bnx_cdata.bnx_rx_jumbo_chain[i];

		if (rc->bnx_mbuf != NULL) {
			m_freem(rc->bnx_mbuf);
			rc->bnx_mbuf = NULL;
		}
		bzero(&sc->bnx_ldata.bnx_rx_jumbo_ring[i],
		    sizeof(struct bge_rx_bd));
	}
}

static void
bnx_free_tx_ring(struct bnx_softc *sc)
{
	int i;

	for (i = 0; i < BGE_TX_RING_CNT; i++) {
		if (sc->bnx_cdata.bnx_tx_chain[i] != NULL) {
			bus_dmamap_unload(sc->bnx_cdata.bnx_tx_mtag,
			    sc->bnx_cdata.bnx_tx_dmamap[i]);
			m_freem(sc->bnx_cdata.bnx_tx_chain[i]);
			sc->bnx_cdata.bnx_tx_chain[i] = NULL;
		}
		bzero(&sc->bnx_ldata.bnx_tx_ring[i],
		    sizeof(struct bge_tx_bd));
	}
}

static int
bnx_init_tx_ring(struct bnx_softc *sc)
{
	sc->bnx_txcnt = 0;
	sc->bnx_tx_saved_considx = 0;
	sc->bnx_tx_prodidx = 0;

	/* Initialize transmit producer index for host-memory send ring. */
	bnx_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bnx_tx_prodidx);
	bnx_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);

	return (0);
}

static void
bnx_setmulti(struct bnx_softc *sc)
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	uint32_t hashes[4] = { 0, 0, 0, 0 };
	int h, i;

	ifp = &sc->arpcom.ac_if;

	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		for (i = 0; i < 4; i++)
			CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
		return;
	}

	/* First, zot all the existing filters. */
	for (i = 0; i < 4; i++)
		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);

	/* Now program new ones. */
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		h = ether_crc32_le(
		    LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
		    ETHER_ADDR_LEN) & 0x7f;
		hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
	}

	for (i = 0; i < 4; i++)
		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
}
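
/*
 * Worked example for the hash above (illustration only): the 7-bit
 * multicast hash is spread across the four 32-bit BGE_MAR registers.
 * Bits 6-5 select the register and bits 4-0 select the bit within it.
 * For h = 0x45 (binary 1000101):
 *
 *	register index = (0x45 & 0x60) >> 5 = 2  ->  BGE_MAR0 + (2 * 4)
 *	bit            =  0x45 & 0x1F      = 5   ->  bit 5
 *
 * so that address sets bit 5 of the third MAR register.
 */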

/*
 * Do endian, PCI and DMA initialization. Also check the on-board ROM
 * self-test results.
 */
static int
bnx_chipinit(struct bnx_softc *sc)
{
	uint32_t dma_rw_ctl, mode_ctl;
	int i;

	/* Set endian type before we access any non-PCI registers. */
	pci_write_config(sc->bnx_dev, BGE_PCI_MISC_CTL,
	    BGE_INIT | BGE_PCIMISCCTL_TAGGED_STATUS, 4);

	/* Clear the MAC control register */
	CSR_WRITE_4(sc, BGE_MAC_MODE, 0);

	/*
	 * Clear the MAC statistics block in the NIC's
	 * internal memory.
	 */
	for (i = BGE_STATS_BLOCK;
	    i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t))
		BNX_MEMWIN_WRITE(sc, i, 0);

	for (i = BGE_STATUS_BLOCK;
	    i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t))
		BNX_MEMWIN_WRITE(sc, i, 0);

	if (BNX_IS_57765_FAMILY(sc)) {
		uint32_t val;

		if (sc->bnx_chipid == BGE_CHIPID_BCM57765_A0) {
			mode_ctl = CSR_READ_4(sc, BGE_MODE_CTL);
			val = mode_ctl & ~BGE_MODECTL_PCIE_PORTS;

			/* Access the lower 1K of PL PCI-E block registers. */
			CSR_WRITE_4(sc, BGE_MODE_CTL,
			    val | BGE_MODECTL_PCIE_PL_SEL);

			val = CSR_READ_4(sc, BGE_PCIE_PL_LO_PHYCTL5);
			val |= BGE_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ;
			CSR_WRITE_4(sc, BGE_PCIE_PL_LO_PHYCTL5, val);

			CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl);
		}
		if (sc->bnx_chiprev != BGE_CHIPREV_57765_AX) {
			mode_ctl = CSR_READ_4(sc, BGE_MODE_CTL);
			val = mode_ctl & ~BGE_MODECTL_PCIE_PORTS;

			/* Access the lower 1K of DL PCI-E block registers. */
			CSR_WRITE_4(sc, BGE_MODE_CTL,
			    val | BGE_MODECTL_PCIE_DL_SEL);

			val = CSR_READ_4(sc, BGE_PCIE_DL_LO_FTSMAX);
			val &= ~BGE_PCIE_DL_LO_FTSMAX_MASK;
			val |= BGE_PCIE_DL_LO_FTSMAX_VAL;
			CSR_WRITE_4(sc, BGE_PCIE_DL_LO_FTSMAX, val);

			CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl);
		}

		val = CSR_READ_4(sc, BGE_CPMU_LSPD_10MB_CLK);
		val &= ~BGE_CPMU_LSPD_10MB_MACCLK_MASK;
		val |= BGE_CPMU_LSPD_10MB_MACCLK_6_25;
		CSR_WRITE_4(sc, BGE_CPMU_LSPD_10MB_CLK, val);
	}

	/* Set up the PCI DMA control register. */
	dma_rw_ctl = BGE_PCI_READ_CMD | BGE_PCI_WRITE_CMD |
	    (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);

	if (BNX_IS_57765_PLUS(sc)) {
		dma_rw_ctl &= ~BGE_PCIDMARWCTL_DIS_CACHE_ALIGNMENT;
		if (sc->bnx_chipid == BGE_CHIPID_BCM57765_A0)
			dma_rw_ctl &= ~BGE_PCIDMARWCTL_CRDRDR_RDMA_MRRS_MSK;
		/*
		 * Enable HW workaround for controllers that misinterpret
		 * a status tag update and leave interrupts permanently
		 * disabled.
		 */
		if (sc->bnx_asicrev != BGE_ASICREV_BCM5717 &&
		    sc->bnx_asicrev != BGE_ASICREV_BCM57765)
			dma_rw_ctl |= BGE_PCIDMARWCTL_TAGGED_STATUS_WA;
	}
	pci_write_config(sc->bnx_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);

	/*
	 * Set up general mode register.
	 */
	mode_ctl = bnx_dma_swap_options(sc) | BGE_MODECTL_MAC_ATTN_INTR |
	    BGE_MODECTL_HOST_SEND_BDS | BGE_MODECTL_TX_NO_PHDR_CSUM;
	CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl);

	/*
	 * Disable memory write invalidate.  Apparently it is not supported
	 * properly by these devices.  Also ensure that INTx isn't disabled,
	 * as these chips need it even when using MSI.
	 */
	PCI_CLRBIT(sc->bnx_dev, BGE_PCI_CMD,
	    (PCIM_CMD_MWRICEN | PCIM_CMD_INTxDIS), 4);

	/* Set the timer prescaler (always 66MHz) */
	CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/);

	if (sc->bnx_asicrev == BGE_ASICREV_BCM5906) {
		DELAY(40);	/* XXX */

		/* Put PHY into ready state */
		BNX_CLRBIT(sc, BGE_MISC_CFG, BGE_MISCCFG_EPHY_IDDQ);
		CSR_READ_4(sc, BGE_MISC_CFG); /* Flush */
		DELAY(40);
	}

	return (0);
}

static int
bnx_blockinit(struct bnx_softc *sc)
{
	struct bge_rcb *rcb;
	bus_size_t vrcb;
	bge_hostaddr taddr;
	uint32_t val;
	int i, limit;

	/*
	 * Initialize the memory window pointer register so that
	 * we can access the first 32K of internal NIC RAM. This will
	 * allow us to set up the TX send ring RCBs and the RX return
	 * ring RCBs, plus other things which live in NIC memory.
	 */
	CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);

	/* Configure mbuf pool watermarks */
	if (BNX_IS_57765_PLUS(sc)) {
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
		if (sc->arpcom.ac_if.if_mtu > ETHERMTU) {
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x7e);
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xea);
		} else {
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x2a);
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xa0);
		}
	} else if (sc->bnx_asicrev == BGE_ASICREV_BCM5906) {
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x04);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x10);
	} else {
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
	}

	/* Configure DMA resource watermarks */
	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);

	/* Enable buffer manager */
	val = BGE_BMANMODE_ENABLE | BGE_BMANMODE_LOMBUF_ATTN;
	/*
	 * Change the arbitration algorithm of TXMBUF read request to
	 * round-robin instead of priority based for BCM5719.  When
	 * TXFIFO is almost empty, RDMA will hold its request until
	 * TXFIFO is not almost empty.
	 */
	if (sc->bnx_asicrev == BGE_ASICREV_BCM5719)
		val |= BGE_BMANMODE_NO_TX_UNDERRUN;
	CSR_WRITE_4(sc, BGE_BMAN_MODE, val);

	/* Poll for buffer manager start indication */
	for (i = 0; i < BNX_TIMEOUT; i++) {
		if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
			break;
		DELAY(10);
	}

	if (i == BNX_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if,
		    "buffer manager failed to start\n");
		return (ENXIO);
	}

	/* Enable flow-through queues */
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);

	/* Wait until queue initialization is complete */
	for (i = 0; i < BNX_TIMEOUT; i++) {
		if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
			break;
		DELAY(10);
	}

	if (i == BNX_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if,
		    "flow-through queue init failed\n");
		return (ENXIO);
	}

	/*
	 * Summary of rings supported by the controller:
	 *
	 * Standard Receive Producer Ring
	 * - This ring is used to feed receive buffers for "standard"
	 *   sized frames (typically 1536 bytes) to the controller.
	 *
	 * Jumbo Receive Producer Ring
	 * - This ring is used to feed receive buffers for jumbo sized
	 *   frames (i.e. anything bigger than the "standard" frames)
	 *   to the controller.
	 *
	 * Mini Receive Producer Ring
	 * - This ring is used to feed receive buffers for "mini"
	 *   sized frames to the controller.
	 * - This feature required external memory for the controller
	 *   but was never used in a production system.  Should always
	 *   be disabled.
	 *
	 * Receive Return Ring
	 * - After the controller has placed an incoming frame into a
	 *   receive buffer, that buffer is moved into a receive return
	 *   ring.  The driver is then responsible for passing the
	 *   buffer up to the stack.  Many versions of the controller
	 *   support multiple RR rings.
	 *
	 * Send Ring
	 * - This ring is used for outgoing frames.  Many versions of
	 *   the controller support multiple send rings.
	 */

	/* Initialize the standard receive producer ring control block. */
	rcb = &sc->bnx_ldata.bnx_info.bnx_std_rx_rcb;
	rcb->bge_hostaddr.bge_addr_lo =
	    BGE_ADDR_LO(sc->bnx_ldata.bnx_rx_std_ring_paddr);
	rcb->bge_hostaddr.bge_addr_hi =
	    BGE_ADDR_HI(sc->bnx_ldata.bnx_rx_std_ring_paddr);
	if (BNX_IS_57765_PLUS(sc)) {
		/*
		 * Bits 31-16: Programmable ring size (2048, 1024, 512, .., 32)
		 * Bits 15-2 : Maximum RX frame size
		 * Bit 1     : 1 = Ring Disabled, 0 = Ring Enabled
		 * Bit 0     : Reserved
		 */
		rcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(512, BNX_MAX_FRAMELEN << 2);
	} else {
		/*
		 * Bits 31-16: Programmable ring size (512, 256, 128, 64, 32)
		 * Bits 15-2 : Reserved (should be 0)
		 * Bit 1     : 1 = Ring Disabled, 0 = Ring Enabled
		 * Bit 0     : Reserved
		 */
		rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
	}
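
	/*
	 * Aside on the encoding (assumption, based on the bit layout
	 * described in the comments above): BGE_RCB_MAXLEN_FLAGS(maxlen,
	 * flags) presumably packs its arguments as ((maxlen) << 16 |
	 * (flags)).  Under that assumption, BGE_RCB_MAXLEN_FLAGS(512,
	 * BNX_MAX_FRAMELEN << 2) puts the ring size 512 into bits 31-16
	 * and the maximum RX frame size into bits 15-2, while the
	 * pre-57765 case leaves the low 16 bits zero.
	 */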
	if (sc->bnx_asicrev == BGE_ASICREV_BCM5717 ||
	    sc->bnx_asicrev == BGE_ASICREV_BCM5719 ||
	    sc->bnx_asicrev == BGE_ASICREV_BCM5720)
		rcb->bge_nicaddr = BGE_STD_RX_RINGS_5717;
	else
		rcb->bge_nicaddr = BGE_STD_RX_RINGS;
	/* Write the standard receive producer ring control block. */
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
	/* Reset the standard receive producer ring producer index. */
	bnx_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0);

	/*
	 * Initialize the jumbo RX producer ring control
	 * block.  We set the 'ring disabled' bit in the
	 * flags field until we're actually ready to start
	 * using this ring (i.e. once we set the MTU
	 * high enough to require it).
	 */
	if (BNX_IS_JUMBO_CAPABLE(sc)) {
		rcb = &sc->bnx_ldata.bnx_info.bnx_jumbo_rx_rcb;
		/* Get the jumbo receive producer ring RCB parameters. */
		rcb->bge_hostaddr.bge_addr_lo =
		    BGE_ADDR_LO(sc->bnx_ldata.bnx_rx_jumbo_ring_paddr);
		rcb->bge_hostaddr.bge_addr_hi =
		    BGE_ADDR_HI(sc->bnx_ldata.bnx_rx_jumbo_ring_paddr);
		rcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(BNX_MAX_FRAMELEN,
		    BGE_RCB_FLAG_RING_DISABLED);
		if (sc->bnx_asicrev == BGE_ASICREV_BCM5717 ||
		    sc->bnx_asicrev == BGE_ASICREV_BCM5719 ||
		    sc->bnx_asicrev == BGE_ASICREV_BCM5720)
			rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS_5717;
		else
			rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
		    rcb->bge_hostaddr.bge_addr_hi);
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
		    rcb->bge_hostaddr.bge_addr_lo);
		/* Program the jumbo receive producer ring RCB parameters. */
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
		    rcb->bge_maxlen_flags);
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
		/* Reset the jumbo receive producer ring producer index. */
		bnx_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
	}

	/* Choose de-pipeline mode for BCM5906 A0, A1 and A2. */
	if (sc->bnx_asicrev == BGE_ASICREV_BCM5906 &&
	    (sc->bnx_chipid == BGE_CHIPID_BCM5906_A0 ||
	     sc->bnx_chipid == BGE_CHIPID_BCM5906_A1 ||
	     sc->bnx_chipid == BGE_CHIPID_BCM5906_A2)) {
		CSR_WRITE_4(sc, BGE_ISO_PKT_TX,
		    (CSR_READ_4(sc, BGE_ISO_PKT_TX) & ~3) | 2);
	}

	/*
	 * The BD ring replenish thresholds control how often the
	 * hardware fetches new BD's from the producer rings in host
	 * memory.  Setting the value too low on a busy system can
	 * starve the hardware and reduce the throughput.
	 *
	 * Set the BD ring replenish thresholds.  The recommended
	 * values are 1/8th the number of descriptors allocated to
	 * each ring.
	 */
	val = 8;
	CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, val);
	if (BNX_IS_JUMBO_CAPABLE(sc)) {
		CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH,
		    BGE_JUMBO_RX_RING_CNT/8);
	}
	if (BNX_IS_57765_PLUS(sc)) {
		CSR_WRITE_4(sc, BGE_STD_REPLENISH_LWM, 32);
		CSR_WRITE_4(sc, BGE_JMB_REPLENISH_LWM, 16);
	}
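
	/*
	 * Quick arithmetic check (assuming the stock BGE_JUMBO_RX_RING_CNT
	 * of 256 descriptors): the 1/8th rule above gives a jumbo replenish
	 * threshold of 256 / 8 = 32 BDs, i.e. the chip refetches jumbo BDs
	 * once 32 have been consumed.  Note that the standard ring uses a
	 * fixed threshold of 8 here rather than BGE_STD_RX_RING_CNT / 8.
	 */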

	/*
	 * Disable all send rings by setting the 'ring disabled' bit
	 * in the flags field of all the TX send ring control blocks,
	 * located in NIC memory.
	 */
	if (BNX_IS_5717_PLUS(sc))
		limit = 4;
	else if (BNX_IS_57765_FAMILY(sc))
		limit = 2;
	else
		limit = 1;
	vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
	for (i = 0; i < limit; i++) {
		RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
		RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
		vrcb += sizeof(struct bge_rcb);
	}

	/* Configure send ring RCB 0 (we use only the first ring) */
	vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
	BGE_HOSTADDR(taddr, sc->bnx_ldata.bnx_tx_ring_paddr);
	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
	if (sc->bnx_asicrev == BGE_ASICREV_BCM5717 ||
	    sc->bnx_asicrev == BGE_ASICREV_BCM5719 ||
	    sc->bnx_asicrev == BGE_ASICREV_BCM5720) {
		RCB_WRITE_4(sc, vrcb, bge_nicaddr, BGE_SEND_RING_5717);
	} else {
		RCB_WRITE_4(sc, vrcb, bge_nicaddr,
		    BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
	}
	RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
	    BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));

	/*
	 * Disable all receive return rings by setting the
	 * 'ring disabled' bit in the flags field of all the receive
	 * return ring control blocks, located in NIC memory.
	 */
	if (BNX_IS_5717_PLUS(sc)) {
		/* Should be 17, use 16 until we get an SRAM map. */
		limit = 16;
	} else if (BNX_IS_57765_FAMILY(sc)) {
		limit = 4;
	} else {
		limit = 1;
	}
	/* Disable all receive return rings. */
	vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
	for (i = 0; i < limit; i++) {
		RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0);
		RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0);
		RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
		    BGE_RCB_FLAG_RING_DISABLED);
		RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
		bnx_writembx(sc, BGE_MBX_RX_CONS0_LO +
		    (i * (sizeof(uint64_t))), 0);
		vrcb += sizeof(struct bge_rcb);
	}

	/*
	 * Set up receive return ring 0.  Note that the NIC address
	 * for RX return rings is 0x0.  The return rings live entirely
	 * within the host, so the nicaddr field in the RCB isn't used.
	 */
	vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
	BGE_HOSTADDR(taddr, sc->bnx_ldata.bnx_rx_return_ring_paddr);
	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
	RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
	RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
	    BGE_RCB_MAXLEN_FLAGS(sc->bnx_return_ring_cnt, 0));

	/* Set random backoff seed for TX */
	CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
	    sc->arpcom.ac_enaddr[0] + sc->arpcom.ac_enaddr[1] +
	    sc->arpcom.ac_enaddr[2] + sc->arpcom.ac_enaddr[3] +
	    sc->arpcom.ac_enaddr[4] + sc->arpcom.ac_enaddr[5] +
	    BGE_TX_BACKOFF_SEED_MASK);

	/* Set inter-packet gap */
	val = 0x2620;
	if (sc->bnx_asicrev == BGE_ASICREV_BCM5720) {
		val |= CSR_READ_4(sc, BGE_TX_LENGTHS) &
		    (BGE_TXLEN_JMB_FRM_LEN_MSK | BGE_TXLEN_CNT_DN_VAL_MSK);
	}
	CSR_WRITE_4(sc, BGE_TX_LENGTHS, val);

	/*
	 * Specify which ring to use for packets that don't match
	 * any RX rules.
	 */
	CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);

	/*
	 * Configure number of RX lists.  One interrupt distribution
	 * list, sixteen active lists, one bad frames class.
	 */
	CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);

	/* Initialize RX list placement stats mask. */
	CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
	CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);

	/* Disable host coalescing until we get it set up */
	CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);

	/* Poll to make sure it's shut down. */
	for (i = 0; i < BNX_TIMEOUT; i++) {
		if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
			break;
		DELAY(10);
	}

	if (i == BNX_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if,
		    "host coalescing engine failed to idle\n");
		return (ENXIO);
	}

	/* Set up host coalescing defaults */
	CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bnx_rx_coal_ticks);
	CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bnx_tx_coal_ticks);
	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bnx_rx_coal_bds);
	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bnx_tx_coal_bds);
	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, sc->bnx_rx_coal_bds_int);
	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, sc->bnx_tx_coal_bds_int);

	/* Set up address of status block */
	bzero(sc->bnx_ldata.bnx_status_block, BGE_STATUS_BLK_SZ);
	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI,
	    BGE_ADDR_HI(sc->bnx_ldata.bnx_status_block_paddr));
	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
	    BGE_ADDR_LO(sc->bnx_ldata.bnx_status_block_paddr));

	/* Set up status block partial update size. */
	val = BGE_STATBLKSZ_32BYTE;
#if 0
	/*
	 * Does not seem to have visible effect in both
	 * bulk data (1472B UDP datagram) and tiny data
	 * (18B UDP datagram) TX tests.
	 */
	val |= BGE_HCCMODE_CLRTICK_TX;
#endif
	/* Turn on host coalescing state machine */
	CSR_WRITE_4(sc, BGE_HCC_MODE, val | BGE_HCCMODE_ENABLE);

	/* Turn on RX BD completion state machine and enable attentions */
	CSR_WRITE_4(sc, BGE_RBDC_MODE,
	    BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);

	/* Turn on RX list placement state machine */
	CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);

	val = BGE_MACMODE_TXDMA_ENB | BGE_MACMODE_RXDMA_ENB |
	    BGE_MACMODE_RX_STATS_CLEAR | BGE_MACMODE_TX_STATS_CLEAR |
	    BGE_MACMODE_RX_STATS_ENB | BGE_MACMODE_TX_STATS_ENB |
	    BGE_MACMODE_FRMHDR_DMA_ENB;

	if (sc->bnx_flags & BNX_FLAG_TBI)
		val |= BGE_PORTMODE_TBI;
	else if (sc->bnx_flags & BNX_FLAG_MII_SERDES)
		val |= BGE_PORTMODE_GMII;
	else
		val |= BGE_PORTMODE_MII;

	/* Turn on DMA, clear stats */
	CSR_WRITE_4(sc, BGE_MAC_MODE, val);

	/* Set misc. local control, enable interrupts on attentions */
	CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);

#ifdef notdef
	/* Assert GPIO pins for PHY reset */
	BNX_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
	    BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
	BNX_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
	    BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
#endif

	/* Turn on write DMA state machine */
	val = BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS;
	/* Enable host coalescing bug fix. */
	val |= BGE_WDMAMODE_STATUS_TAG_FIX;
	if (sc->bnx_asicrev == BGE_ASICREV_BCM5785) {
		/* Request larger DMA burst size to get better performance. */
		val |= BGE_WDMAMODE_BURST_ALL_DATA;
	}
	CSR_WRITE_4(sc, BGE_WDMA_MODE, val);
	DELAY(40);

	if (BNX_IS_57765_PLUS(sc)) {
		uint32_t dmactl;

		dmactl = CSR_READ_4(sc, BGE_RDMA_RSRVCTRL);
		/*
		 * Adjust tx margin to prevent TX data corruption and
		 * fix internal FIFO overflow.
		 */
		if (sc->bnx_asicrev == BGE_ASICREV_BCM5719 ||
		    sc->bnx_asicrev == BGE_ASICREV_BCM5720) {
			dmactl &= ~(BGE_RDMA_RSRVCTRL_FIFO_LWM_MASK |
			    BGE_RDMA_RSRVCTRL_FIFO_HWM_MASK |
			    BGE_RDMA_RSRVCTRL_TXMRGN_MASK);
			dmactl |= BGE_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
			    BGE_RDMA_RSRVCTRL_FIFO_HWM_1_5K |
			    BGE_RDMA_RSRVCTRL_TXMRGN_320B;
		}
		/*
		 * Enable fix for read DMA FIFO overruns.
		 * The fix is to limit the number of RX BDs
		 * the hardware would fetch at a time.
		 */
		CSR_WRITE_4(sc, BGE_RDMA_RSRVCTRL,
		    dmactl | BGE_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
	}

	if (sc->bnx_asicrev == BGE_ASICREV_BCM5719) {
		CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL,
		    CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) |
		    BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_4K |
		    BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K);
	} else if (sc->bnx_asicrev == BGE_ASICREV_BCM5720) {
		/*
		 * Allow 4KB burst length reads for non-LSO frames.
		 * Enable 512B burst length reads for buffer descriptors.
		 */
		CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL,
		    CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) |
		    BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_512 |
		    BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K);
	}

	/* Turn on read DMA state machine */
	val = BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS;
	if (sc->bnx_asicrev == BGE_ASICREV_BCM5717)
		val |= BGE_RDMAMODE_MULT_DMA_RD_DIS;
	if (sc->bnx_asicrev == BGE_ASICREV_BCM5784 ||
	    sc->bnx_asicrev == BGE_ASICREV_BCM5785 ||
	    sc->bnx_asicrev == BGE_ASICREV_BCM57780) {
		val |= BGE_RDMAMODE_BD_SBD_CRPT_ATTN |
		    BGE_RDMAMODE_MBUF_RBD_CRPT_ATTN |
		    BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN;
	}
	if (sc->bnx_asicrev == BGE_ASICREV_BCM5720) {
		val |= CSR_READ_4(sc, BGE_RDMA_MODE) &
		    BGE_RDMAMODE_H2BNC_VLAN_DET;
		/*
		 * Allow multiple outstanding read requests from
		 * non-LSO read DMA engine.
		 */
		val &= ~BGE_RDMAMODE_MULT_DMA_RD_DIS;
	}
	val |= BGE_RDMAMODE_FIFO_LONG_BURST;
	CSR_WRITE_4(sc, BGE_RDMA_MODE, val);
	DELAY(40);

	/* Turn on RX data completion state machine */
	CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);

	/* Turn on RX BD initiator state machine */
	CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);

	/* Turn on RX data and RX BD initiator state machine */
	CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);

	/* Turn on send BD completion state machine */
	CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);

	/* Turn on send data completion state machine */
	val = BGE_SDCMODE_ENABLE;
	if (sc->bnx_asicrev == BGE_ASICREV_BCM5761)
		val |= BGE_SDCMODE_CDELAY;
	CSR_WRITE_4(sc, BGE_SDC_MODE, val);

	/* Turn on send data initiator state machine */
	CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);

	/* Turn on send BD initiator state machine */
	CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);

	/* Turn on send BD selector state machine */
	CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);

	CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
	CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
	    BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);

	/* ack/clear link change events */
	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
	    BGE_MACSTAT_LINK_CHANGED);
	CSR_WRITE_4(sc, BGE_MI_STS, 0);

	/*
	 * Enable attention when the link has changed state for
	 * devices that use auto polling.
	 */
	if (sc->bnx_flags & BNX_FLAG_TBI) {
		CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
	} else {
		if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) {
			CSR_WRITE_4(sc, BGE_MI_MODE, sc->bnx_mi_mode);
			DELAY(80);
		}
	}

	/*
	 * Clear any pending link state attention.
	 * Otherwise some link state change events may be lost until attention
	 * is cleared by bnx_intr() -> bnx_softc.bnx_link_upd() sequence.
	 * It's not necessary on newer BCM chips - perhaps enabling link
	 * state change attentions implies clearing pending attention.
	 */
	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
	    BGE_MACSTAT_LINK_CHANGED);

	/* Enable link state change attentions. */
	BNX_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);

	return (0);
}

/*
 * Probe for a Broadcom chip. Check the PCI vendor and device IDs
 * against our list and return its name if we find a match.  Note
 * that since the Broadcom controller contains VPD support, we
 * can get the device name string from the controller itself instead
 * of the compiled-in string.  This is a little slow, but it guarantees
 * we'll always announce the right product name.
 */
static int
bnx_probe(device_t dev)
{
	const struct bnx_type *t;
	uint16_t product, vendor;

	if (!pci_is_pcie(dev))
		return ENXIO;

	product = pci_get_device(dev);
	vendor = pci_get_vendor(dev);

	for (t = bnx_devs; t->bnx_name != NULL; t++) {
		if (vendor == t->bnx_vid && product == t->bnx_did)
			break;
	}
	if (t->bnx_name == NULL)
		return ENXIO;

	device_set_desc(dev, t->bnx_name);
	return 0;
}

static int
bnx_attach(device_t dev)
{
	struct ifnet *ifp;
	struct bnx_softc *sc;
	uint32_t hwcfg = 0, misccfg;
	int error = 0, rid, capmask;
	uint8_t ether_addr[ETHER_ADDR_LEN];
	uint16_t product, vendor;
	driver_intr_t *intr_func;
	uintptr_t mii_priv = 0;
	u_int intr_flags;

	sc = device_get_softc(dev);
	sc->bnx_dev = dev;
	callout_init(&sc->bnx_stat_timer);
	lwkt_serialize_init(&sc->bnx_jslot_serializer);

	product = pci_get_device(dev);
	vendor = pci_get_vendor(dev);

#ifndef BURN_BRIDGES
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		uint32_t irq, mem;

		irq = pci_read_config(dev, PCIR_INTLINE, 4);
		mem = pci_read_config(dev, BGE_PCI_BAR0, 4);

		device_printf(dev, "chip is in D%d power mode "
		    "-- setting to D0\n", pci_get_powerstate(dev));

		pci_set_powerstate(dev, PCI_POWERSTATE_D0);

		pci_write_config(dev, PCIR_INTLINE, irq, 4);
		pci_write_config(dev, BGE_PCI_BAR0, mem, 4);
	}
1753#endif	/* !BURN_BRIDGES */
1754
1755 /*
1756 * Map control/status registers.
1757 */
1758 pci_enable_busmaster(dev);
1759
1760 rid = BGE_PCI_BAR0;
1761 sc->bnx_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
1762 RF_ACTIVE);
1763
1764 if (sc->bnx_res == NULL) {
1765 device_printf(dev, "couldn't map memory\n");
1766 return ENXIO;
1767 }
1768
1769 sc->bnx_btag = rman_get_bustag(sc->bnx_res);
1770 sc->bnx_bhandle = rman_get_bushandle(sc->bnx_res);
1771
1772 /* Save various chip information */
1773 sc->bnx_chipid =
1774 pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >>
1775 BGE_PCIMISCCTL_ASICREV_SHIFT;
1776 if (BGE_ASICREV(sc->bnx_chipid) == BGE_ASICREV_USE_PRODID_REG) {
1777 /* All chips having dedicated ASICREV register have CPMU */
1778 sc->bnx_flags |= BNX_FLAG_CPMU;
1779
1780 switch (product) {
1781 case PCI_PRODUCT_BROADCOM_BCM5717:
1782 case PCI_PRODUCT_BROADCOM_BCM5718:
1783 case PCI_PRODUCT_BROADCOM_BCM5719:
1784 case PCI_PRODUCT_BROADCOM_BCM5720_ALT:
1785 sc->bnx_chipid = pci_read_config(dev,
1786 BGE_PCI_GEN2_PRODID_ASICREV, 4);
1787 break;
1788
1789 case PCI_PRODUCT_BROADCOM_BCM57761:
1790 case PCI_PRODUCT_BROADCOM_BCM57765:
1791 case PCI_PRODUCT_BROADCOM_BCM57781:
1792 case PCI_PRODUCT_BROADCOM_BCM57785:
1793 case PCI_PRODUCT_BROADCOM_BCM57791:
1794 case PCI_PRODUCT_BROADCOM_BCM57795:
1795 sc->bnx_chipid = pci_read_config(dev,
1796 BGE_PCI_GEN15_PRODID_ASICREV, 4);
1797 break;
1798
1799 default:
1800 sc->bnx_chipid = pci_read_config(dev,
1801 BGE_PCI_PRODID_ASICREV, 4);
1802 break;
1803 }
1804 }
1805 sc->bnx_asicrev = BGE_ASICREV(sc->bnx_chipid);
1806 sc->bnx_chiprev = BGE_CHIPREV(sc->bnx_chipid);
1807
1808 switch (sc->bnx_asicrev) {
1809 case BGE_ASICREV_BCM5717:
1810 case BGE_ASICREV_BCM5719:
1811 case BGE_ASICREV_BCM5720:
1812 		sc->bnx_flags |= BNX_FLAG_5717_PLUS | BNX_FLAG_57765_PLUS;
1813 break;
1814
1815 	case BGE_ASICREV_BCM57765:
1816 		sc->bnx_flags |= BNX_FLAG_57765_FAMILY | BNX_FLAG_57765_PLUS;
1817 break;
1818 }
1819 sc->bnx_flags |= BNX_FLAG_SHORTDMA;
1820
1821 if (sc->bnx_asicrev == BGE_ASICREV_BCM5906)
1822 sc->bnx_flags |= BNX_FLAG_NO_EEPROM;
1823
1824 misccfg = CSR_READ_4(sc, BGE_MISC_CFG) & BGE_MISCCFG_BOARD_ID_MASK;
1825
1826 sc->bnx_pciecap = pci_get_pciecap_ptr(sc->bnx_dev);
1827 if (sc->bnx_asicrev == BGE_ASICREV_BCM5719 ||
1828 sc->bnx_asicrev == BGE_ASICREV_BCM5720)
1829 pcie_set_max_readrq(dev, PCIEM_DEVCTL_MAX_READRQ_2048);
1830 else
1831 pcie_set_max_readrq(dev, PCIEM_DEVCTL_MAX_READRQ_4096);
1832 device_printf(dev, "CHIP ID 0x%08x; "
1833 "ASIC REV 0x%02x; CHIP REV 0x%02x\n",
1834 sc->bnx_chipid, sc->bnx_asicrev, sc->bnx_chiprev);
1835
1836 /*
1837 * Set various PHY quirk flags.
1838 */
1839
1840 capmask = MII_CAPMASK_DEFAULT;
1841 if ((sc->bnx_asicrev == BGE_ASICREV_BCM5703 &&
1842 (misccfg == 0x4000 || misccfg == 0x8000)) ||
1843 (sc->bnx_asicrev == BGE_ASICREV_BCM5705 &&
1844 vendor == PCI_VENDOR_BROADCOM &&
1845 (product == PCI_PRODUCT_BROADCOM_BCM5901 ||
1846 product == PCI_PRODUCT_BROADCOM_BCM5901A2 ||
1847 product == PCI_PRODUCT_BROADCOM_BCM5705F)) ||
1848 (vendor == PCI_VENDOR_BROADCOM &&
1849 (product == PCI_PRODUCT_BROADCOM_BCM5751F ||
1850 product == PCI_PRODUCT_BROADCOM_BCM5753F ||
1851 product == PCI_PRODUCT_BROADCOM_BCM5787F)) ||
1852 product == PCI_PRODUCT_BROADCOM_BCM57790 ||
1853 sc->bnx_asicrev == BGE_ASICREV_BCM5906) {
1854 /* 10/100 only */
1855 capmask &= ~BMSR_EXTSTAT;
1856 }
1857
1858 mii_priv |= BRGPHY_FLAG_WIRESPEED;
1859
1860 /*
1861 * Allocate interrupt
1862 */
1863 sc->bnx_irq_type = pci_alloc_1intr(dev, bnx_msi_enable, &sc->bnx_irq_rid,
1864 &intr_flags);
1865
1866 sc->bnx_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->bnx_irq_rid,
1867 intr_flags);
1868 if (sc->bnx_irq == NULL) {
1869 device_printf(dev, "couldn't map interrupt\n");
1870 error = ENXIO;
1871 goto fail;
1872 }
1873
1874 if (sc->bnx_irq_type == PCI_INTR_TYPE_MSI) {
1875 sc->bnx_flags |= BNX_FLAG_ONESHOT_MSI;
1876 bnx_enable_msi(sc);
1877 }
1878
1879 	/* Initialize if_name earlier, so that if_printf can be used */
1880 ifp = &sc->arpcom.ac_if;
1881 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1882
1883 /* Try to reset the chip. */
1884 bnx_reset(sc);
1885
1886 if (bnx_chipinit(sc)) {
1887 device_printf(dev, "chip initialization failed\n");
1888 error = ENXIO;
1889 goto fail;
1890 }
1891
1892 /*
1893 * Get station address
1894 */
1895 error = bnx_get_eaddr(sc, ether_addr);
1896 if (error) {
1897 device_printf(dev, "failed to read station address\n");
1898 goto fail;
1899 }
1900
1901 	if (BNX_IS_57765_PLUS(sc)) {
1902 sc->bnx_return_ring_cnt = BGE_RETURN_RING_CNT;
1903 } else {
1904 /* 5705/5750 limits RX return ring to 512 entries. */
1905 sc->bnx_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
1906 }
1907
1908 error = bnx_dma_alloc(sc);
1909 if (error)
1910 goto fail;
1911
1912 /* Set default tuneable values. */
1913 sc->bnx_rx_coal_ticks = BNX_RX_COAL_TICKS_DEF;
1914 sc->bnx_tx_coal_ticks = BNX_TX_COAL_TICKS_DEF;
1915 sc->bnx_rx_coal_bds = BNX_RX_COAL_BDS_DEF;
1916 sc->bnx_tx_coal_bds = BNX_TX_COAL_BDS_DEF;
1917 sc->bnx_rx_coal_bds_int = BNX_RX_COAL_BDS_DEF;
1918 sc->bnx_tx_coal_bds_int = BNX_TX_COAL_BDS_DEF;
1919
1920 /* Set up ifnet structure */
1921 ifp->if_softc = sc;
1922 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1923 ifp->if_ioctl = bnx_ioctl;
1924 ifp->if_start = bnx_start;
1925#ifdef DEVICE_POLLING
1926 ifp->if_poll = bnx_poll;
1927#endif
1928 ifp->if_watchdog = bnx_watchdog;
1929 ifp->if_init = bnx_init;
1930 ifp->if_mtu = ETHERMTU;
1931 ifp->if_capabilities = IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
1932 ifq_set_maxlen(&ifp->if_snd, BGE_TX_RING_CNT - 1);
1933 ifq_set_ready(&ifp->if_snd);
1934
1935 ifp->if_capabilities |= IFCAP_HWCSUM;
1936 ifp->if_hwassist = BNX_CSUM_FEATURES;
1937 ifp->if_capenable = ifp->if_capabilities;
1938
1939 /*
1940 * Figure out what sort of media we have by checking the
1941 * hardware config word in the first 32k of NIC internal memory,
1942 * or fall back to examining the EEPROM if necessary.
1943 * Note: on some BCM5700 cards, this value appears to be unset.
1944 * If that's the case, we have to rely on identifying the NIC
1945 * by its PCI subsystem ID, as we do below for the SysKonnect
1946 * SK-9D41.
1947 */
1948 if (bnx_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER) {
1949 hwcfg = bnx_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
1950 } else {
1951 if (bnx_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
1952 sizeof(hwcfg))) {
1953 device_printf(dev, "failed to read EEPROM\n");
1954 error = ENXIO;
1955 goto fail;
1956 }
1957 hwcfg = ntohl(hwcfg);
1958 }
1959
1960 /* The SysKonnect SK-9D41 is a 1000baseSX card. */
1961 if (pci_get_subvendor(dev) == PCI_PRODUCT_SCHNEIDERKOCH_SK_9D41 ||
1962 (hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER)
1963 sc->bnx_flags |= BNX_FLAG_TBI;
1964
1965 /* Setup MI MODE */
1966 if (sc->bnx_flags & BNX_FLAG_CPMU)
1967 sc->bnx_mi_mode = BGE_MIMODE_500KHZ_CONST;
1968 else
1969 sc->bnx_mi_mode = BGE_MIMODE_BASE;
1970
1971 /* Setup link status update stuffs */
1972 if (sc->bnx_flags & BNX_FLAG_TBI) {
1973 sc->bnx_link_upd = bnx_tbi_link_upd;
1974 sc->bnx_link_chg = BGE_MACSTAT_LINK_CHANGED;
1975 } else if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) {
1976 sc->bnx_link_upd = bnx_autopoll_link_upd;
1977 sc->bnx_link_chg = BGE_MACSTAT_LINK_CHANGED;
1978 } else {
1979 sc->bnx_link_upd = bnx_copper_link_upd;
1980 sc->bnx_link_chg = BGE_MACSTAT_LINK_CHANGED;
1981 }
1982
1983 /* Set default PHY address */
1984 sc->bnx_phyno = 1;
1985
1986 /*
1987 * PHY address mapping for various devices.
1988 *
1989 * | F0 Cu | F0 Sr | F1 Cu | F1 Sr |
1990 * ---------+-------+-------+-------+-------+
1991 * BCM57XX | 1 | X | X | X |
1992 * BCM5704 | 1 | X | 1 | X |
1993 * BCM5717 | 1 | 8 | 2 | 9 |
1994 * BCM5719 | 1 | 8 | 2 | 9 |
1995 * BCM5720 | 1 | 8 | 2 | 9 |
1996 *
1997 * Other addresses may respond but they are not
1998 * IEEE compliant PHYs and should be ignored.
1999 */
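	/*
	 * Worked example from the table above: a copper BCM5719 on PCI
	 * function 1 maps to PHY address 1 + 1 = 2, while its SerDes
	 * counterpart maps to 1 + 8 = 9, matching the f + 1 / f + 8
	 * computation below.
	 */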
2000 	if (BNX_IS_5717_PLUS(sc)) {
2001 int f;
2002
2003 f = pci_get_function(dev);
2004 if (sc->bnx_chipid == BGE_CHIPID_BCM5717_A0) {
2005 if (CSR_READ_4(sc, BGE_SGDIG_STS) &
2006 BGE_SGDIGSTS_IS_SERDES)
2007 sc->bnx_phyno = f + 8;
2008 else
2009 sc->bnx_phyno = f + 1;
2010 } else {
2011 if (CSR_READ_4(sc, BGE_CPMU_PHY_STRAP) &
2012 BGE_CPMU_PHY_STRAP_IS_SERDES)
2013 sc->bnx_phyno = f + 8;
2014 else
2015 sc->bnx_phyno = f + 1;
2016 }
2017 }
2018
2019 if (sc->bnx_flags & BNX_FLAG_TBI) {
2020 ifmedia_init(&sc->bnx_ifmedia, IFM_IMASK,
2021 bnx_ifmedia_upd, bnx_ifmedia_sts);
2022 ifmedia_add(&sc->bnx_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
2023 ifmedia_add(&sc->bnx_ifmedia,
2024 IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL);
2025 ifmedia_add(&sc->bnx_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
2026 ifmedia_set(&sc->bnx_ifmedia, IFM_ETHER|IFM_AUTO);
2027 sc->bnx_ifmedia.ifm_media = sc->bnx_ifmedia.ifm_cur->ifm_media;
2028 } else {
2029 struct mii_probe_args mii_args;
2030
2031 mii_probe_args_init(&mii_args, bnx_ifmedia_upd, bnx_ifmedia_sts);
2032 mii_args.mii_probemask = 1 << sc->bnx_phyno;
2033 mii_args.mii_capmask = capmask;
2034 mii_args.mii_privtag = MII_PRIVTAG_BRGPHY;
2035 mii_args.mii_priv = mii_priv;
2036
2037 error = mii_probe(dev, &sc->bnx_miibus, &mii_args);
2038 if (error) {
2039 device_printf(dev, "MII without any PHY!\n");
2040 goto fail;
2041 }
2042 }
2043
2044 /*
2045 * Create sysctl nodes.
2046 */
2047 sysctl_ctx_init(&sc->bnx_sysctl_ctx);
2048 sc->bnx_sysctl_tree = SYSCTL_ADD_NODE(&sc->bnx_sysctl_ctx,
2049 SYSCTL_STATIC_CHILDREN(_hw),
2050 OID_AUTO,
2051 device_get_nameunit(dev),
2052 CTLFLAG_RD, 0, "");
2053 if (sc->bnx_sysctl_tree == NULL) {
2054 device_printf(dev, "can't add sysctl node\n");
2055 error = ENXIO;
2056 goto fail;
2057 }
2058
2059 SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
2060 SYSCTL_CHILDREN(sc->bnx_sysctl_tree),
2061 OID_AUTO, "rx_coal_ticks",
2062 CTLTYPE_INT | CTLFLAG_RW,
2063 sc, 0, bnx_sysctl_rx_coal_ticks, "I",
2064 "Receive coalescing ticks (usec).");
2065 SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
2066 SYSCTL_CHILDREN(sc->bnx_sysctl_tree),
2067 OID_AUTO, "tx_coal_ticks",
2068 CTLTYPE_INT | CTLFLAG_RW,
2069 sc, 0, bnx_sysctl_tx_coal_ticks, "I",
2070 "Transmit coalescing ticks (usec).");
2071 SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
2072 SYSCTL_CHILDREN(sc->bnx_sysctl_tree),
2073 OID_AUTO, "rx_coal_bds",
2074 CTLTYPE_INT | CTLFLAG_RW,
2075 sc, 0, bnx_sysctl_rx_coal_bds, "I",
2076 "Receive max coalesced BD count.");
2077 SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
2078 SYSCTL_CHILDREN(sc->bnx_sysctl_tree),
2079 OID_AUTO, "tx_coal_bds",
2080 CTLTYPE_INT | CTLFLAG_RW,
2081 sc, 0, bnx_sysctl_tx_coal_bds, "I",
2082 "Transmit max coalesced BD count.");
2083 /*
2084 * A common design characteristic for many Broadcom
2085 * client controllers is that they only support a
2086 * single outstanding DMA read operation on the PCIe
2087 * bus. This means that it will take twice as long to
2088 * fetch a TX frame that is split into header and
2089 * payload buffers as it does to fetch a single,
2090 * contiguous TX frame (2 reads vs. 1 read). For these
2091 * controllers, coalescing buffers to reduce the number
2092 	 * of memory reads is an effective way to get maximum
2093 	 * performance (about 940Mbps). Without collapsing TX
2094 	 * buffers, the maximum TCP bulk transfer performance
2095 	 * is about 850Mbps. However, forcibly coalescing mbufs
2096 * consumes a lot of CPU cycles, so leave it off by
2097 * default.
2098 */
2099 SYSCTL_ADD_INT(&sc->bnx_sysctl_ctx,
2100 SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
2101 "force_defrag", CTLFLAG_RW, &sc->bnx_force_defrag, 0,
2102 "Force defragment on TX path");
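	/*
	 * Illustrative usage (the sysctl node path assumes the nameunit
	 * from attach, e.g. unit 0):
	 *	sysctl hw.bnx0.force_defrag=1
	 */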
2103
2104 SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
2105 SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
2106 "rx_coal_bds_int", CTLTYPE_INT | CTLFLAG_RW,
2107 sc, 0, bnx_sysctl_rx_coal_bds_int, "I",
2108 "Receive max coalesced BD count during interrupt.");
2109 SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
2110 SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
2111 "tx_coal_bds_int", CTLTYPE_INT | CTLFLAG_RW,
2112 sc, 0, bnx_sysctl_tx_coal_bds_int, "I",
2113 "Transmit max coalesced BD count during interrupt.");
2114
2115 /*
2116 * Call MI attach routine.
2117 */
2118 ether_ifattach(ifp, ether_addr, NULL);
2119
2120 if (sc->bnx_irq_type == PCI_INTR_TYPE_MSI) {
2121 if (sc->bnx_flags & BNX_FLAG_ONESHOT_MSI) {
2122 intr_func = bnx_msi_oneshot;
2123 if (bootverbose)
2124 device_printf(dev, "oneshot MSI\n");
2125 } else {
2126 intr_func = bnx_msi;
2127 }
2128 } else {
2129 intr_func = bnx_intr_legacy;
2130 }
2131 error = bus_setup_intr(dev, sc->bnx_irq, INTR_MPSAFE, intr_func, sc,
2132 &sc->bnx_intrhand, ifp->if_serializer);
2133 if (error) {
2134 ether_ifdetach(ifp);
2135 device_printf(dev, "couldn't set up irq\n");
2136 goto fail;
2137 }
2138
2139 ifp->if_cpuid = rman_get_cpuid(sc->bnx_irq);
2140 KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus);
2141
2142 return(0);
2143fail:
2144 bnx_detach(dev);
2145 return(error);
2146}
2147
2148static int
2149bnx_detach(device_t dev)
2150{
2151 struct bnx_softc *sc = device_get_softc(dev);
2152
2153 if (device_is_attached(dev)) {
2154 struct ifnet *ifp = &sc->arpcom.ac_if;
2155
2156 lwkt_serialize_enter(ifp->if_serializer);
2157 bnx_stop(sc);
2158 bnx_reset(sc);
2159 bus_teardown_intr(dev, sc->bnx_irq, sc->bnx_intrhand);
2160 lwkt_serialize_exit(ifp->if_serializer);
2161
2162 ether_ifdetach(ifp);
2163 }
2164
2165 if (sc->bnx_flags & BNX_FLAG_TBI)
2166 ifmedia_removeall(&sc->bnx_ifmedia);
2167 if (sc->bnx_miibus)
2168 device_delete_child(dev, sc->bnx_miibus);
2169 bus_generic_detach(dev);
2170
2171 if (sc->bnx_irq != NULL) {
2172 bus_release_resource(dev, SYS_RES_IRQ, sc->bnx_irq_rid,
2173 sc->bnx_irq);
2174 }
2175 if (sc->bnx_irq_type == PCI_INTR_TYPE_MSI)
2176 pci_release_msi(dev);
2177
2178 if (sc->bnx_res != NULL) {
2179 bus_release_resource(dev, SYS_RES_MEMORY,
2180 BGE_PCI_BAR0, sc->bnx_res);
2181 }
2182
2183 if (sc->bnx_sysctl_tree != NULL)
2184 sysctl_ctx_free(&sc->bnx_sysctl_ctx);
2185
2186 bnx_dma_free(sc);
2187
2188 return 0;
2189}
2190
2191static void
2192bnx_reset(struct bnx_softc *sc)
2193{
2194 device_t dev;
2195 uint32_t cachesize, command, pcistate, reset;
2196 void (*write_op)(struct bnx_softc *, uint32_t, uint32_t);
2197 int i, val = 0;
2198 uint16_t devctl;
2199
2200 dev = sc->bnx_dev;
2201
2202 if (sc->bnx_asicrev != BGE_ASICREV_BCM5906)
2203 write_op = bnx_writemem_direct;
2204 else
2205 write_op = bnx_writereg_ind;
2206
2207 /* Save some important PCI state. */
2208 cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
2209 command = pci_read_config(dev, BGE_PCI_CMD, 4);
2210 pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);
2211
2212 pci_write_config(dev, BGE_PCI_MISC_CTL,
2213 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2214 BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW|
2215 BGE_PCIMISCCTL_TAGGED_STATUS, 4);
2216
2217 /* Disable fastboot on controllers that support it. */
2218 if (bootverbose)
2219 if_printf(&sc->arpcom.ac_if, "Disabling fastboot\n");
2220 CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0x0);
2221
2222 /*
2223 * Write the magic number to SRAM at offset 0xB50.
2224 * When firmware finishes its initialization it will
2225 * write ~BGE_MAGIC_NUMBER to the same location.
2226 */
2227 bnx_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
2228
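	/*
	 * The (65 << 1) below presumably programs the MISC_CFG timer
	 * prescaler field (0x41 for a 66MHz core clock); the same value
	 * is written again once the reset completes.
	 */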
2229 reset = BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1);
2230
2231 /* XXX: Broadcom Linux driver. */
2232 /* Force PCI-E 1.0a mode */
2233 	if (!BNX_IS_57765_PLUS(sc) &&
2234 CSR_READ_4(sc, BGE_PCIE_PHY_TSTCTL) ==
2235 (BGE_PCIE_PHY_TSTCTL_PSCRAM |
2236 BGE_PCIE_PHY_TSTCTL_PCIE10)) {
2237 CSR_WRITE_4(sc, BGE_PCIE_PHY_TSTCTL,
2238 BGE_PCIE_PHY_TSTCTL_PSCRAM);
2239 }
2240 if (sc->bnx_chipid != BGE_CHIPID_BCM5750_A0) {
2241 /* Prevent PCIE link training during global reset */
2242 CSR_WRITE_4(sc, BGE_MISC_CFG, (1<<29));
2243 reset |= (1<<29);
2244 }
2245
2246 /*
2247 * Set GPHY Power Down Override to leave GPHY
2248 * powered up in D0 uninitialized.
2249 */
2250 if ((sc->bnx_flags & BNX_FLAG_CPMU) == 0)
2251 reset |= BGE_MISCCFG_GPHY_PD_OVERRIDE;
2252
2253 /* Issue global reset */
2254 write_op(sc, BGE_MISC_CFG, reset);
2255
2256 if (sc->bnx_asicrev == BGE_ASICREV_BCM5906) {
2257 uint32_t status, ctrl;
2258
2259 status = CSR_READ_4(sc, BGE_VCPU_STATUS);
2260 CSR_WRITE_4(sc, BGE_VCPU_STATUS,
2261 status | BGE_VCPU_STATUS_DRV_RESET);
2262 ctrl = CSR_READ_4(sc, BGE_VCPU_EXT_CTRL);
2263 CSR_WRITE_4(sc, BGE_VCPU_EXT_CTRL,
2264 ctrl & ~BGE_VCPU_EXT_CTRL_HALT_CPU);
2265 }
2266
2267 DELAY(1000);
2268
2269 /* XXX: Broadcom Linux driver. */
2270 if (sc->bnx_chipid == BGE_CHIPID_BCM5750_A0) {
2271 uint32_t v;
2272
2273 DELAY(500000); /* wait for link training to complete */
2274 v = pci_read_config(dev, 0xc4, 4);
2275 pci_write_config(dev, 0xc4, v | (1<<15), 4);
2276 }
2277
2278 devctl = pci_read_config(dev, sc->bnx_pciecap + PCIER_DEVCTRL, 2);
2279
2280 /* Disable no snoop and disable relaxed ordering. */
2281 devctl &= ~(PCIEM_DEVCTL_RELAX_ORDER | PCIEM_DEVCTL_NOSNOOP);
2282
2283 /* Old PCI-E chips only support 128 bytes Max PayLoad Size. */
2284 if ((sc->bnx_flags & BNX_FLAG_CPMU) == 0) {
2285 devctl &= ~PCIEM_DEVCTL_MAX_PAYLOAD_MASK;
2286 devctl |= PCIEM_DEVCTL_MAX_PAYLOAD_128;
2287 }
2288
2289 pci_write_config(dev, sc->bnx_pciecap + PCIER_DEVCTRL,
2290 devctl, 2);
2291
2292 /* Clear error status. */
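	/* (These PCIe device status error bits are write-1-to-clear.) */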
2293 pci_write_config(dev, sc->bnx_pciecap + PCIER_DEVSTS,
2294 PCIEM_DEVSTS_CORR_ERR |
2295 PCIEM_DEVSTS_NFATAL_ERR |
2296 PCIEM_DEVSTS_FATAL_ERR |
2297 PCIEM_DEVSTS_UNSUPP_REQ, 2);
2298
2299 /* Reset some of the PCI state that got zapped by reset */
2300 pci_write_config(dev, BGE_PCI_MISC_CTL,
2301 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2302 BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW|
2303 BGE_PCIMISCCTL_TAGGED_STATUS, 4);
2304 pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
2305 pci_write_config(dev, BGE_PCI_CMD, command, 4);
2306 write_op(sc, BGE_MISC_CFG, (65 << 1));
2307
2308 /* Enable memory arbiter */
2309 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
2310
2311 if (sc->bnx_asicrev == BGE_ASICREV_BCM5906) {
2312 for (i = 0; i < BNX_TIMEOUT; i++) {
2313 val = CSR_READ_4(sc, BGE_VCPU_STATUS);
2314 if (val & BGE_VCPU_STATUS_INIT_DONE)
2315 break;
2316 DELAY(100);
2317 }
2318 if (i == BNX_TIMEOUT) {
2319 if_printf(&sc->arpcom.ac_if, "reset timed out\n");
2320 return;
2321 }
2322 } else {
2323 /*
2324 * Poll until we see the 1's complement of the magic number.
2325 * This indicates that the firmware initialization
2326 * is complete.
2327 */
2328 for (i = 0; i < BNX_FIRMWARE_TIMEOUT; i++) {
2329 val = bnx_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
2330 if (val == ~BGE_MAGIC_NUMBER)
2331 break;
2332 DELAY(10);
2333 }
2334 if (i == BNX_FIRMWARE_TIMEOUT) {
2335 if_printf(&sc->arpcom.ac_if, "firmware handshake "
2336 "timed out, found 0x%08x\n", val);
2337 }
2338
2339 /* BCM57765 A0 needs additional time before accessing. */
2340 if (sc->bnx_chipid == BGE_CHIPID_BCM57765_A0)
2341 DELAY(10 * 1000);
2342 }
2343
2344 /*
2345 * XXX Wait for the value of the PCISTATE register to
2346 * return to its original pre-reset state. This is a
2347 * fairly good indicator of reset completion. If we don't
2348 * wait for the reset to fully complete, trying to read
2349 * from the device's non-PCI registers may yield garbage
2350 * results.
2351 */
2352 for (i = 0; i < BNX_TIMEOUT; i++) {
2353 if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate)
2354 break;
2355 DELAY(10);
2356 }
2357
2358 /* Fix up byte swapping */
2359 CSR_WRITE_4(sc, BGE_MODE_CTL, bnx_dma_swap_options(sc));
2360
2361 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
2362
2363 /*
2364 * The 5704 in TBI mode apparently needs some special
2365 	 * adjustment to ensure the SERDES drive level is set
2366 * to 1.2V.
2367 */
2368 if (sc->bnx_asicrev == BGE_ASICREV_BCM5704 &&
2369 (sc->bnx_flags & BNX_FLAG_TBI)) {
2370 uint32_t serdescfg;
2371
2372 serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG);
2373 serdescfg = (serdescfg & ~0xFFF) | 0x880;
2374 CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg);
2375 }
2376
2377 /* XXX: Broadcom Linux driver. */
2378 	if (!BNX_IS_57765_PLUS(sc)) {
2379 uint32_t v;
2380
2381 /* Enable Data FIFO protection. */
2382 v = CSR_READ_4(sc, BGE_PCIE_TLDLPL_PORT);
2383 CSR_WRITE_4(sc, BGE_PCIE_TLDLPL_PORT, v | (1 << 25));
2384 }
2385
2386 DELAY(10000);
2387
2388 if (sc->bnx_asicrev == BGE_ASICREV_BCM5720) {
2389 BNX_CLRBIT(sc, BGE_CPMU_CLCK_ORIDE,
2390 CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
2391 }
2392}
2393
2394/*
2395 * Frame reception handling. This is called if there's a frame
2396 * on the receive return list.
2397 *
2398 * Note: we have to be able to handle two possibilities here:
2399 	 * 1) the frame is from the jumbo receive ring
2400 * 2) the frame is from the standard receive ring
2401 */
2402
2403static void
2404bnx_rxeof(struct bnx_softc *sc, uint16_t rx_prod)
2405{
2406 struct ifnet *ifp;
2407 int stdcnt = 0, jumbocnt = 0;
2408
2409 ifp = &sc->arpcom.ac_if;
2410
2411 while (sc->bnx_rx_saved_considx != rx_prod) {
2412 struct bge_rx_bd *cur_rx;
2413 uint32_t rxidx;
2414 struct mbuf *m = NULL;
2415 uint16_t vlan_tag = 0;
2416 int have_tag = 0;
2417
2418 cur_rx =
2419 &sc->bnx_ldata.bnx_rx_return_ring[sc->bnx_rx_saved_considx];
2420
2421 rxidx = cur_rx->bge_idx;
2422 BNX_INC(sc->bnx_rx_saved_considx, sc->bnx_return_ring_cnt);
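		/* BNX_INC is assumed to advance a ring index modulo the ring size. */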
2423
2424 if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
2425 have_tag = 1;
2426 vlan_tag = cur_rx->bge_vlan_tag;
2427 }
2428
2429 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
2430 BNX_INC(sc->bnx_jumbo, BGE_JUMBO_RX_RING_CNT);
2431 jumbocnt++;
2432
2433 if (rxidx != sc->bnx_jumbo) {
2434 ifp->if_ierrors++;
2435 if_printf(ifp, "sw jumbo index(%d) "
2436 "and hw jumbo index(%d) mismatch, drop!\n",
2437 sc->bnx_jumbo, rxidx);
2438 bnx_setup_rxdesc_jumbo(sc, rxidx);
2439 continue;
2440 }
2441
2442 m = sc->bnx_cdata.bnx_rx_jumbo_chain[rxidx].bnx_mbuf;
2443 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2444 ifp->if_ierrors++;
2445 bnx_setup_rxdesc_jumbo(sc, sc->bnx_jumbo);
2446 continue;
2447 }
2448 if (bnx_newbuf_jumbo(sc, sc->bnx_jumbo, 0)) {
2449 ifp->if_ierrors++;
2450 bnx_setup_rxdesc_jumbo(sc, sc->bnx_jumbo);
2451 continue;
2452 }
2453 } else {
2454 BNX_INC(sc->bnx_std, BGE_STD_RX_RING_CNT);
2455 stdcnt++;
2456
2457 if (rxidx != sc->bnx_std) {
2458 ifp->if_ierrors++;
2459 if_printf(ifp, "sw std index(%d) "
2460 "and hw std index(%d) mismatch, drop!\n",
2461 sc->bnx_std, rxidx);
2462 bnx_setup_rxdesc_std(sc, rxidx);
2463 continue;
2464 }
2465
2466 m = sc->bnx_cdata.bnx_rx_std_chain[rxidx].bnx_mbuf;
2467 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2468 ifp->if_ierrors++;
2469 bnx_setup_rxdesc_std(sc, sc->bnx_std);
2470 continue;
2471 }
2472 if (bnx_newbuf_std(sc, sc->bnx_std, 0)) {
2473 ifp->if_ierrors++;
2474 bnx_setup_rxdesc_std(sc, sc->bnx_std);
2475 continue;
2476 }
2477 }
2478
2479 ifp->if_ipackets++;
2480 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
2481 m->m_pkthdr.rcvif = ifp;
2482
2483 if ((ifp->if_capenable & IFCAP_RXCSUM) &&
2484 (cur_rx->bge_flags & BGE_RXBDFLAG_IPV6) == 0) {
2485 if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
2486 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2487 if ((cur_rx->bge_error_flag &
2488 BGE_RXERRFLAG_IP_CSUM_NOK) == 0)
2489 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2490 }
2491 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) {
2492 m->m_pkthdr.csum_data =
2493 cur_rx->bge_tcp_udp_csum;
2494 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
2495 CSUM_PSEUDO_HDR;
2496 }
2497 }
2498
2499 /*
2500 * If we received a packet with a vlan tag, pass it
2501 * to vlan_input() instead of ether_input().
2502 */
2503 if (have_tag) {
2504 m->m_flags |= M_VLANTAG;
2505 m->m_pkthdr.ether_vlantag = vlan_tag;
2506 have_tag = vlan_tag = 0;
2507 }
2508 ifp->if_input(ifp, m);
2509 }
2510
2511 bnx_writembx(sc, BGE_MBX_RX_CONS0_LO, sc->bnx_rx_saved_considx);
2512 if (stdcnt)
2513 bnx_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bnx_std);
2514 if (jumbocnt)
2515 bnx_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bnx_jumbo);
2516}
2517
2518static void
2519bnx_txeof(struct bnx_softc *sc, uint16_t tx_cons)
2520{
2521 struct bge_tx_bd *cur_tx = NULL;
2522 struct ifnet *ifp;
2523
2524 ifp = &sc->arpcom.ac_if;
2525
2526 /*
2527 * Go through our tx ring and free mbufs for those
2528 * frames that have been sent.
2529 */
2530 while (sc->bnx_tx_saved_considx != tx_cons) {
2531 uint32_t idx = 0;
2532
2533 idx = sc->bnx_tx_saved_considx;
2534 cur_tx = &sc->bnx_ldata.bnx_tx_ring[idx];
2535 if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
2536 ifp->if_opackets++;
2537 if (sc->bnx_cdata.bnx_tx_chain[idx] != NULL) {
2538 bus_dmamap_unload(sc->bnx_cdata.bnx_tx_mtag,
2539 sc->bnx_cdata.bnx_tx_dmamap[idx]);
2540 m_freem(sc->bnx_cdata.bnx_tx_chain[idx]);
2541 sc->bnx_cdata.bnx_tx_chain[idx] = NULL;
2542 }
2543 sc->bnx_txcnt--;
2544 BNX_INC(sc->bnx_tx_saved_considx, BGE_TX_RING_CNT);
2545 }
2546
2547 if (cur_tx != NULL &&
2548 (BGE_TX_RING_CNT - sc->bnx_txcnt) >=
2549 (BNX_NSEG_RSVD + BNX_NSEG_SPARE))
2550 ifp->if_flags &= ~IFF_OACTIVE;
2551
2552 if (sc->bnx_txcnt == 0)
2553 ifp->if_timer = 0;
2554
2555 if (!ifq_is_empty(&ifp->if_snd))
2556 if_devstart(ifp);
2557}
2558
2559#ifdef DEVICE_POLLING
2560
2561static void
2562bnx_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
2563{
2564 struct bnx_softc *sc = ifp->if_softc;
2565 struct bge_status_block *sblk = sc->bnx_ldata.bnx_status_block;
2566 uint16_t rx_prod, tx_cons;
2567
2568 switch(cmd) {
2569 case POLL_REGISTER:
2570 bnx_disable_intr(sc);
2571 break;
2572 case POLL_DEREGISTER:
2573 bnx_enable_intr(sc);
2574 break;
2575 case POLL_AND_CHECK_STATUS:
2576 /*
2577 * Process link state changes.
2578 */
2579 bnx_link_poll(sc);
2580 /* Fall through */
2581 case POLL_ONLY:
2582 sc->bnx_status_tag = sblk->bge_status_tag;
2583 /*
2584 * Use a load fence to ensure that status_tag
2585 * is saved before rx_prod and tx_cons.
2586 */
2587 cpu_lfence();
2588
2589 rx_prod = sblk->bge_idx[0].bge_rx_prod_idx;
2590 tx_cons = sblk->bge_idx[0].bge_tx_cons_idx;
2591 if (ifp->if_flags & IFF_RUNNING) {
2592 rx_prod = sblk->bge_idx[0].bge_rx_prod_idx;
2593 if (sc->bnx_rx_saved_considx != rx_prod)
2594 bnx_rxeof(sc, rx_prod);
2595
2596 tx_cons = sblk->bge_idx[0].bge_tx_cons_idx;
2597 if (sc->bnx_tx_saved_considx != tx_cons)
2598 bnx_txeof(sc, tx_cons);
2599 }
2600 break;
2601 }
2602}
2603
2604#endif
2605
2606static void
2607bnx_intr_legacy(void *xsc)
2608{
2609 struct bnx_softc *sc = xsc;
2610 struct bge_status_block *sblk = sc->bnx_ldata.bnx_status_block;
2611
2612 if (sc->bnx_status_tag == sblk->bge_status_tag) {
2613 uint32_t val;
2614
2615 val = pci_read_config(sc->bnx_dev, BGE_PCI_PCISTATE, 4);
2616 if (val & BGE_PCISTAT_INTR_NOTACT)
2617 return;
2618 }
2619
2620 /*
2621 * NOTE:
2622 * Interrupt will have to be disabled if tagged status
2623 * is used, else interrupt will always be asserted on
2624 * certain chips (at least on BCM5750 AX/BX).
2625 */
2626 bnx_writembx(sc, BGE_MBX_IRQ0_LO, 1);
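	/*
	 * bnx_intr() re-arms the line by writing the latest status tag
	 * (tag << 24) back to this mailbox when it is done; see below.
	 */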
2627
2628 bnx_intr(sc);
2629}
2630
2631static void
2632bnx_msi(void *xsc)
2633{
2634 struct bnx_softc *sc = xsc;
2635
2636 /* Disable interrupt first */
2637 bnx_writembx(sc, BGE_MBX_IRQ0_LO, 1);
2638 bnx_intr(sc);
2639}
2640
2641static void
2642bnx_msi_oneshot(void *xsc)
2643{
2644 bnx_intr(xsc);
2645}
2646
2647static void
2648bnx_intr(struct bnx_softc *sc)
2649{
2650 struct ifnet *ifp = &sc->arpcom.ac_if;
2651 struct bge_status_block *sblk = sc->bnx_ldata.bnx_status_block;
2652 uint16_t rx_prod, tx_cons;
2653 uint32_t status;
2654
2655 sc->bnx_status_tag = sblk->bge_status_tag;
2656 /*
2657 * Use a load fence to ensure that status_tag is saved
2658 * before rx_prod, tx_cons and status.
2659 */
2660 cpu_lfence();
2661
2662 rx_prod = sblk->bge_idx[0].bge_rx_prod_idx;
2663 tx_cons = sblk->bge_idx[0].bge_tx_cons_idx;
2664 status = sblk->bge_status;
2665
2666 if ((status & BGE_STATFLAG_LINKSTATE_CHANGED) || sc->bnx_link_evt)
2667 bnx_link_poll(sc);
2668
2669 if (ifp->if_flags & IFF_RUNNING) {
2670 if (sc->bnx_rx_saved_considx != rx_prod)
2671 bnx_rxeof(sc, rx_prod);
2672
2673 if (sc->bnx_tx_saved_considx != tx_cons)
2674 bnx_txeof(sc, tx_cons);
2675 }
2676
2677 bnx_writembx(sc, BGE_MBX_IRQ0_LO, sc->bnx_status_tag << 24);
2678
2679 if (sc->bnx_coal_chg)
2680 bnx_coal_change(sc);
2681}
2682
2683static void
2684bnx_tick(void *xsc)
2685{
2686 struct bnx_softc *sc = xsc;
2687 struct ifnet *ifp = &sc->arpcom.ac_if;
2688
2689 lwkt_serialize_enter(ifp->if_serializer);
2690
2691 bnx_stats_update_regs(sc);
2692
2693 if (sc->bnx_flags & BNX_FLAG_TBI) {
2694 /*
2695 		 * Since auto-polling can't be used in TBI mode, we poll
2696 		 * the link status manually. Here we register a pending
2697 		 * link event and trigger an interrupt.
2698 */
2699 sc->bnx_link_evt++;
2700 BNX_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
2701 } else if (!sc->bnx_link) {
2702 mii_tick(device_get_softc(sc->bnx_miibus));
2703 }
2704
2705 callout_reset(&sc->bnx_stat_timer, hz, bnx_tick, sc);
2706
2707 lwkt_serialize_exit(ifp->if_serializer);
2708}
2709
2710static void
2711bnx_stats_update_regs(struct bnx_softc *sc)
2712{
2713 struct ifnet *ifp = &sc->arpcom.ac_if;
2714 struct bge_mac_stats_regs stats;
2715 uint32_t *s;
2716 int i;
2717
2718 s = (uint32_t *)&stats;
2719 for (i = 0; i < sizeof(struct bge_mac_stats_regs); i += 4) {
2720 *s = CSR_READ_4(sc, BGE_RX_STATS + i);
2721 s++;
2722 }
2723
2724 ifp->if_collisions +=
2725 (stats.dot3StatsSingleCollisionFrames +
2726 stats.dot3StatsMultipleCollisionFrames +
2727 stats.dot3StatsExcessiveCollisions +
2728 stats.dot3StatsLateCollisions) -
2729 ifp->if_collisions;
2730}
2731
2732/*
2733 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
2734 * pointers to descriptors.
2735 */
2736static int
2737bnx_encap(struct bnx_softc *sc, struct mbuf **m_head0, uint32_t *txidx)
2738{
2739 struct bge_tx_bd *d = NULL;
2740 uint16_t csum_flags = 0;
2741 bus_dma_segment_t segs[BNX_NSEG_NEW];
2742 bus_dmamap_t map;
2743 int error, maxsegs, nsegs, idx, i;
2744 struct mbuf *m_head = *m_head0, *m_new;
2745
2746 if (m_head->m_pkthdr.csum_flags) {
2747 if (m_head->m_pkthdr.csum_flags & CSUM_IP)
2748 csum_flags |= BGE_TXBDFLAG_IP_CSUM;
2749 if (m_head->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
2750 csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
2751 if (m_head->m_flags & M_LASTFRAG)
2752 csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
2753 else if (m_head->m_flags & M_FRAG)
2754 csum_flags |= BGE_TXBDFLAG_IP_FRAG;
2755 }
2756
2757 idx = *txidx;
2758 map = sc->bnx_cdata.bnx_tx_dmamap[idx];
2759
2760 maxsegs = (BGE_TX_RING_CNT - sc->bnx_txcnt) - BNX_NSEG_RSVD;
2761 KASSERT(maxsegs >= BNX_NSEG_SPARE,
2762 ("not enough segments %d", maxsegs));
2763
2764 if (maxsegs > BNX_NSEG_NEW)
2765 maxsegs = BNX_NSEG_NEW;
2766
2767 /*
2768 * Pad outbound frame to BGE_MIN_FRAMELEN for an unusual reason.
2769 * The bge hardware will pad out Tx runts to BGE_MIN_FRAMELEN,
2770 * but when such padded frames employ the bge IP/TCP checksum
2771 * offload, the hardware checksum assist gives incorrect results
2772 * (possibly from incorporating its own padding into the UDP/TCP
2773 * checksum; who knows). If we pad such runts with zeros, the
2774 * onboard checksum comes out correct.
2775 */
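	/*
	 * For example, a checksummed frame shorter than BNX_MIN_FRAMELEN
	 * (such as a small UDP datagram) is zero-padded by m_devpad()
	 * below before it is DMA-mapped.
	 */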
2776 if ((csum_flags & BGE_TXBDFLAG_TCP_UDP_CSUM) &&
2777 m_head->m_pkthdr.len < BNX_MIN_FRAMELEN) {
2778 error = m_devpad(m_head, BNX_MIN_FRAMELEN);
2779 if (error)
2780 goto back;
2781 }
2782
2783 if ((sc->bnx_flags & BNX_FLAG_SHORTDMA) && m_head->m_next != NULL) {
2784 m_new = bnx_defrag_shortdma(m_head);
2785 if (m_new == NULL) {
2786 error = ENOBUFS;
2787 goto back;
2788 }
2789 *m_head0 = m_head = m_new;
2790 }
2791 if (sc->bnx_force_defrag && m_head->m_next != NULL) {
2792 /*
2793 		 * Forcefully defragment the mbuf chain to overcome the
2794 		 * hardware limitation of supporting only a single outstanding
2795 * DMA read operation. If it fails, keep moving on using
2796 * the original mbuf chain.
2797 */
2798 m_new = m_defrag(m_head, MB_DONTWAIT);
2799 if (m_new != NULL)
2800 *m_head0 = m_head = m_new;
2801 }
2802
2803 error = bus_dmamap_load_mbuf_defrag(sc->bnx_cdata.bnx_tx_mtag, map,
2804 m_head0, segs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
2805 if (error)
2806 goto back;
2807
2808 m_head = *m_head0;
2809 bus_dmamap_sync(sc->bnx_cdata.bnx_tx_mtag, map, BUS_DMASYNC_PREWRITE);
2810
2811 for (i = 0; ; i++) {
2812 d = &sc->bnx_ldata.bnx_tx_ring[idx];
2813
2814 d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr);
2815 d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr);
2816 d->bge_len = segs[i].ds_len;
2817 d->bge_flags = csum_flags;
2818
2819 if (i == nsegs - 1)
2820 break;
2821 BNX_INC(idx, BGE_TX_RING_CNT);
2822 }
2823 /* Mark the last segment as end of packet... */
2824 d->bge_flags |= BGE_TXBDFLAG_END;
2825
2826 /* Set vlan tag to the first segment of the packet. */
2827 d = &sc->bnx_ldata.bnx_tx_ring[*txidx];
2828 if (m_head->m_flags & M_VLANTAG) {
2829 d->bge_flags |= BGE_TXBDFLAG_VLAN_TAG;
2830 d->bge_vlan_tag = m_head->m_pkthdr.ether_vlantag;
2831 } else {
2832 d->bge_vlan_tag = 0;
2833 }
2834
2835 /*
2836 	 * Ensure that the map for this transmission is placed at
2837 * the array index of the last descriptor in this chain.
2838 */
2839 sc->bnx_cdata.bnx_tx_dmamap[*txidx] = sc->bnx_cdata.bnx_tx_dmamap[idx];
2840 sc->bnx_cdata.bnx_tx_dmamap[idx] = map;
2841 sc->bnx_cdata.bnx_tx_chain[idx] = m_head;
2842 sc->bnx_txcnt += nsegs;
2843
2844 BNX_INC(idx, BGE_TX_RING_CNT);
2845 *txidx = idx;
2846back:
2847 if (error) {
2848 m_freem(*m_head0);
2849 *m_head0 = NULL;
2850 }
2851 return error;
2852}
2853
2854/*
2855 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
2856 * to the mbuf data regions directly in the transmit descriptors.
2857 */
2858static void
2859bnx_start(struct ifnet *ifp)
2860{
2861 struct bnx_softc *sc = ifp->if_softc;
2862 struct mbuf *m_head = NULL;
2863 uint32_t prodidx;
2864 int need_trans;
2865
2866 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
2867 return;
2868
2869 prodidx = sc->bnx_tx_prodidx;
2870
2871 need_trans = 0;
2872 while (sc->bnx_cdata.bnx_tx_chain[prodidx] == NULL) {
2873 m_head = ifq_dequeue(&ifp->if_snd, NULL);
2874 if (m_head == NULL)
2875 break;
2876
2877 /*
2878 * XXX
2879 * The code inside the if() block is never reached since we
2880 * must mark CSUM_IP_FRAGS in our if_hwassist to start getting
2881 * requests to checksum TCP/UDP in a fragmented packet.
2882 *
2883 * XXX
2884 * safety overkill. If this is a fragmented packet chain
2885 * with delayed TCP/UDP checksums, then only encapsulate
2886 * it if we have enough descriptors to handle the entire
2887 * chain at once.
2888 * (paranoia -- may not actually be needed)
2889 */
2890 if ((m_head->m_flags & M_FIRSTFRAG) &&
2891 (m_head->m_pkthdr.csum_flags & CSUM_DELAY_DATA)) {
2892 if ((BGE_TX_RING_CNT - sc->bnx_txcnt) <
2893 m_head->m_pkthdr.csum_data + BNX_NSEG_RSVD) {
2894 ifp->if_flags |= IFF_OACTIVE;
2895 ifq_prepend(&ifp->if_snd, m_head);
2896 break;
2897 }
2898 }
2899
2900 /*
2901 		 * Sanity check: avoid coming within BNX_NSEG_RSVD
2902 		 * descriptors of the end of the ring. Also make
2903 		 * sure there are BNX_NSEG_SPARE descriptors for
2904 * jumbo buffers' defragmentation.
2905 */
2906 if ((BGE_TX_RING_CNT - sc->bnx_txcnt) <
2907 (BNX_NSEG_RSVD + BNX_NSEG_SPARE)) {
2908 ifp->if_flags |= IFF_OACTIVE;
2909 ifq_prepend(&ifp->if_snd, m_head);
2910 break;
2911 }
2912
2913 /*
2914 * Pack the data into the transmit ring. If we
2915 * don't have room, set the OACTIVE flag and wait
2916 * for the NIC to drain the ring.
2917 */
2918 if (bnx_encap(sc, &m_head, &prodidx)) {
2919 ifp->if_flags |= IFF_OACTIVE;
2920 ifp->if_oerrors++;
2921 break;
2922 }
2923 need_trans = 1;
2924
2925 ETHER_BPF_MTAP(ifp, m_head);
2926 }
2927
2928 if (!need_trans)
2929 return;
2930
2931 /* Transmit */
2932 bnx_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
2933
2934 sc->bnx_tx_prodidx = prodidx;
2935
2936 /*
2937 * Set a timeout in case the chip goes out to lunch.
2938 */
2939 ifp->if_timer = 5;
2940}
2941
2942static void
2943bnx_init(void *xsc)
2944{
2945 struct bnx_softc *sc = xsc;
2946 struct ifnet *ifp = &sc->arpcom.ac_if;
2947 uint16_t *m;
2948 uint32_t mode;
2949
2950 ASSERT_SERIALIZED(ifp->if_serializer);
2951
2952 /* Cancel pending I/O and flush buffers. */
2953 bnx_stop(sc);
2954 bnx_reset(sc);
2955 bnx_chipinit(sc);
2956
2957 /*
2958 * Init the various state machines, ring
2959 * control blocks and firmware.
2960 */
2961 if (bnx_blockinit(sc)) {
2962 if_printf(ifp, "initialization failure\n");
2963 bnx_stop(sc);
2964 return;
2965 }
2966
2967 /* Specify MTU. */
2968 CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
2969 ETHER_HDR_LEN + ETHER_CRC_LEN + EVL_ENCAPLEN);
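	/*
	 * E.g. with the default 1500-byte MTU this programs
	 * 1500 + 14 (header) + 4 (CRC) + 4 (VLAN tag) = 1522 bytes.
	 */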
2970
2971 /* Load our MAC address. */
2972 m = (uint16_t *)&sc->arpcom.ac_enaddr[0];
2973 CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
2974 CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
2975
2976 /* Enable or disable promiscuous mode as needed. */
2977 bnx_setpromisc(sc);
2978
2979 /* Program multicast filter. */
2980 bnx_setmulti(sc);
2981
2982 /* Init RX ring. */
2983 if (bnx_init_rx_ring_std(sc)) {
2984 if_printf(ifp, "RX ring initialization failed\n");
2985 bnx_stop(sc);
2986 return;
2987 }
2988
2989 /* Init jumbo RX ring. */
2990 if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN)) {
2991 if (bnx_init_rx_ring_jumbo(sc)) {
2992 if_printf(ifp, "Jumbo RX ring initialization failed\n");
2993 bnx_stop(sc);
2994 return;
2995 }
2996 }
2997
2998 /* Init our RX return ring index */
2999 sc->bnx_rx_saved_considx = 0;
3000
3001 /* Init TX ring. */
3002 bnx_init_tx_ring(sc);
3003
3004 /* Enable TX MAC state machine lockup fix. */
3005 mode = CSR_READ_4(sc, BGE_TX_MODE);
3006 mode |= BGE_TXMODE_MBUF_LOCKUP_FIX;
3007 if (sc->bnx_asicrev == BGE_ASICREV_BCM5720) {
3008 mode &= ~(BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE);
3009 mode |= CSR_READ_4(sc, BGE_TX_MODE) &
3010 (BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE);
3011 }
3012 /* Turn on transmitter */
3013 CSR_WRITE_4(sc, BGE_TX_MODE, mode | BGE_TXMODE_ENABLE);
3014
3015 /* Turn on receiver */
3016 BNX_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
3017
3018 /*
3019 * Set the number of good frames to receive after RX MBUF
3020 * Low Watermark has been reached. After the RX MAC receives
3021 * this number of frames, it will drop subsequent incoming
3022 * frames until the MBUF High Watermark is reached.
3023 */
3024 	if (BNX_IS_57765_FAMILY(sc))
3025 CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 1);
3026 else
3027 CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 2);
3028
3029 if (sc->bnx_irq_type == PCI_INTR_TYPE_MSI) {
3030 if (bootverbose) {
3031 if_printf(ifp, "MSI_MODE: %#x\n",
3032 CSR_READ_4(sc, BGE_MSI_MODE));
3033 }
3034 }
3035
3036 /* Tell firmware we're alive. */
3037 BNX_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3038
3039 /* Enable host interrupts if polling(4) is not enabled. */
3040 PCI_SETBIT(sc->bnx_dev, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA, 4);
3041#ifdef DEVICE_POLLING
3042 if (ifp->if_flags & IFF_POLLING)
3043 bnx_disable_intr(sc);
3044 else
3045#endif
3046 bnx_enable_intr(sc);
3047
3048 bnx_ifmedia_upd(ifp);
3049
3050 ifp->if_flags |= IFF_RUNNING;
3051 ifp->if_flags &= ~IFF_OACTIVE;
3052
3053 callout_reset(&sc->bnx_stat_timer, hz, bnx_tick, sc);
3054}
3055
3056/*
3057 * Set media options.
3058 */
3059static int
3060bnx_ifmedia_upd(struct ifnet *ifp)
3061{
3062 struct bnx_softc *sc = ifp->if_softc;
3063
3064 /* If this is a 1000baseX NIC, enable the TBI port. */
3065 if (sc->bnx_flags & BNX_FLAG_TBI) {
3066 struct ifmedia *ifm = &sc->bnx_ifmedia;
3067
3068 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
3069 return(EINVAL);
3070
3071 switch(IFM_SUBTYPE(ifm->ifm_media)) {
3072 case IFM_AUTO:
3073 break;
3074
3075 case IFM_1000_SX:
3076 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
3077 BNX_CLRBIT(sc, BGE_MAC_MODE,
3078 BGE_MACMODE_HALF_DUPLEX);
3079 } else {
3080 BNX_SETBIT(sc, BGE_MAC_MODE,
3081 BGE_MACMODE_HALF_DUPLEX);
3082 }
3083 break;
3084 default:
3085 return(EINVAL);
3086 }
3087 } else {
3088 struct mii_data *mii = device_get_softc(sc->bnx_miibus);
3089
3090 sc->bnx_link_evt++;
3091 sc->bnx_link = 0;
3092 if (mii->mii_instance) {
3093 struct mii_softc *miisc;
3094
3095 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
3096 mii_phy_reset(miisc);
3097 }
3098 mii_mediachg(mii);
3099
3100 /*
3101 * Force an interrupt so that we will call bnx_link_upd
3102 * if needed and clear any pending link state attention.
3103		 * Without this we would not get any further interrupts
3104		 * for link state changes, and thus would never bring the
3105		 * link UP or be able to send in bnx_start. The only other
3106		 * way to get things working was to receive a packet and
3107		 * get an RX intr.
3108		 *
3109		 * bnx_tick should help for fiber cards, and we might not
3110		 * need to do this here if BNX_FLAG_TBI is set, but since
3111		 * we poll for fiber anyway it should do no harm.
3112 */
3113 BNX_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
3114 }
3115 return(0);
3116}
3117
3118/*
3119 * Report current media status.
3120 */
3121static void
3122bnx_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
3123{
3124 struct bnx_softc *sc = ifp->if_softc;
3125
3126 if (sc->bnx_flags & BNX_FLAG_TBI) {
3127 ifmr->ifm_status = IFM_AVALID;
3128 ifmr->ifm_active = IFM_ETHER;
3129 if (CSR_READ_4(sc, BGE_MAC_STS) &
3130 BGE_MACSTAT_TBI_PCS_SYNCHED) {
3131 ifmr->ifm_status |= IFM_ACTIVE;
3132 } else {
3133 ifmr->ifm_active |= IFM_NONE;
3134 return;
3135 }
3136
3137 ifmr->ifm_active |= IFM_1000_SX;
3138 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
3139 ifmr->ifm_active |= IFM_HDX;
3140 else
3141 ifmr->ifm_active |= IFM_FDX;
3142 } else {
3143 struct mii_data *mii = device_get_softc(sc->bnx_miibus);
3144
3145 mii_pollstat(mii);
3146 ifmr->ifm_active = mii->mii_media_active;
3147 ifmr->ifm_status = mii->mii_media_status;
3148 }
3149}
3150
3151static int
3152bnx_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
3153{
3154 struct bnx_softc *sc = ifp->if_softc;
3155 struct ifreq *ifr = (struct ifreq *)data;
3156 int mask, error = 0;
3157
3158 ASSERT_SERIALIZED(ifp->if_serializer);
3159
3160 switch (command) {
3161 case SIOCSIFMTU:
3162 if ((!BNX_IS_JUMBO_CAPABLE(sc) && ifr->ifr_mtu > ETHERMTU) ||
3163 (BNX_IS_JUMBO_CAPABLE(sc) &&
3164 ifr->ifr_mtu > BNX_JUMBO_MTU)) {
3165 error = EINVAL;
3166 } else if (ifp->if_mtu != ifr->ifr_mtu) {
3167 ifp->if_mtu = ifr->ifr_mtu;
3168 if (ifp->if_flags & IFF_RUNNING)
3169 bnx_init(sc);
3170 }
3171 break;
3172 case SIOCSIFFLAGS:
3173 if (ifp->if_flags & IFF_UP) {
3174 if (ifp->if_flags & IFF_RUNNING) {
3175 mask = ifp->if_flags ^ sc->bnx_if_flags;
3176
3177 /*
3178 * If only the state of the PROMISC flag
3179 * changed, then just use the 'set promisc
3180 * mode' command instead of reinitializing
3181 * the entire NIC. Doing a full re-init
3182 * means reloading the firmware and waiting
3183 * for it to start up, which may take a
3184 * second or two. Similarly for ALLMULTI.
3185 */
3186 if (mask & IFF_PROMISC)
3187 bnx_setpromisc(sc);
3188 if (mask & IFF_ALLMULTI)
3189 bnx_setmulti(sc);
3190 } else {
3191 bnx_init(sc);
3192 }
3193 } else if (ifp->if_flags & IFF_RUNNING) {
3194 bnx_stop(sc);
3195 }
3196 sc->bnx_if_flags = ifp->if_flags;
3197 break;
3198 case SIOCADDMULTI:
3199 case SIOCDELMULTI:
3200 if (ifp->if_flags & IFF_RUNNING)
3201 bnx_setmulti(sc);
3202 break;
3203 case SIOCSIFMEDIA:
3204 case SIOCGIFMEDIA:
3205 if (sc->bnx_flags & BNX_FLAG_TBI) {
3206 error = ifmedia_ioctl(ifp, ifr,
3207 &sc->bnx_ifmedia, command);
3208 } else {
3209 struct mii_data *mii;
3210
3211 mii = device_get_softc(sc->bnx_miibus);
3212 error = ifmedia_ioctl(ifp, ifr,
3213 &mii->mii_media, command);
3214 }
3215 break;
3216 case SIOCSIFCAP:
3217 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
3218 if (mask & IFCAP_HWCSUM) {
3219 ifp->if_capenable ^= (mask & IFCAP_HWCSUM);
3220 if (IFCAP_HWCSUM & ifp->if_capenable)
3221 ifp->if_hwassist = BNX_CSUM_FEATURES;
3222 else
3223 ifp->if_hwassist = 0;
3224 }
3225 break;
3226 default:
3227 error = ether_ioctl(ifp, command, data);
3228 break;
3229 }
3230 return error;
3231}
3232
3233static void
3234bnx_watchdog(struct ifnet *ifp)
3235{
3236 struct bnx_softc *sc = ifp->if_softc;
3237
3238 if_printf(ifp, "watchdog timeout -- resetting\n");
3239
3240 bnx_init(sc);
3241
3242 ifp->if_oerrors++;
3243
3244 if (!ifq_is_empty(&ifp->if_snd))
3245 if_devstart(ifp);
3246}
3247
3248/*
3249 * Stop the adapter and free any mbufs allocated to the
3250 * RX and TX lists.
3251 */
3252static void
3253bnx_stop(struct bnx_softc *sc)
3254{
3255 struct ifnet *ifp = &sc->arpcom.ac_if;
3256
3257 ASSERT_SERIALIZED(ifp->if_serializer);
3258
3259 callout_stop(&sc->bnx_stat_timer);
3260
3261 /*
3262 * Disable all of the receiver blocks
3263 */
3264 bnx_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
3265 bnx_stop_block(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
3266 bnx_stop_block(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
3267 bnx_stop_block(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
3268 bnx_stop_block(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
3269 bnx_stop_block(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
3270
3271 /*
3272 * Disable all of the transmit blocks
3273 */
3274 bnx_stop_block(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
3275 bnx_stop_block(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
3276 bnx_stop_block(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
3277 bnx_stop_block(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
3278 bnx_stop_block(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
3279 bnx_stop_block(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
3280
3281 /*
3282 * Shut down all of the memory managers and related
3283 * state machines.
3284 */
3285 bnx_stop_block(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
3286 bnx_stop_block(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
3287 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
3288 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
3289
3290 /* Disable host interrupts. */
3291 bnx_disable_intr(sc);
3292
3293 /*
3294 * Tell firmware we're shutting down.
3295 */
3296 BNX_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3297
3298 /* Free the RX lists. */
3299 bnx_free_rx_ring_std(sc);
3300
3301 /* Free jumbo RX list. */
3302 if (BNX_IS_JUMBO_CAPABLE(sc))
3303 bnx_free_rx_ring_jumbo(sc);
3304
3305 /* Free TX buffers. */
3306 bnx_free_tx_ring(sc);
3307
3308 sc->bnx_status_tag = 0;
3309 sc->bnx_link = 0;
3310 sc->bnx_coal_chg = 0;
3311
3312 sc->bnx_tx_saved_considx = BNX_TXCONS_UNSET;
3313
3314 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
3315 ifp->if_timer = 0;
3316}
3317
3318/*
3319 * Stop all chip I/O so that the kernel's probe routines don't
3320 * get confused by errant DMAs when rebooting.
3321 */
3322static void
3323bnx_shutdown(device_t dev)
3324{
3325 struct bnx_softc *sc = device_get_softc(dev);
3326 struct ifnet *ifp = &sc->arpcom.ac_if;
3327
3328 lwkt_serialize_enter(ifp->if_serializer);
3329 bnx_stop(sc);
3330 bnx_reset(sc);
3331 lwkt_serialize_exit(ifp->if_serializer);
3332}
3333
3334static int
3335bnx_suspend(device_t dev)
3336{
3337 struct bnx_softc *sc = device_get_softc(dev);
3338 struct ifnet *ifp = &sc->arpcom.ac_if;
3339
3340 lwkt_serialize_enter(ifp->if_serializer);
3341 bnx_stop(sc);
3342 lwkt_serialize_exit(ifp->if_serializer);
3343
3344 return 0;
3345}
3346
3347static int
3348bnx_resume(device_t dev)
3349{
3350 struct bnx_softc *sc = device_get_softc(dev);
3351 struct ifnet *ifp = &sc->arpcom.ac_if;
3352
3353 lwkt_serialize_enter(ifp->if_serializer);
3354
3355 if (ifp->if_flags & IFF_UP) {
3356 bnx_init(sc);
3357
3358 if (!ifq_is_empty(&ifp->if_snd))
3359 if_devstart(ifp);
3360 }
3361
3362 lwkt_serialize_exit(ifp->if_serializer);
3363
3364 return 0;
3365}
3366
3367static void
3368bnx_setpromisc(struct bnx_softc *sc)
3369{
3370 struct ifnet *ifp = &sc->arpcom.ac_if;
3371
3372 if (ifp->if_flags & IFF_PROMISC)
3373 BNX_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
3374 else
3375 BNX_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
3376}
3377
3378static void
3379bnx_dma_free(struct bnx_softc *sc)
3380{
3381 int i;
3382
3383 /* Destroy RX mbuf DMA stuffs. */
3384 if (sc->bnx_cdata.bnx_rx_mtag != NULL) {
3385 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
3386 bus_dmamap_destroy(sc->bnx_cdata.bnx_rx_mtag,
3387 sc->bnx_cdata.bnx_rx_std_dmamap[i]);
3388 }
3389 bus_dmamap_destroy(sc->bnx_cdata.bnx_rx_mtag,
3390 sc->bnx_cdata.bnx_rx_tmpmap);
3391 bus_dma_tag_destroy(sc->bnx_cdata.bnx_rx_mtag);
3392 }
3393
3394 /* Destroy TX mbuf DMA stuffs. */
3395 if (sc->bnx_cdata.bnx_tx_mtag != NULL) {
3396 for (i = 0; i < BGE_TX_RING_CNT; i++) {
3397 bus_dmamap_destroy(sc->bnx_cdata.bnx_tx_mtag,
3398 sc->bnx_cdata.bnx_tx_dmamap[i]);
3399 }
3400 bus_dma_tag_destroy(sc->bnx_cdata.bnx_tx_mtag);
3401 }
3402
3403 /* Destroy standard RX ring */
3404 bnx_dma_block_free(sc->bnx_cdata.bnx_rx_std_ring_tag,
3405 sc->bnx_cdata.bnx_rx_std_ring_map,
3406 sc->bnx_ldata.bnx_rx_std_ring);
3407
3408 if (BNX_IS_JUMBO_CAPABLE(sc))
3409 bnx_free_jumbo_mem(sc);
3410
3411 /* Destroy RX return ring */
3412 bnx_dma_block_free(sc->bnx_cdata.bnx_rx_return_ring_tag,
3413 sc->bnx_cdata.bnx_rx_return_ring_map,
3414 sc->bnx_ldata.bnx_rx_return_ring);
3415
3416 /* Destroy TX ring */
3417 bnx_dma_block_free(sc->bnx_cdata.bnx_tx_ring_tag,
3418 sc->bnx_cdata.bnx_tx_ring_map,
3419 sc->bnx_ldata.bnx_tx_ring);
3420
3421 /* Destroy status block */
3422 bnx_dma_block_free(sc->bnx_cdata.bnx_status_tag,
3423 sc->bnx_cdata.bnx_status_map,
3424 sc->bnx_ldata.bnx_status_block);
3425
3426 /* Destroy the parent tag */
3427 if (sc->bnx_cdata.bnx_parent_tag != NULL)
3428 bus_dma_tag_destroy(sc->bnx_cdata.bnx_parent_tag);
3429}
3430
3431static int
3432bnx_dma_alloc(struct bnx_softc *sc)
3433{
3434 struct ifnet *ifp = &sc->arpcom.ac_if;
3435 int i, error;
3436
3437 /*
3438 * Allocate the parent bus DMA tag appropriate for PCI.
3439 *
3440	 * All of the NetExtreme/NetLink controllers have a 4GB boundary
3441	 * DMA bug.
3442	 * Whenever an address crosses a multiple of the 4GB boundary
3443	 * (including 4GB, 8GB, 12GB, etc.) and makes the transition
3444	 * from 0xX_FFFF_FFFF to 0x(X+1)_0000_0000, an internal DMA
3445	 * state machine will lock up and cause the device to hang.
3446 */
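	/*
	 * E.g. a buffer spanning 0x0_FFFF_F000 - 0x1_0000_0400 would hit
	 * the bug; the BGE_DMA_BOUNDARY_4G argument below makes busdma
	 * keep every DMA segment inside a single 4GB window.
	 */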
3447 error = bus_dma_tag_create(NULL, 1, BGE_DMA_BOUNDARY_4G,
3448 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3449 NULL, NULL,
3450 BUS_SPACE_MAXSIZE_32BIT, 0,
3451 BUS_SPACE_MAXSIZE_32BIT,
3452 0, &sc->bnx_cdata.bnx_parent_tag);
3453 if (error) {
3454 if_printf(ifp, "could not allocate parent dma tag\n");
3455 return error;
3456 }
3457
3458 /*
3459 * Create DMA tag and maps for RX mbufs.
3460 */
3461 error = bus_dma_tag_create(sc->bnx_cdata.bnx_parent_tag, 1, 0,
3462 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3463 NULL, NULL, MCLBYTES, 1, MCLBYTES,
3464 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK,
3465 &sc->bnx_cdata.bnx_rx_mtag);
3466 if (error) {
3467 if_printf(ifp, "could not allocate RX mbuf dma tag\n");
3468 return error;
3469 }
3470
3471 error = bus_dmamap_create(sc->bnx_cdata.bnx_rx_mtag,
3472 BUS_DMA_WAITOK, &sc->bnx_cdata.bnx_rx_tmpmap);
3473 if (error) {
3474 bus_dma_tag_destroy(sc->bnx_cdata.bnx_rx_mtag);
3475 sc->bnx_cdata.bnx_rx_mtag = NULL;
3476 return error;
3477 }
3478
3479 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
3480 error = bus_dmamap_create(sc->bnx_cdata.bnx_rx_mtag,
3481 BUS_DMA_WAITOK,
3482 &sc->bnx_cdata.bnx_rx_std_dmamap[i]);
3483 if (error) {
3484 int j;
3485
3486 for (j = 0; j < i; ++j) {
3487 bus_dmamap_destroy(sc->bnx_cdata.bnx_rx_mtag,
3488 sc->bnx_cdata.bnx_rx_std_dmamap[j]);
3489 }
3490 bus_dma_tag_destroy(sc->bnx_cdata.bnx_rx_mtag);
3491 sc->bnx_cdata.bnx_rx_mtag = NULL;
3492
3493 if_printf(ifp, "could not create DMA map for RX\n");
3494 return error;
3495 }
3496 }
3497
3498 /*
3499 * Create DMA tag and maps for TX mbufs.
3500 */
3501 error = bus_dma_tag_create(sc->bnx_cdata.bnx_parent_tag, 1, 0,
3502 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3503 NULL, NULL,
3504 BNX_JUMBO_FRAMELEN, BNX_NSEG_NEW, MCLBYTES,
3505 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK |
3506 BUS_DMA_ONEBPAGE,
3507 &sc->bnx_cdata.bnx_tx_mtag);
3508 if (error) {
3509 if_printf(ifp, "could not allocate TX mbuf dma tag\n");
3510 return error;
3511 }
3512
3513 for (i = 0; i < BGE_TX_RING_CNT; i++) {
3514 error = bus_dmamap_create(sc->bnx_cdata.bnx_tx_mtag,
3515 BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
3516 &sc->bnx_cdata.bnx_tx_dmamap[i]);
3517 if (error) {
3518 int j;
3519
3520 for (j = 0; j < i; ++j) {
3521 bus_dmamap_destroy(sc->bnx_cdata.bnx_tx_mtag,
3522 sc->bnx_cdata.bnx_tx_dmamap[j]);
3523 }
3524 bus_dma_tag_destroy(sc->bnx_cdata.bnx_tx_mtag);
3525 sc->bnx_cdata.bnx_tx_mtag = NULL;
3526
3527 if_printf(ifp, "could not create DMA map for TX\n");
3528 return error;
3529 }
3530 }
3531
3532 /*
3533 * Create DMA stuffs for standard RX ring.
3534 */
3535 error = bnx_dma_block_alloc(sc, BGE_STD_RX_RING_SZ,
3536 &sc->bnx_cdata.bnx_rx_std_ring_tag,
3537 &sc->bnx_cdata.bnx_rx_std_ring_map,
3538 (void *)&sc->bnx_ldata.bnx_rx_std_ring,
3539 &sc->bnx_ldata.bnx_rx_std_ring_paddr);
3540 if (error) {
3541 if_printf(ifp, "could not create std RX ring\n");
3542 return error;
3543 }
3544
3545 /*
3546 * Create jumbo buffer pool.
3547 */
3548 if (BNX_IS_JUMBO_CAPABLE(sc)) {
3549 error = bnx_alloc_jumbo_mem(sc);
3550 if (error) {
3551 if_printf(ifp, "could not create jumbo buffer pool\n");
3552 return error;
3553 }
3554 }
3555
3556 /*
3557 * Create DMA stuffs for RX return ring.
3558 */
3559 error = bnx_dma_block_alloc(sc,
3560 BGE_RX_RTN_RING_SZ(sc->bnx_return_ring_cnt),
3561 &sc->bnx_cdata.bnx_rx_return_ring_tag,
3562 &sc->bnx_cdata.bnx_rx_return_ring_map,
3563 (void *)&sc->bnx_ldata.bnx_rx_return_ring,
3564 &sc->bnx_ldata.bnx_rx_return_ring_paddr);
3565 if (error) {
3566 if_printf(ifp, "could not create RX ret ring\n");
3567 return error;
3568 }
3569
3570 /*
3571 * Create DMA stuffs for TX ring.
3572 */
3573 error = bnx_dma_block_alloc(sc, BGE_TX_RING_SZ,
3574 &sc->bnx_cdata.bnx_tx_ring_tag,
3575 &sc->bnx_cdata.bnx_tx_ring_map,
3576 (void *)&sc->bnx_ldata.bnx_tx_ring,
3577 &sc->bnx_ldata.bnx_tx_ring_paddr);
3578 if (error) {
3579 if_printf(ifp, "could not create TX ring\n");
3580 return error;
3581 }
3582
3583 /*
3584 * Create DMA stuffs for status block.
3585 */
3586 error = bnx_dma_block_alloc(sc, BGE_STATUS_BLK_SZ,
3587 &sc->bnx_cdata.bnx_status_tag,
3588 &sc->bnx_cdata.bnx_status_map,
3589 (void *)&sc->bnx_ldata.bnx_status_block,
3590 &sc->bnx_ldata.bnx_status_block_paddr);
3591 if (error) {
3592 if_printf(ifp, "could not create status block\n");
3593 return error;
3594 }
3595
3596 return 0;
3597}
3598
3599static int
3600bnx_dma_block_alloc(struct bnx_softc *sc, bus_size_t size, bus_dma_tag_t *tag,
3601 bus_dmamap_t *map, void **addr, bus_addr_t *paddr)
3602{
3603 bus_dmamem_t dmem;
3604 int error;
3605
3606 error = bus_dmamem_coherent(sc->bnx_cdata.bnx_parent_tag, PAGE_SIZE, 0,
3607 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3608 size, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
3609 if (error)
3610 return error;
3611
3612 *tag = dmem.dmem_tag;
3613 *map = dmem.dmem_map;
3614 *addr = dmem.dmem_addr;
3615 *paddr = dmem.dmem_busaddr;
3616
3617 return 0;
3618}
3619
3620static void
3621bnx_dma_block_free(bus_dma_tag_t tag, bus_dmamap_t map, void *addr)
3622{
3623 if (tag != NULL) {
3624 bus_dmamap_unload(tag, map);
3625 bus_dmamem_free(tag, addr, map);
3626 bus_dma_tag_destroy(tag);
3627 }
3628}
3629
3630static void
3631bnx_tbi_link_upd(struct bnx_softc *sc, uint32_t status)
3632{
3633 struct ifnet *ifp = &sc->arpcom.ac_if;
3634
3635#define PCS_ENCODE_ERR (BGE_MACSTAT_PORT_DECODE_ERROR|BGE_MACSTAT_MI_COMPLETE)
3636
3637 /*
3638 * Sometimes PCS encoding errors are detected in
3639 * TBI mode (on fiber NICs), and for some reason
3640 * the chip will signal them as link changes.
3641 * If we get a link change event, but the 'PCS
3642 * encoding error' bit in the MAC status register
3643 * is set, don't bother doing a link check.
3644 * This avoids spurious "gigabit link up" messages
3645 * that sometimes appear on fiber NICs during
3646 * periods of heavy traffic.
3647 */
3648 if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
3649 if (!sc->bnx_link) {
3650 sc->bnx_link++;
3651 if (sc->bnx_asicrev == BGE_ASICREV_BCM5704) {
3652 BNX_CLRBIT(sc, BGE_MAC_MODE,
3653 BGE_MACMODE_TBI_SEND_CFGS);
3654 }
3655 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
3656
3657 if (bootverbose)
3658 if_printf(ifp, "link UP\n");
3659
3660 ifp->if_link_state = LINK_STATE_UP;
3661 if_link_state_change(ifp);
3662 }
3663 } else if ((status & PCS_ENCODE_ERR) != PCS_ENCODE_ERR) {
3664 if (sc->bnx_link) {
3665 sc->bnx_link = 0;
3666
3667 if (bootverbose)
3668 if_printf(ifp, "link DOWN\n");
3669
3670 ifp->if_link_state = LINK_STATE_DOWN;
3671 if_link_state_change(ifp);
3672 }
3673 }
3674
3675#undef PCS_ENCODE_ERR
3676
3677 /* Clear the attention. */
3678 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
3679 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
3680 BGE_MACSTAT_LINK_CHANGED);
3681}
3682
3683static void
3684bnx_copper_link_upd(struct bnx_softc *sc, uint32_t status __unused)
3685{
3686 struct ifnet *ifp = &sc->arpcom.ac_if;
3687 struct mii_data *mii = device_get_softc(sc->bnx_miibus);
3688
3689 mii_pollstat(mii);
3690 bnx_miibus_statchg(sc->bnx_dev);
3691
3692 if (bootverbose) {
3693 if (sc->bnx_link)
3694 if_printf(ifp, "link UP\n");
3695 else
3696 if_printf(ifp, "link DOWN\n");
3697 }
3698
3699 /* Clear the attention. */
3700 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
3701 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
3702 BGE_MACSTAT_LINK_CHANGED);
3703}
3704
3705static void
3706bnx_autopoll_link_upd(struct bnx_softc *sc, uint32_t status __unused)
3707{
3708 struct ifnet *ifp = &sc->arpcom.ac_if;
3709 struct mii_data *mii = device_get_softc(sc->bnx_miibus);
3710
3711 mii_pollstat(mii);
3712
3713 if (!sc->bnx_link &&
3714 (mii->mii_media_status & IFM_ACTIVE) &&
3715 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
3716 sc->bnx_link++;
3717 if (bootverbose)
3718 if_printf(ifp, "link UP\n");
3719 } else if (sc->bnx_link &&
3720 (!(mii->mii_media_status & IFM_ACTIVE) ||
3721 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
3722 sc->bnx_link = 0;
3723 if (bootverbose)
3724 if_printf(ifp, "link DOWN\n");
3725 }
3726
3727 /* Clear the attention. */
3728 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
3729 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
3730 BGE_MACSTAT_LINK_CHANGED);
3731}
3732
3733static int
3734bnx_sysctl_rx_coal_ticks(SYSCTL_HANDLER_ARGS)
3735{
3736 struct bnx_softc *sc = arg1;
3737
3738 return bnx_sysctl_coal_chg(oidp, arg1, arg2, req,
3739 &sc->bnx_rx_coal_ticks,
3740 BNX_RX_COAL_TICKS_MIN, BNX_RX_COAL_TICKS_MAX,
3741 BNX_RX_COAL_TICKS_CHG);
3742}
3743
3744static int
3745bnx_sysctl_tx_coal_ticks(SYSCTL_HANDLER_ARGS)
3746{
3747 struct bnx_softc *sc = arg1;
3748
3749 return bnx_sysctl_coal_chg(oidp, arg1, arg2, req,
3750 &sc->bnx_tx_coal_ticks,
3751 BNX_TX_COAL_TICKS_MIN, BNX_TX_COAL_TICKS_MAX,
3752 BNX_TX_COAL_TICKS_CHG);
3753}
3754
3755static int
3756bnx_sysctl_rx_coal_bds(SYSCTL_HANDLER_ARGS)
3757{
3758 struct bnx_softc *sc = arg1;
3759
3760 return bnx_sysctl_coal_chg(oidp, arg1, arg2, req,
3761 &sc->bnx_rx_coal_bds,
3762 BNX_RX_COAL_BDS_MIN, BNX_RX_COAL_BDS_MAX,
3763 BNX_RX_COAL_BDS_CHG);
3764}
3765
3766static int
3767bnx_sysctl_tx_coal_bds(SYSCTL_HANDLER_ARGS)
3768{
3769 struct bnx_softc *sc = arg1;
3770
3771 return bnx_sysctl_coal_chg(oidp, arg1, arg2, req,
3772 &sc->bnx_tx_coal_bds,
3773 BNX_TX_COAL_BDS_MIN, BNX_TX_COAL_BDS_MAX,
3774 BNX_TX_COAL_BDS_CHG);
3775}
3776
3777static int
3778bnx_sysctl_rx_coal_bds_int(SYSCTL_HANDLER_ARGS)
3779{
3780 struct bnx_softc *sc = arg1;
3781
3782 return bnx_sysctl_coal_chg(oidp, arg1, arg2, req,
3783 &sc->bnx_rx_coal_bds_int,
3784 BNX_RX_COAL_BDS_MIN, BNX_RX_COAL_BDS_MAX,
3785 BNX_RX_COAL_BDS_INT_CHG);
3786}
3787
3788static int
3789bnx_sysctl_tx_coal_bds_int(SYSCTL_HANDLER_ARGS)
3790{
3791 struct bnx_softc *sc = arg1;
3792
3793 return bnx_sysctl_coal_chg(oidp, arg1, arg2, req,
3794 &sc->bnx_tx_coal_bds_int,
3795 BNX_TX_COAL_BDS_MIN, BNX_TX_COAL_BDS_MAX,
3796 BNX_TX_COAL_BDS_INT_CHG);
3797}
3798
3799static int
3800bnx_sysctl_coal_chg(SYSCTL_HANDLER_ARGS, uint32_t *coal,
3801 int coal_min, int coal_max, uint32_t coal_chg_mask)
3802{
3803 struct bnx_softc *sc = arg1;
3804 struct ifnet *ifp = &sc->arpcom.ac_if;
3805 int error = 0, v;
3806
3807 lwkt_serialize_enter(ifp->if_serializer);
3808
3809 v = *coal;
3810 error = sysctl_handle_int(oidp, &v, 0, req);
3811 if (!error && req->newptr != NULL) {
3812 if (v < coal_min || v > coal_max) {
3813 error = EINVAL;
3814 } else {
3815 *coal = v;
3816 sc->bnx_coal_chg |= coal_chg_mask;
3817 }
3818 }
3819
3820 lwkt_serialize_exit(ifp->if_serializer);
3821 return error;
3822}
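
/*
 * Hedged sketch of how one of the handlers above is typically wired
 * up at attach time; the sysctl context/tree field names here are
 * assumptions, not taken from this section:
 */
#if 0
	SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
	    "rx_coal_ticks", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, bnx_sysctl_rx_coal_ticks, "I",
	    "Receive coalescing ticks (usec)");
#endif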
3823
3824static void
3825bnx_coal_change(struct bnx_softc *sc)
3826{
3827 struct ifnet *ifp = &sc->arpcom.ac_if;
 3828 	uint32_t val;	/* read-back after each register write */
3829
3830 ASSERT_SERIALIZED(ifp->if_serializer);
3831
3832 if (sc->bnx_coal_chg & BNX_RX_COAL_TICKS_CHG) {
3833 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS,
3834 sc->bnx_rx_coal_ticks);
3835 DELAY(10);
3836 val = CSR_READ_4(sc, BGE_HCC_RX_COAL_TICKS);
3837
3838 if (bootverbose) {
3839 if_printf(ifp, "rx_coal_ticks -> %u\n",
3840 sc->bnx_rx_coal_ticks);
3841 }
3842 }
3843
3844 if (sc->bnx_coal_chg & BNX_TX_COAL_TICKS_CHG) {
3845 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS,
3846 sc->bnx_tx_coal_ticks);
3847 DELAY(10);
3848 val = CSR_READ_4(sc, BGE_HCC_TX_COAL_TICKS);
3849
3850 if (bootverbose) {
3851 if_printf(ifp, "tx_coal_ticks -> %u\n",
3852 sc->bnx_tx_coal_ticks);
3853 }
3854 }
3855
3856 if (sc->bnx_coal_chg & BNX_RX_COAL_BDS_CHG) {
3857 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS,
3858 sc->bnx_rx_coal_bds);
3859 DELAY(10);
3860 val = CSR_READ_4(sc, BGE_HCC_RX_MAX_COAL_BDS);
3861
3862 if (bootverbose) {
3863 if_printf(ifp, "rx_coal_bds -> %u\n",
3864 sc->bnx_rx_coal_bds);
3865 }
3866 }
3867
3868 if (sc->bnx_coal_chg & BNX_TX_COAL_BDS_CHG) {
3869 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS,
3870 sc->bnx_tx_coal_bds);
3871 DELAY(10);
3872 val = CSR_READ_4(sc, BGE_HCC_TX_MAX_COAL_BDS);
3873
3874 if (bootverbose) {
3875 if_printf(ifp, "tx_max_coal_bds -> %u\n",
3876 sc->bnx_tx_coal_bds);
3877 }
3878 }
3879
3880 if (sc->bnx_coal_chg & BNX_RX_COAL_BDS_INT_CHG) {
3881 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT,
3882 sc->bnx_rx_coal_bds_int);
3883 DELAY(10);
3884 val = CSR_READ_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT);
3885
3886 if (bootverbose) {
3887 if_printf(ifp, "rx_coal_bds_int -> %u\n",
3888 sc->bnx_rx_coal_bds_int);
3889 }
3890 }
3891
3892 if (sc->bnx_coal_chg & BNX_TX_COAL_BDS_INT_CHG) {
3893 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT,
3894 sc->bnx_tx_coal_bds_int);
3895 DELAY(10);
3896 val = CSR_READ_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT);
3897
3898 if (bootverbose) {
3899 if_printf(ifp, "tx_coal_bds_int -> %u\n",
3900 sc->bnx_tx_coal_bds_int);
3901 }
3902 }
3903
3904 sc->bnx_coal_chg = 0;
3905}
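
/*
 * Hedged usage sketch: the sysctl handlers above only validate the
 * new value and set a bit in bnx_coal_chg; a later caller (shape
 * assumed here) applies the accumulated changes to the hardware:
 */
#if 0
	if (sc->bnx_coal_chg)
		bnx_coal_change(sc);
#endif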
3906
3907static void
3908bnx_enable_intr(struct bnx_softc *sc)
3909{
3910 struct ifnet *ifp = &sc->arpcom.ac_if;
3911
3912 lwkt_serialize_handler_enable(ifp->if_serializer);
3913
3914 /*
3915 * Enable interrupt.
3916 */
3917 bnx_writembx(sc, BGE_MBX_IRQ0_LO, sc->bnx_status_tag << 24);
3918 if (sc->bnx_flags & BNX_FLAG_ONESHOT_MSI) {
3919 /* XXX Linux driver */
3920 bnx_writembx(sc, BGE_MBX_IRQ0_LO, sc->bnx_status_tag << 24);
3921 }
3922
3923 /*
3924 * Unmask the interrupt when we stop polling.
3925 */
3926 PCI_CLRBIT(sc->bnx_dev, BGE_PCI_MISC_CTL,
3927 BGE_PCIMISCCTL_MASK_PCI_INTR, 4);
3928
3929 /*
 3930 * Trigger another interrupt, since the above write
 3931 * to interrupt mailbox0 may have acknowledged a
 3932 * pending interrupt.
3933 */
3934 BNX_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
3935}
3936
3937static void
3938bnx_disable_intr(struct bnx_softc *sc)
3939{
3940 struct ifnet *ifp = &sc->arpcom.ac_if;
3941
3942 /*
3943 * Mask the interrupt when we start polling.
3944 */
3945 PCI_SETBIT(sc->bnx_dev, BGE_PCI_MISC_CTL,
3946 BGE_PCIMISCCTL_MASK_PCI_INTR, 4);
3947
3948 /*
 3949 * Acknowledge a possibly asserted interrupt.
3950 */
3951 bnx_writembx(sc, BGE_MBX_IRQ0_LO, 1);
3952
3953 lwkt_serialize_handler_disable(ifp->if_serializer);
3954}
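
/*
 * Hedged usage sketch: with device polling, the two routines above
 * bracket the polled period (caller shape assumed):
 */
#if 0
	bnx_disable_intr(sc);	/* enter polling: mask INTA, ack, gate handler */
	/* ... RX/TX processed from the polling loop ... */
	bnx_enable_intr(sc);	/* leave polling: re-arm via mailbox, unmask */
#endif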
3955
3956static int
3957bnx_get_eaddr_mem(struct bnx_softc *sc, uint8_t ether_addr[])
3958{
3959 uint32_t mac_addr;
3960 int ret = 1;
3961
3962 mac_addr = bnx_readmem_ind(sc, 0x0c14);
 3963 	if ((mac_addr >> 16) == 0x484b) {	/* 0x484b == ASCII "HK" signature */
3964 ether_addr[0] = (uint8_t)(mac_addr >> 8);
3965 ether_addr[1] = (uint8_t)mac_addr;
3966 mac_addr = bnx_readmem_ind(sc, 0x0c18);
3967 ether_addr[2] = (uint8_t)(mac_addr >> 24);
3968 ether_addr[3] = (uint8_t)(mac_addr >> 16);
3969 ether_addr[4] = (uint8_t)(mac_addr >> 8);
3970 ether_addr[5] = (uint8_t)mac_addr;
3971 ret = 0;
3972 }
3973 return ret;
3974}
3975
3976static int
3977bnx_get_eaddr_nvram(struct bnx_softc *sc, uint8_t ether_addr[])
3978{
3979 int mac_offset = BGE_EE_MAC_OFFSET;
3980
3981 if (BNX_IS_5717_PLUS(sc)) {
3982 int f;
3983
3984 f = pci_get_function(sc->bnx_dev);
3985 if (f & 1)
3986 mac_offset = BGE_EE_MAC_OFFSET_5717;
3987 if (f > 1)
3988 mac_offset += BGE_EE_MAC_OFFSET_5717_OFF;
3989 } else if (sc->bnx_asicrev == BGE_ASICREV_BCM5906) {
 3990 		mac_offset = BGE_EE_MAC_OFFSET_5906;
 3991 	}
3992
3993 return bnx_read_nvram(sc, ether_addr, mac_offset + 2, ETHER_ADDR_LEN);
3994}
3995
3996static int
3997bnx_get_eaddr_eeprom(struct bnx_softc *sc, uint8_t ether_addr[])
3998{
3999 if (sc->bnx_flags & BNX_FLAG_NO_EEPROM)
4000 return 1;
4001
4002 return bnx_read_eeprom(sc, ether_addr, BGE_EE_MAC_OFFSET + 2,
4003 ETHER_ADDR_LEN);
4004}
4005
4006static int
4007bnx_get_eaddr(struct bnx_softc *sc, uint8_t eaddr[])
4008{
4009 static const bnx_eaddr_fcn_t bnx_eaddr_funcs[] = {
4010 /* NOTE: Order is critical */
4011 bnx_get_eaddr_mem,
4012 bnx_get_eaddr_nvram,
4013 bnx_get_eaddr_eeprom,
4014 NULL
4015 };
4016 const bnx_eaddr_fcn_t *func;
4017
4018 for (func = bnx_eaddr_funcs; *func != NULL; ++func) {
4019 if ((*func)(sc, eaddr) == 0)
4020 break;
4021 }
4022 return (*func == NULL ? ENXIO : 0);
4023}
4024
4025/*
4026 * NOTE: 'm' is not freed upon failure
4027 */
4028struct mbuf *
4029bnx_defrag_shortdma(struct mbuf *m)
4030{
4031 struct mbuf *n;
4032 int found;
4033
4034 /*
 4035 * If the device receives two back-to-back send BDs with less
 4036 * than or equal to 8 total bytes, it may hang.  The two
 4037 * back-to-back send BDs must be in the same frame for this
 4038 * failure to occur.  Scan the mbuf chain and check whether two
 4039 * back-to-back send BDs are present.  If so, allocate a new
 4040 * mbuf and copy the frame to work around the silicon bug.
4041 */
4042 for (n = m, found = 0; n != NULL; n = n->m_next) {
4043 if (n->m_len < 8) {
4044 found++;
4045 if (found > 1)
4046 break;
4047 continue;
4048 }
4049 found = 0;
4050 }
4051
4052 if (found > 1)
4053 n = m_defrag(m, MB_DONTWAIT);
4054 else
4055 n = m;
4056 return n;
4057}
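
/*
 * Hedged sketch of the expected caller shape in a TX encapsulation
 * path (the error handling shown is an assumption):
 */
#if 0
	n = bnx_defrag_shortdma(m_head);
	if (n == NULL) {
		/* NOTE: m_head is intentionally not freed here */
		return ENOBUFS;
	}
	m_head = n;
#endif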
4058
4059static void
4060bnx_stop_block(struct bnx_softc *sc, bus_size_t reg, uint32_t bit)
4061{
4062 int i;
4063
4064 BNX_CLRBIT(sc, reg, bit);
4065 for (i = 0; i < BNX_TIMEOUT; i++) {
4066 if ((CSR_READ_4(sc, reg) & bit) == 0)
4067 return;
4068 DELAY(100);
4069 }
4070}
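
/*
 * Example (hedged; the register/bit pair is illustrative): stop the
 * RX MAC and spin until the enable bit reads back clear:
 */
#if 0
	bnx_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
#endif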
4071
4072static void
4073bnx_link_poll(struct bnx_softc *sc)
4074{
4075 uint32_t status;
4076
4077 status = CSR_READ_4(sc, BGE_MAC_STS);
4078 if ((status & sc->bnx_link_chg) || sc->bnx_link_evt) {
4079 sc->bnx_link_evt = 0;
4080 sc->bnx_link_upd(sc, status);
4081 }
4082}
4083
4084static void
4085bnx_enable_msi(struct bnx_softc *sc)
4086{
4087 uint32_t msi_mode;
4088
4089 msi_mode = CSR_READ_4(sc, BGE_MSI_MODE);
4090 msi_mode |= BGE_MSIMODE_ENABLE;
4091 if (sc->bnx_flags & BNX_FLAG_ONESHOT_MSI) {
4092 /*
4093 * NOTE:
 4094 * 5718-PG105-R says that "one shot" mode
 4095 * does not work if MSI is used; however,
 4096 * in practice it obviously does work.
4097 */
4098 msi_mode &= ~BGE_MSIMODE_ONESHOT_DISABLE;
4099 }
4100 CSR_WRITE_4(sc, BGE_MSI_MODE, msi_mode);
4101}
4102
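/*
 * bnx_dma_swap_options() below computes the byte/word swap bits for
 * the mode control register.  Hedged sketch of the consumer: the
 * real write happens during block initialization elsewhere in this
 * file, and the extra flags shown are illustrative:
 *
 *	CSR_WRITE_4(sc, BGE_MODE_CTL, bnx_dma_swap_options(sc) |
 *	    BGE_MODECTL_MAC_ATTN_INTR | BGE_MODECTL_HOST_SEND_BDS |
 *	    BGE_MODECTL_TX_NO_PHDR_CSUM);
 */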
4103static uint32_t
4104bnx_dma_swap_options(struct bnx_softc *sc)
4105{
4106 uint32_t dma_options;
4107
4108 dma_options = BGE_MODECTL_WORDSWAP_NONFRAME |
4109 BGE_MODECTL_BYTESWAP_DATA | BGE_MODECTL_WORDSWAP_DATA;
4110#if BYTE_ORDER == BIG_ENDIAN
4111 dma_options |= BGE_MODECTL_BYTESWAP_NONFRAME;
4112#endif
4113 if (sc->bnx_asicrev == BGE_ASICREV_BCM5720) {
4114 dma_options |= BGE_MODECTL_BYTESWAP_B2HRX_DATA |
4115 BGE_MODECTL_WORDSWAP_B2HRX_DATA | BGE_MODECTL_B2HRX_ENABLE |
4116 BGE_MODECTL_HTX2B_ENABLE;
4117 }
4118 return dma_options;
4119}