bnx: Commit coalesce parameter changes in polling code
[dragonfly.git] / sys/dev/netif/bnx/if_bnx.c
/*
 * Copyright (c) 2001 Wind River Systems
 * Copyright (c) 1997, 1998, 1999, 2001
 *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/bge/if_bge.c,v 1.3.2.39 2005/07/03 03:41:18 silby Exp $
 */

#include "opt_bnx.h"
#include "opt_ifpoll.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/interrupt.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/rman.h>
#include <sys/serialize.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <netinet/ip.h>
#include <netinet/tcp.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_poll.h>
#include <net/if_types.h>
#include <net/ifq_var.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>

#include <dev/netif/mii_layer/mii.h>
#include <dev/netif/mii_layer/miivar.h>
#include <dev/netif/mii_layer/brgphyreg.h>

#include <bus/pci/pcidevs.h>
#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>

#include <dev/netif/bge/if_bgereg.h>
#include <dev/netif/bnx/if_bnxvar.h>

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

#define BNX_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)

#define BNX_INTR_CKINTVL	((10 * hz) / 1000)	/* 10ms */

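/*
 * Note: BNX_INTR_CKINTVL turns the 10ms check period into callout
 * ticks; with the common hz=1000 it evaluates to 10 ticks.  It is
 * presumably the interval used to rearm the interrupt-check callout
 * (sc->bnx_intr_timer, initialized in bnx_attach()).
 */
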
static const struct bnx_type {
	uint16_t	bnx_vid;
	uint16_t	bnx_did;
	char		*bnx_name;
} bnx_devs[] = {
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5717,
		"Broadcom BCM5717 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5718,
		"Broadcom BCM5718 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5719,
		"Broadcom BCM5719 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5720_ALT,
		"Broadcom BCM5720 Gigabit Ethernet" },

	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57761,
		"Broadcom BCM57761 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57762,
		"Broadcom BCM57762 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57765,
		"Broadcom BCM57765 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57766,
		"Broadcom BCM57766 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57781,
		"Broadcom BCM57781 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57782,
		"Broadcom BCM57782 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57785,
		"Broadcom BCM57785 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57786,
		"Broadcom BCM57786 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57791,
		"Broadcom BCM57791 Fast Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57795,
		"Broadcom BCM57795 Fast Ethernet" },

	{ 0, 0, NULL }
};

#define BNX_IS_JUMBO_CAPABLE(sc)	((sc)->bnx_flags & BNX_FLAG_JUMBO)
#define BNX_IS_5717_PLUS(sc)		((sc)->bnx_flags & BNX_FLAG_5717_PLUS)
#define BNX_IS_57765_PLUS(sc)		((sc)->bnx_flags & BNX_FLAG_57765_PLUS)
#define BNX_IS_57765_FAMILY(sc)	\
	((sc)->bnx_flags & BNX_FLAG_57765_FAMILY)

typedef int	(*bnx_eaddr_fcn_t)(struct bnx_softc *, uint8_t[]);

static int	bnx_probe(device_t);
static int	bnx_attach(device_t);
static int	bnx_detach(device_t);
static void	bnx_shutdown(device_t);
static int	bnx_suspend(device_t);
static int	bnx_resume(device_t);
static int	bnx_miibus_readreg(device_t, int, int);
static int	bnx_miibus_writereg(device_t, int, int, int);
static void	bnx_miibus_statchg(device_t);

#ifdef IFPOLL_ENABLE
static void	bnx_npoll(struct ifnet *, struct ifpoll_info *);
static void	bnx_npoll_compat(struct ifnet *, void *, int);
#endif
static void	bnx_intr_legacy(void *);
static void	bnx_msi(void *);
static void	bnx_msi_oneshot(void *);
static void	bnx_intr(struct bnx_softc *);
static void	bnx_enable_intr(struct bnx_softc *);
static void	bnx_disable_intr(struct bnx_softc *);
static void	bnx_txeof(struct bnx_softc *, uint16_t);
static void	bnx_rxeof(struct bnx_softc *, uint16_t);

static void	bnx_start(struct ifnet *);
static int	bnx_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	bnx_init(void *);
static void	bnx_stop(struct bnx_softc *);
static void	bnx_watchdog(struct ifnet *);
static int	bnx_ifmedia_upd(struct ifnet *);
static void	bnx_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static void	bnx_tick(void *);

static int	bnx_alloc_jumbo_mem(struct bnx_softc *);
static void	bnx_free_jumbo_mem(struct bnx_softc *);
static struct bnx_jslot
		*bnx_jalloc(struct bnx_softc *);
static void	bnx_jfree(void *);
static void	bnx_jref(void *);
static int	bnx_newbuf_std(struct bnx_softc *, int, int);
static int	bnx_newbuf_jumbo(struct bnx_softc *, int, int);
static void	bnx_setup_rxdesc_std(struct bnx_softc *, int);
static void	bnx_setup_rxdesc_jumbo(struct bnx_softc *, int);
static int	bnx_init_rx_ring_std(struct bnx_softc *);
static void	bnx_free_rx_ring_std(struct bnx_softc *);
static int	bnx_init_rx_ring_jumbo(struct bnx_softc *);
static void	bnx_free_rx_ring_jumbo(struct bnx_softc *);
static void	bnx_free_tx_ring(struct bnx_softc *);
static int	bnx_init_tx_ring(struct bnx_softc *);
static int	bnx_dma_alloc(struct bnx_softc *);
static void	bnx_dma_free(struct bnx_softc *);
static int	bnx_dma_block_alloc(struct bnx_softc *, bus_size_t,
		    bus_dma_tag_t *, bus_dmamap_t *, void **, bus_addr_t *);
static void	bnx_dma_block_free(bus_dma_tag_t, bus_dmamap_t, void *);
static struct mbuf *
		bnx_defrag_shortdma(struct mbuf *);
static int	bnx_encap(struct bnx_softc *, struct mbuf **, uint32_t *);
static int	bnx_setup_tso(struct bnx_softc *, struct mbuf **,
		    uint16_t *, uint16_t *);

static void	bnx_reset(struct bnx_softc *);
static int	bnx_chipinit(struct bnx_softc *);
static int	bnx_blockinit(struct bnx_softc *);
static void	bnx_stop_block(struct bnx_softc *, bus_size_t, uint32_t);
static void	bnx_enable_msi(struct bnx_softc *sc);
static void	bnx_setmulti(struct bnx_softc *);
static void	bnx_setpromisc(struct bnx_softc *);
static void	bnx_stats_update_regs(struct bnx_softc *);
static uint32_t	bnx_dma_swap_options(struct bnx_softc *);

static uint32_t	bnx_readmem_ind(struct bnx_softc *, uint32_t);
static void	bnx_writemem_ind(struct bnx_softc *, uint32_t, uint32_t);
#ifdef notdef
static uint32_t	bnx_readreg_ind(struct bnx_softc *, uint32_t);
#endif
static void	bnx_writereg_ind(struct bnx_softc *, uint32_t, uint32_t);
static void	bnx_writemem_direct(struct bnx_softc *, uint32_t, uint32_t);
static void	bnx_writembx(struct bnx_softc *, int, int);
static uint8_t	bnx_nvram_getbyte(struct bnx_softc *, int, uint8_t *);
static int	bnx_read_nvram(struct bnx_softc *, caddr_t, int, int);
static uint8_t	bnx_eeprom_getbyte(struct bnx_softc *, uint32_t, uint8_t *);
static int	bnx_read_eeprom(struct bnx_softc *, caddr_t, uint32_t, size_t);

static void	bnx_tbi_link_upd(struct bnx_softc *, uint32_t);
static void	bnx_copper_link_upd(struct bnx_softc *, uint32_t);
static void	bnx_autopoll_link_upd(struct bnx_softc *, uint32_t);
static void	bnx_link_poll(struct bnx_softc *);

static int	bnx_get_eaddr_mem(struct bnx_softc *, uint8_t[]);
static int	bnx_get_eaddr_nvram(struct bnx_softc *, uint8_t[]);
static int	bnx_get_eaddr_eeprom(struct bnx_softc *, uint8_t[]);
static int	bnx_get_eaddr(struct bnx_softc *, uint8_t[]);

static void	bnx_coal_change(struct bnx_softc *);
static int	bnx_sysctl_rx_coal_ticks(SYSCTL_HANDLER_ARGS);
static int	bnx_sysctl_tx_coal_ticks(SYSCTL_HANDLER_ARGS);
static int	bnx_sysctl_rx_coal_bds(SYSCTL_HANDLER_ARGS);
static int	bnx_sysctl_tx_coal_bds(SYSCTL_HANDLER_ARGS);
static int	bnx_sysctl_rx_coal_bds_int(SYSCTL_HANDLER_ARGS);
static int	bnx_sysctl_tx_coal_bds_int(SYSCTL_HANDLER_ARGS);
static int	bnx_sysctl_coal_chg(SYSCTL_HANDLER_ARGS, uint32_t *,
		    int, int, uint32_t);

static int	bnx_msi_enable = 1;
TUNABLE_INT("hw.bnx.msi.enable", &bnx_msi_enable);

static device_method_t bnx_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		bnx_probe),
	DEVMETHOD(device_attach,	bnx_attach),
	DEVMETHOD(device_detach,	bnx_detach),
	DEVMETHOD(device_shutdown,	bnx_shutdown),
	DEVMETHOD(device_suspend,	bnx_suspend),
	DEVMETHOD(device_resume,	bnx_resume),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	bnx_miibus_readreg),
	DEVMETHOD(miibus_writereg,	bnx_miibus_writereg),
	DEVMETHOD(miibus_statchg,	bnx_miibus_statchg),

	{ 0, 0 }
};

static DEFINE_CLASS_0(bnx, bnx_driver, bnx_methods, sizeof(struct bnx_softc));
static devclass_t bnx_devclass;

DECLARE_DUMMY_MODULE(if_bnx);
DRIVER_MODULE(if_bnx, pci, bnx_driver, bnx_devclass, NULL, NULL);
DRIVER_MODULE(miibus, bnx, miibus_driver, miibus_devclass, NULL, NULL);

static uint32_t
bnx_readmem_ind(struct bnx_softc *sc, uint32_t off)
{
	device_t dev = sc->bnx_dev;
	uint32_t val;

	if (sc->bnx_asicrev == BGE_ASICREV_BCM5906 &&
	    off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4)
		return 0;

	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
	val = pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4);
	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
	return (val);
}

static void
bnx_writemem_ind(struct bnx_softc *sc, uint32_t off, uint32_t val)
{
	device_t dev = sc->bnx_dev;

	if (sc->bnx_asicrev == BGE_ASICREV_BCM5906 &&
	    off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4)
		return;

	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
	pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
}
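
/*
 * A note on the two helpers above: the chip exposes its internal
 * memory through a sliding window in PCI config space.  The pattern
 * is to program the window base, access the data register, then park
 * the window back at 0 so a stray config-space access cannot reach
 * NIC memory.  The BCM5906 guard skips the statistics/send-ring
 * range, which is apparently not usable on that chip.
 */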

#ifdef notdef
static uint32_t
bnx_readreg_ind(struct bnx_softc *sc, uint32_t off)
{
	device_t dev = sc->bnx_dev;

	pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
	return (pci_read_config(dev, BGE_PCI_REG_DATA, 4));
}
#endif

static void
bnx_writereg_ind(struct bnx_softc *sc, uint32_t off, uint32_t val)
{
	device_t dev = sc->bnx_dev;

	pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
	pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);
}

static void
bnx_writemem_direct(struct bnx_softc *sc, uint32_t off, uint32_t val)
{
	CSR_WRITE_4(sc, off, val);
}

static void
bnx_writembx(struct bnx_softc *sc, int off, int val)
{
	if (sc->bnx_asicrev == BGE_ASICREV_BCM5906)
		off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI;

	CSR_WRITE_4(sc, off, val);
}
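
/*
 * Note: on the BCM5906 the mailbox registers live in a different
 * bank (presumably the low-power mailbox, per the BGE_LPMBX_ prefix),
 * so bnx_writembx() rebases the offset by the constant distance
 * between the two banks before writing.
 */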

static uint8_t
bnx_nvram_getbyte(struct bnx_softc *sc, int addr, uint8_t *dest)
{
	uint32_t access, byte = 0;
	int i;

	/* Lock. */
	CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
	for (i = 0; i < 8000; i++) {
		if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1)
			break;
		DELAY(20);
	}
	if (i == 8000)
		return (1);

	/* Enable access. */
	access = CSR_READ_4(sc, BGE_NVRAM_ACCESS);
	CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access | BGE_NVRAMACC_ENABLE);

	CSR_WRITE_4(sc, BGE_NVRAM_ADDR, addr & 0xfffffffc);
	CSR_WRITE_4(sc, BGE_NVRAM_CMD, BGE_NVRAM_READCMD);
	for (i = 0; i < BNX_TIMEOUT * 10; i++) {
		DELAY(10);
		if (CSR_READ_4(sc, BGE_NVRAM_CMD) & BGE_NVRAMCMD_DONE) {
			DELAY(10);
			break;
		}
	}

	if (i == BNX_TIMEOUT * 10) {
		if_printf(&sc->arpcom.ac_if, "nvram read timed out\n");
		return (1);
	}

	/* Get result. */
	byte = CSR_READ_4(sc, BGE_NVRAM_RDDATA);

	*dest = (bswap32(byte) >> ((addr % 4) * 8)) & 0xFF;

	/* Disable access. */
	CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access);

	/* Unlock. */
	CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_CLR1);
	CSR_READ_4(sc, BGE_NVRAM_SWARB);

	return (0);
}
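
/*
 * Worked example for the extraction above: a request for addr 5
 * fetches the 32-bit word at NVRAM offset 4 (addr & ~3).  The data
 * register apparently returns that word big-endian, so after
 * bswap32() the byte for addr % 4 == 1 sits 8 bits up, and the shift
 * and mask recover exactly the requested byte.
 */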

/*
 * Read a sequence of bytes from NVRAM.
 */
static int
bnx_read_nvram(struct bnx_softc *sc, caddr_t dest, int off, int cnt)
{
	int err = 0, i;
	uint8_t byte = 0;

	if (sc->bnx_asicrev != BGE_ASICREV_BCM5906)
		return (1);

	for (i = 0; i < cnt; i++) {
		err = bnx_nvram_getbyte(sc, off + i, &byte);
		if (err)
			break;
		*(dest + i) = byte;
	}

	return (err ? 1 : 0);
}

/*
 * Read a byte of data stored in the EEPROM at address 'addr.'  The
 * BCM570x supports both the traditional bitbang interface and an
 * auto access interface for reading the EEPROM.  We use the auto
 * access method.
 */
static uint8_t
bnx_eeprom_getbyte(struct bnx_softc *sc, uint32_t addr, uint8_t *dest)
{
	int i;
	uint32_t byte = 0;

	/*
	 * Enable use of auto EEPROM access so we can avoid
	 * having to use the bitbang method.
	 */
	BNX_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);

	/* Reset the EEPROM, load the clock period. */
	CSR_WRITE_4(sc, BGE_EE_ADDR,
	    BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
	DELAY(20);

	/* Issue the read EEPROM command. */
	CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);

	/* Wait for completion */
	for (i = 0; i < BNX_TIMEOUT * 10; i++) {
		DELAY(10);
		if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
			break;
	}

	if (i == BNX_TIMEOUT * 10) {
		if_printf(&sc->arpcom.ac_if, "eeprom read timed out\n");
		return (1);
	}

	/* Get result. */
	byte = CSR_READ_4(sc, BGE_EE_DATA);

	*dest = (byte >> ((addr % 4) * 8)) & 0xFF;

	return (0);
}
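
/*
 * Unlike the NVRAM path above, no byte swap is applied here: the
 * EEPROM auto-access engine appears to return the word with the
 * lowest-addressed byte already in the least significant lane, so
 * the plain (addr % 4) * 8 shift selects the right byte directly.
 */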

/*
 * Read a sequence of bytes from the EEPROM.
 */
static int
bnx_read_eeprom(struct bnx_softc *sc, caddr_t dest, uint32_t off, size_t len)
{
	size_t i;
	int err;
	uint8_t byte;

	for (byte = 0, err = 0, i = 0; i < len; i++) {
		err = bnx_eeprom_getbyte(sc, off + i, &byte);
		if (err)
			break;
		*(dest + i) = byte;
	}

	return (err ? 1 : 0);
}

static int
bnx_miibus_readreg(device_t dev, int phy, int reg)
{
	struct bnx_softc *sc = device_get_softc(dev);
	uint32_t val;
	int i;

	KASSERT(phy == sc->bnx_phyno,
	    ("invalid phyno %d, should be %d", phy, sc->bnx_phyno));

	/* Clear the autopoll bit if set, otherwise may trigger PCI errors. */
	if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) {
		CSR_WRITE_4(sc, BGE_MI_MODE,
		    sc->bnx_mi_mode & ~BGE_MIMODE_AUTOPOLL);
		DELAY(80);
	}

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ | BGE_MICOMM_BUSY |
	    BGE_MIPHY(phy) | BGE_MIREG(reg));

	/* Poll for the PHY register access to complete. */
	for (i = 0; i < BNX_TIMEOUT; i++) {
		DELAY(10);
		val = CSR_READ_4(sc, BGE_MI_COMM);
		if ((val & BGE_MICOMM_BUSY) == 0) {
			DELAY(5);
			val = CSR_READ_4(sc, BGE_MI_COMM);
			break;
		}
	}
	if (i == BNX_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if, "PHY read timed out "
		    "(phy %d, reg %d, val 0x%08x)\n", phy, reg, val);
		val = 0;
	}

	/* Restore the autopoll bit if necessary. */
	if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) {
		CSR_WRITE_4(sc, BGE_MI_MODE, sc->bnx_mi_mode);
		DELAY(80);
	}

	if (val & BGE_MICOMM_READFAIL)
		return 0;

	return (val & 0xFFFF);
}

static int
bnx_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct bnx_softc *sc = device_get_softc(dev);
	int i;

	KASSERT(phy == sc->bnx_phyno,
	    ("invalid phyno %d, should be %d", phy, sc->bnx_phyno));

	if (sc->bnx_asicrev == BGE_ASICREV_BCM5906 &&
	    (reg == BRGPHY_MII_1000CTL || reg == BRGPHY_MII_AUXCTL))
		return 0;

	/* Clear the autopoll bit if set, otherwise may trigger PCI errors. */
	if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) {
		CSR_WRITE_4(sc, BGE_MI_MODE,
		    sc->bnx_mi_mode & ~BGE_MIMODE_AUTOPOLL);
		DELAY(80);
	}

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE | BGE_MICOMM_BUSY |
	    BGE_MIPHY(phy) | BGE_MIREG(reg) | val);

	for (i = 0; i < BNX_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) {
			DELAY(5);
			CSR_READ_4(sc, BGE_MI_COMM); /* dummy read */
			break;
		}
	}
	if (i == BNX_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if, "PHY write timed out "
		    "(phy %d, reg %d, val %d)\n", phy, reg, val);
	}

	/* Restore the autopoll bit if necessary. */
	if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) {
		CSR_WRITE_4(sc, BGE_MI_MODE, sc->bnx_mi_mode);
		DELAY(80);
	}

	return 0;
}
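
/*
 * Both MII accessors above follow the same discipline: when the
 * MAC's hardware autopoll is active it owns the MDIO bus, so it is
 * parked before the manual BGE_MI_COMM transaction and restored
 * afterwards; per the comments above, skipping this may trigger PCI
 * errors.  The 80us delays bracket the mode change.
 */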

static void
bnx_miibus_statchg(device_t dev)
{
	struct bnx_softc *sc;
	struct mii_data *mii;

	sc = device_get_softc(dev);
	mii = device_get_softc(sc->bnx_miibus);

	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->bnx_link = 1;
			break;
		case IFM_1000_T:
		case IFM_1000_SX:
		case IFM_2500_SX:
			if (sc->bnx_asicrev != BGE_ASICREV_BCM5906)
				sc->bnx_link = 1;
			else
				sc->bnx_link = 0;
			break;
		default:
			sc->bnx_link = 0;
			break;
		}
	} else {
		sc->bnx_link = 0;
	}
	if (sc->bnx_link == 0)
		return;

	BNX_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
	    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) {
		BNX_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
	} else {
		BNX_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
	}

	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
		BNX_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
	} else {
		BNX_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
	}
}

/*
 * Memory management for jumbo frames.
 */
static int
bnx_alloc_jumbo_mem(struct bnx_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct bnx_jslot *entry;
	uint8_t *ptr;
	bus_addr_t paddr;
	int i, error;

	/*
	 * Create tag for jumbo mbufs.
	 * This is really a bit of a kludge.  We allocate a special
	 * jumbo buffer pool which (thanks to the way our DMA
	 * memory allocation works) will consist of contiguous
	 * pages.  This means that even though a jumbo buffer might
	 * be larger than a page size, we don't really need to
	 * map it into more than one DMA segment.  However, the
	 * default mbuf tag will result in multi-segment mappings,
	 * so we have to create a special jumbo mbuf tag that
	 * lets us get away with mapping the jumbo buffers as
	 * a single segment.  I think eventually the driver should
	 * be changed so that it uses ordinary mbufs and cluster
	 * buffers, i.e. jumbo frames can span multiple DMA
	 * descriptors.  But that's a project for another day.
	 */

	/*
	 * Create DMA resources for the jumbo RX ring.
	 */
	error = bnx_dma_block_alloc(sc, BGE_JUMBO_RX_RING_SZ,
	    &sc->bnx_cdata.bnx_rx_jumbo_ring_tag,
	    &sc->bnx_cdata.bnx_rx_jumbo_ring_map,
	    (void *)&sc->bnx_ldata.bnx_rx_jumbo_ring,
	    &sc->bnx_ldata.bnx_rx_jumbo_ring_paddr);
	if (error) {
		if_printf(ifp, "could not create jumbo RX ring\n");
		return error;
	}

	/*
	 * Create DMA resources for the jumbo buffer block.
	 */
	error = bnx_dma_block_alloc(sc, BNX_JMEM,
	    &sc->bnx_cdata.bnx_jumbo_tag,
	    &sc->bnx_cdata.bnx_jumbo_map,
	    (void **)&sc->bnx_ldata.bnx_jumbo_buf,
	    &paddr);
	if (error) {
		if_printf(ifp, "could not create jumbo buffer\n");
		return error;
	}

	SLIST_INIT(&sc->bnx_jfree_listhead);

	/*
	 * Now divide it up into 9K pieces and save the addresses
	 * in an array.  Note that we play an evil trick here by using
	 * the first few bytes in the buffer to hold the address
	 * of the softc structure for this interface.  This is because
	 * bnx_jfree() needs it, but it is called by the mbuf management
	 * code which will not pass it to us explicitly.
	 */
	for (i = 0, ptr = sc->bnx_ldata.bnx_jumbo_buf; i < BNX_JSLOTS; i++) {
		entry = &sc->bnx_cdata.bnx_jslots[i];
		entry->bnx_sc = sc;
		entry->bnx_buf = ptr;
		entry->bnx_paddr = paddr;
		entry->bnx_inuse = 0;
		entry->bnx_slot = i;
		SLIST_INSERT_HEAD(&sc->bnx_jfree_listhead, entry, jslot_link);

		ptr += BNX_JLEN;
		paddr += BNX_JLEN;
	}
	return 0;
}
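
/*
 * Sizing note (an assumption inferred from the slot walk above; the
 * constants live in if_bnxvar.h): the jumbo region is carved into
 * BNX_JSLOTS pieces of BNX_JLEN bytes each, so BNX_JMEM is presumably
 * BNX_JSLOTS * BNX_JLEN, with the virtual pointer and bus address
 * advanced in lockstep for every slot.
 */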

static void
bnx_free_jumbo_mem(struct bnx_softc *sc)
{
	/* Destroy jumbo RX ring. */
	bnx_dma_block_free(sc->bnx_cdata.bnx_rx_jumbo_ring_tag,
	    sc->bnx_cdata.bnx_rx_jumbo_ring_map,
	    sc->bnx_ldata.bnx_rx_jumbo_ring);

	/* Destroy jumbo buffer block. */
	bnx_dma_block_free(sc->bnx_cdata.bnx_jumbo_tag,
	    sc->bnx_cdata.bnx_jumbo_map,
	    sc->bnx_ldata.bnx_jumbo_buf);
}

/*
 * Allocate a jumbo buffer.
 */
static struct bnx_jslot *
bnx_jalloc(struct bnx_softc *sc)
{
	struct bnx_jslot *entry;

	lwkt_serialize_enter(&sc->bnx_jslot_serializer);
	entry = SLIST_FIRST(&sc->bnx_jfree_listhead);
	if (entry) {
		SLIST_REMOVE_HEAD(&sc->bnx_jfree_listhead, jslot_link);
		entry->bnx_inuse = 1;
	} else {
		if_printf(&sc->arpcom.ac_if, "no free jumbo buffers\n");
	}
	lwkt_serialize_exit(&sc->bnx_jslot_serializer);
	return (entry);
}

/*
 * Adjust usage count on a jumbo buffer.
 */
static void
bnx_jref(void *arg)
{
	struct bnx_jslot *entry = (struct bnx_jslot *)arg;
	struct bnx_softc *sc = entry->bnx_sc;

	if (sc == NULL)
		panic("bnx_jref: can't find softc pointer!");

	if (&sc->bnx_cdata.bnx_jslots[entry->bnx_slot] != entry) {
		panic("bnx_jref: asked to reference buffer "
		    "that we don't manage!");
	} else if (entry->bnx_inuse == 0) {
		panic("bnx_jref: buffer already free!");
	} else {
		atomic_add_int(&entry->bnx_inuse, 1);
	}
}

/*
 * Release a jumbo buffer.
 */
static void
bnx_jfree(void *arg)
{
	struct bnx_jslot *entry = (struct bnx_jslot *)arg;
	struct bnx_softc *sc = entry->bnx_sc;

	if (sc == NULL)
		panic("bnx_jfree: can't find softc pointer!");

	if (&sc->bnx_cdata.bnx_jslots[entry->bnx_slot] != entry) {
		panic("bnx_jfree: asked to free buffer that we don't manage!");
	} else if (entry->bnx_inuse == 0) {
		panic("bnx_jfree: buffer already free!");
	} else {
		/*
		 * Possible MP race to 0, use the serializer.  The atomic insn
		 * is still needed for races against bnx_jref().
		 */
		lwkt_serialize_enter(&sc->bnx_jslot_serializer);
		atomic_subtract_int(&entry->bnx_inuse, 1);
		if (entry->bnx_inuse == 0) {
			SLIST_INSERT_HEAD(&sc->bnx_jfree_listhead,
			    entry, jslot_link);
		}
		lwkt_serialize_exit(&sc->bnx_jslot_serializer);
	}
}

/*
 * Initialize a standard receive ring descriptor.
 */
static int
bnx_newbuf_std(struct bnx_softc *sc, int i, int init)
{
	struct mbuf *m_new = NULL;
	bus_dma_segment_t seg;
	bus_dmamap_t map;
	int error, nsegs;

	m_new = m_getcl(init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m_new == NULL)
		return ENOBUFS;
	m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
	m_adj(m_new, ETHER_ALIGN);

	error = bus_dmamap_load_mbuf_segment(sc->bnx_cdata.bnx_rx_mtag,
	    sc->bnx_cdata.bnx_rx_tmpmap, m_new,
	    &seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m_new);
		return error;
	}

	if (!init) {
		bus_dmamap_sync(sc->bnx_cdata.bnx_rx_mtag,
		    sc->bnx_cdata.bnx_rx_std_dmamap[i],
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->bnx_cdata.bnx_rx_mtag,
		    sc->bnx_cdata.bnx_rx_std_dmamap[i]);
	}

	map = sc->bnx_cdata.bnx_rx_tmpmap;
	sc->bnx_cdata.bnx_rx_tmpmap = sc->bnx_cdata.bnx_rx_std_dmamap[i];
	sc->bnx_cdata.bnx_rx_std_dmamap[i] = map;

	sc->bnx_cdata.bnx_rx_std_chain[i].bnx_mbuf = m_new;
	sc->bnx_cdata.bnx_rx_std_chain[i].bnx_paddr = seg.ds_addr;

	bnx_setup_rxdesc_std(sc, i);
	return 0;
}

static void
bnx_setup_rxdesc_std(struct bnx_softc *sc, int i)
{
	struct bnx_rxchain *rc;
	struct bge_rx_bd *r;

	rc = &sc->bnx_cdata.bnx_rx_std_chain[i];
	r = &sc->bnx_ldata.bnx_rx_std_ring[i];

	r->bge_addr.bge_addr_lo = BGE_ADDR_LO(rc->bnx_paddr);
	r->bge_addr.bge_addr_hi = BGE_ADDR_HI(rc->bnx_paddr);
	r->bge_len = rc->bnx_mbuf->m_len;
	r->bge_idx = i;
	r->bge_flags = BGE_RXBDFLAG_END;
}
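
/*
 * Alignment note for the two functions above: bnx_newbuf_std() trims
 * ETHER_ALIGN (2) bytes off the front of each cluster before loading
 * it, so the 14-byte Ethernet header lands where the IP header that
 * follows it ends up 4-byte aligned.  The descriptor written here
 * simply mirrors the resulting bus address and shortened m_len.
 */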

/*
 * Initialize a jumbo receive ring descriptor.  This allocates
 * a jumbo buffer from the pool managed internally by the driver.
 */
static int
bnx_newbuf_jumbo(struct bnx_softc *sc, int i, int init)
{
	struct mbuf *m_new = NULL;
	struct bnx_jslot *buf;
	bus_addr_t paddr;

	/* Allocate the mbuf. */
	MGETHDR(m_new, init ? MB_WAIT : MB_DONTWAIT, MT_DATA);
	if (m_new == NULL)
		return ENOBUFS;

	/* Allocate the jumbo buffer */
	buf = bnx_jalloc(sc);
	if (buf == NULL) {
		m_freem(m_new);
		return ENOBUFS;
	}

	/* Attach the buffer to the mbuf. */
	m_new->m_ext.ext_arg = buf;
	m_new->m_ext.ext_buf = buf->bnx_buf;
	m_new->m_ext.ext_free = bnx_jfree;
	m_new->m_ext.ext_ref = bnx_jref;
	m_new->m_ext.ext_size = BNX_JUMBO_FRAMELEN;

	m_new->m_flags |= M_EXT;

	m_new->m_data = m_new->m_ext.ext_buf;
	m_new->m_len = m_new->m_pkthdr.len = m_new->m_ext.ext_size;

	paddr = buf->bnx_paddr;
	m_adj(m_new, ETHER_ALIGN);
	paddr += ETHER_ALIGN;

	/* Save necessary information */
	sc->bnx_cdata.bnx_rx_jumbo_chain[i].bnx_mbuf = m_new;
	sc->bnx_cdata.bnx_rx_jumbo_chain[i].bnx_paddr = paddr;

	/* Set up the descriptor. */
	bnx_setup_rxdesc_jumbo(sc, i);
	return 0;
}

static void
bnx_setup_rxdesc_jumbo(struct bnx_softc *sc, int i)
{
	struct bge_rx_bd *r;
	struct bnx_rxchain *rc;

	r = &sc->bnx_ldata.bnx_rx_jumbo_ring[i];
	rc = &sc->bnx_cdata.bnx_rx_jumbo_chain[i];

	r->bge_addr.bge_addr_lo = BGE_ADDR_LO(rc->bnx_paddr);
	r->bge_addr.bge_addr_hi = BGE_ADDR_HI(rc->bnx_paddr);
	r->bge_len = rc->bnx_mbuf->m_len;
	r->bge_idx = i;
	r->bge_flags = BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING;
}

static int
bnx_init_rx_ring_std(struct bnx_softc *sc)
{
	int i, error;

	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
		error = bnx_newbuf_std(sc, i, 1);
		if (error)
			return error;
	}

	sc->bnx_std = BGE_STD_RX_RING_CNT - 1;
	bnx_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bnx_std);

	return (0);
}

static void
bnx_free_rx_ring_std(struct bnx_softc *sc)
{
	int i;

	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
		struct bnx_rxchain *rc = &sc->bnx_cdata.bnx_rx_std_chain[i];

		if (rc->bnx_mbuf != NULL) {
			bus_dmamap_unload(sc->bnx_cdata.bnx_rx_mtag,
			    sc->bnx_cdata.bnx_rx_std_dmamap[i]);
			m_freem(rc->bnx_mbuf);
			rc->bnx_mbuf = NULL;
		}
		bzero(&sc->bnx_ldata.bnx_rx_std_ring[i],
		    sizeof(struct bge_rx_bd));
	}
}

static int
bnx_init_rx_ring_jumbo(struct bnx_softc *sc)
{
	struct bge_rcb *rcb;
	int i, error;

	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
		error = bnx_newbuf_jumbo(sc, i, 1);
		if (error)
			return error;
	}

	sc->bnx_jumbo = BGE_JUMBO_RX_RING_CNT - 1;

	rcb = &sc->bnx_ldata.bnx_info.bnx_jumbo_rx_rcb;
	rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 0);
	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);

	bnx_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bnx_jumbo);

	return (0);
}

static void
bnx_free_rx_ring_jumbo(struct bnx_softc *sc)
{
	int i;

	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
		struct bnx_rxchain *rc = &sc->bnx_cdata.bnx_rx_jumbo_chain[i];

		if (rc->bnx_mbuf != NULL) {
			m_freem(rc->bnx_mbuf);
			rc->bnx_mbuf = NULL;
		}
		bzero(&sc->bnx_ldata.bnx_rx_jumbo_ring[i],
		    sizeof(struct bge_rx_bd));
	}
}

static void
bnx_free_tx_ring(struct bnx_softc *sc)
{
	int i;

	for (i = 0; i < BGE_TX_RING_CNT; i++) {
		if (sc->bnx_cdata.bnx_tx_chain[i] != NULL) {
			bus_dmamap_unload(sc->bnx_cdata.bnx_tx_mtag,
			    sc->bnx_cdata.bnx_tx_dmamap[i]);
			m_freem(sc->bnx_cdata.bnx_tx_chain[i]);
			sc->bnx_cdata.bnx_tx_chain[i] = NULL;
		}
		bzero(&sc->bnx_ldata.bnx_tx_ring[i],
		    sizeof(struct bge_tx_bd));
	}
}

static int
bnx_init_tx_ring(struct bnx_softc *sc)
{
	sc->bnx_txcnt = 0;
	sc->bnx_tx_saved_considx = 0;
	sc->bnx_tx_prodidx = 0;

	/* Initialize transmit producer index for host-memory send ring. */
	bnx_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bnx_tx_prodidx);
	bnx_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);

	return (0);
}

static void
bnx_setmulti(struct bnx_softc *sc)
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	uint32_t hashes[4] = { 0, 0, 0, 0 };
	int h, i;

	ifp = &sc->arpcom.ac_if;

	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		for (i = 0; i < 4; i++)
			CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
		return;
	}

	/* First, zot all the existing filters. */
	for (i = 0; i < 4; i++)
		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);

	/* Now program new ones. */
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		h = ether_crc32_le(
		    LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
		    ETHER_ADDR_LEN) & 0x7f;
		hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
	}

	for (i = 0; i < 4; i++)
		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
}
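
/*
 * Hash walk-through for the filter above: the low 7 bits of the
 * little-endian CRC32 of each multicast address index one of 128
 * filter bits.  Bits 6-5 (h & 0x60, shifted down by 5) pick one of
 * the four 32-bit BGE_MAR registers and bits 4-0 (h & 0x1F) pick the
 * bit within it; e.g. h == 0x4a sets bit 10 of register 2.
 */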

/*
 * Do endian, PCI and DMA initialization.  Also check the on-board ROM
 * self-test results.
 */
static int
bnx_chipinit(struct bnx_softc *sc)
{
	uint32_t dma_rw_ctl, mode_ctl;
	int i;

	/* Set endian type before we access any non-PCI registers. */
	pci_write_config(sc->bnx_dev, BGE_PCI_MISC_CTL,
	    BGE_INIT | BGE_PCIMISCCTL_TAGGED_STATUS, 4);

	/* Clear the MAC control register */
	CSR_WRITE_4(sc, BGE_MAC_MODE, 0);

	/*
	 * Clear the MAC statistics block in the NIC's
	 * internal memory.
	 */
	for (i = BGE_STATS_BLOCK;
	    i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t))
		BNX_MEMWIN_WRITE(sc, i, 0);

	for (i = BGE_STATUS_BLOCK;
	    i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t))
		BNX_MEMWIN_WRITE(sc, i, 0);

	if (BNX_IS_57765_FAMILY(sc)) {
		uint32_t val;

		if (sc->bnx_chipid == BGE_CHIPID_BCM57765_A0) {
			mode_ctl = CSR_READ_4(sc, BGE_MODE_CTL);
			val = mode_ctl & ~BGE_MODECTL_PCIE_PORTS;

			/* Access the lower 1K of PL PCI-E block registers. */
			CSR_WRITE_4(sc, BGE_MODE_CTL,
			    val | BGE_MODECTL_PCIE_PL_SEL);

			val = CSR_READ_4(sc, BGE_PCIE_PL_LO_PHYCTL5);
			val |= BGE_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ;
			CSR_WRITE_4(sc, BGE_PCIE_PL_LO_PHYCTL5, val);

			CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl);
		}
		if (sc->bnx_chiprev != BGE_CHIPREV_57765_AX) {
			mode_ctl = CSR_READ_4(sc, BGE_MODE_CTL);
			val = mode_ctl & ~BGE_MODECTL_PCIE_PORTS;

			/* Access the lower 1K of DL PCI-E block registers. */
			CSR_WRITE_4(sc, BGE_MODE_CTL,
			    val | BGE_MODECTL_PCIE_DL_SEL);

			val = CSR_READ_4(sc, BGE_PCIE_DL_LO_FTSMAX);
			val &= ~BGE_PCIE_DL_LO_FTSMAX_MASK;
			val |= BGE_PCIE_DL_LO_FTSMAX_VAL;
			CSR_WRITE_4(sc, BGE_PCIE_DL_LO_FTSMAX, val);

			CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl);
		}

		val = CSR_READ_4(sc, BGE_CPMU_LSPD_10MB_CLK);
		val &= ~BGE_CPMU_LSPD_10MB_MACCLK_MASK;
		val |= BGE_CPMU_LSPD_10MB_MACCLK_6_25;
		CSR_WRITE_4(sc, BGE_CPMU_LSPD_10MB_CLK, val);
	}

	/*
	 * Set up the PCI DMA control register.
	 */
	dma_rw_ctl = pci_read_config(sc->bnx_dev, BGE_PCI_DMA_RW_CTL, 4);
	/*
	 * Disable 32-byte cache alignment for DMA writes to host memory.
	 *
	 * NOTE:
	 * 64-byte cache alignment for DMA writes to host memory is still
	 * enabled.
	 */
	dma_rw_ctl |= BGE_PCIDMARWCTL_DIS_CACHE_ALIGNMENT;
	if (sc->bnx_chipid == BGE_CHIPID_BCM57765_A0)
		dma_rw_ctl &= ~BGE_PCIDMARWCTL_CRDRDR_RDMA_MRRS_MSK;
	/*
	 * Enable HW workaround for controllers that misinterpret
	 * a status tag update and leave interrupts permanently
	 * disabled.
	 */
	if (sc->bnx_asicrev != BGE_ASICREV_BCM5717 &&
	    !BNX_IS_57765_FAMILY(sc))
		dma_rw_ctl |= BGE_PCIDMARWCTL_TAGGED_STATUS_WA;
	if (bootverbose) {
		if_printf(&sc->arpcom.ac_if, "DMA read/write %#x\n",
		    dma_rw_ctl);
	}
	pci_write_config(sc->bnx_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);

	/*
	 * Set up general mode register.
	 */
	mode_ctl = bnx_dma_swap_options(sc) | BGE_MODECTL_MAC_ATTN_INTR |
	    BGE_MODECTL_HOST_SEND_BDS | BGE_MODECTL_TX_NO_PHDR_CSUM;
	CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl);

	/*
	 * Disable memory write invalidate.  Apparently it is not supported
	 * properly by these devices.  Also ensure that INTx isn't disabled,
	 * as these chips need it even when using MSI.
	 */
	PCI_CLRBIT(sc->bnx_dev, BGE_PCI_CMD,
	    (PCIM_CMD_MWRICEN | PCIM_CMD_INTxDIS), 4);

	/* Set the timer prescaler (always 66MHz) */
	CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/);

	if (sc->bnx_asicrev == BGE_ASICREV_BCM5906) {
		DELAY(40);	/* XXX */

		/* Put PHY into ready state */
		BNX_CLRBIT(sc, BGE_MISC_CFG, BGE_MISCCFG_EPHY_IDDQ);
		CSR_READ_4(sc, BGE_MISC_CFG);	/* Flush */
		DELAY(40);
	}

	return (0);
}

static int
bnx_blockinit(struct bnx_softc *sc)
{
	struct bge_rcb *rcb;
	bus_size_t vrcb;
	bge_hostaddr taddr;
	uint32_t val;
	int i, limit;

	/*
	 * Initialize the memory window pointer register so that
	 * we can access the first 32K of internal NIC RAM.  This will
	 * allow us to set up the TX send ring RCBs and the RX return
	 * ring RCBs, plus other things which live in NIC memory.
	 */
	CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);

	/* Configure mbuf pool watermarks */
	if (BNX_IS_57765_PLUS(sc)) {
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
		if (sc->arpcom.ac_if.if_mtu > ETHERMTU) {
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x7e);
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xea);
		} else {
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x2a);
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xa0);
		}
	} else if (sc->bnx_asicrev == BGE_ASICREV_BCM5906) {
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x04);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x10);
	} else {
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
	}

	/* Configure DMA resource watermarks */
	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);

	/* Enable buffer manager */
	val = BGE_BMANMODE_ENABLE | BGE_BMANMODE_LOMBUF_ATTN;
	/*
	 * Change the arbitration algorithm of TXMBUF read request to
	 * round-robin instead of priority based for BCM5719.  When
	 * TXFIFO is almost empty, RDMA will hold its request until
	 * TXFIFO is not almost empty.
	 */
	if (sc->bnx_asicrev == BGE_ASICREV_BCM5719)
		val |= BGE_BMANMODE_NO_TX_UNDERRUN;
	if (sc->bnx_asicrev == BGE_ASICREV_BCM5717 ||
	    sc->bnx_chipid == BGE_CHIPID_BCM5719_A0 ||
	    sc->bnx_chipid == BGE_CHIPID_BCM5720_A0)
		val |= BGE_BMANMODE_LOMBUF_ATTN;
	CSR_WRITE_4(sc, BGE_BMAN_MODE, val);

	/* Poll for buffer manager start indication */
	for (i = 0; i < BNX_TIMEOUT; i++) {
		if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
			break;
		DELAY(10);
	}

	if (i == BNX_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if,
		    "buffer manager failed to start\n");
		return (ENXIO);
	}

	/* Enable flow-through queues */
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);

	/* Wait until queue initialization is complete */
	for (i = 0; i < BNX_TIMEOUT; i++) {
		if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
			break;
		DELAY(10);
	}

	if (i == BNX_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if,
		    "flow-through queue init failed\n");
		return (ENXIO);
	}

	/*
	 * Summary of rings supported by the controller:
	 *
	 * Standard Receive Producer Ring
	 * - This ring is used to feed receive buffers for "standard"
	 *   sized frames (typically 1536 bytes) to the controller.
	 *
	 * Jumbo Receive Producer Ring
	 * - This ring is used to feed receive buffers for jumbo sized
	 *   frames (i.e. anything bigger than the "standard" frames)
	 *   to the controller.
	 *
	 * Mini Receive Producer Ring
	 * - This ring is used to feed receive buffers for "mini"
	 *   sized frames to the controller.
	 * - This feature required external memory for the controller
	 *   but was never used in a production system.  Should always
	 *   be disabled.
	 *
	 * Receive Return Ring
	 * - After the controller has placed an incoming frame into a
	 *   receive buffer that buffer is moved into a receive return
	 *   ring.  The driver is then responsible for passing the
	 *   buffer up to the stack.  Many versions of the controller
	 *   support multiple RR rings.
	 *
	 * Send Ring
	 * - This ring is used for outgoing frames.  Many versions of
	 *   the controller support multiple send rings.
	 */

	/* Initialize the standard receive producer ring control block. */
	rcb = &sc->bnx_ldata.bnx_info.bnx_std_rx_rcb;
	rcb->bge_hostaddr.bge_addr_lo =
	    BGE_ADDR_LO(sc->bnx_ldata.bnx_rx_std_ring_paddr);
	rcb->bge_hostaddr.bge_addr_hi =
	    BGE_ADDR_HI(sc->bnx_ldata.bnx_rx_std_ring_paddr);
	if (BNX_IS_57765_PLUS(sc)) {
		/*
		 * Bits 31-16: Programmable ring size (2048, 1024, 512, .., 32)
		 * Bits 15-2 : Maximum RX frame size
		 * Bit 1     : 1 = Ring Disabled, 0 = Ring Enabled
		 * Bit 0     : Reserved
		 */
		rcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(512, BNX_MAX_FRAMELEN << 2);
	} else {
		/*
		 * Bits 31-16: Programmable ring size (512, 256, 128, 64, 32)
		 * Bits 15-2 : Reserved (should be 0)
		 * Bit 1     : 1 = Ring Disabled, 0 = Ring Enabled
		 * Bit 0     : Reserved
		 */
		rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
	}
	if (BNX_IS_5717_PLUS(sc))
		rcb->bge_nicaddr = BGE_STD_RX_RINGS_5717;
	else
		rcb->bge_nicaddr = BGE_STD_RX_RINGS;
	/* Write the standard receive producer ring control block. */
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
	/* Reset the standard receive producer ring producer index. */
	bnx_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0);

	/*
	 * Initialize the jumbo RX producer ring control
	 * block.  We set the 'ring disabled' bit in the
	 * flags field until we're actually ready to start
	 * using this ring (i.e. once we set the MTU
	 * high enough to require it).
	 */
	if (BNX_IS_JUMBO_CAPABLE(sc)) {
		rcb = &sc->bnx_ldata.bnx_info.bnx_jumbo_rx_rcb;
		/* Get the jumbo receive producer ring RCB parameters. */
		rcb->bge_hostaddr.bge_addr_lo =
		    BGE_ADDR_LO(sc->bnx_ldata.bnx_rx_jumbo_ring_paddr);
		rcb->bge_hostaddr.bge_addr_hi =
		    BGE_ADDR_HI(sc->bnx_ldata.bnx_rx_jumbo_ring_paddr);
		rcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(BNX_MAX_FRAMELEN,
		    BGE_RCB_FLAG_RING_DISABLED);
		if (BNX_IS_5717_PLUS(sc))
			rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS_5717;
		else
			rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
		    rcb->bge_hostaddr.bge_addr_hi);
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
		    rcb->bge_hostaddr.bge_addr_lo);
		/* Program the jumbo receive producer ring RCB parameters. */
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
		    rcb->bge_maxlen_flags);
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
		/* Reset the jumbo receive producer ring producer index. */
		bnx_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
	}

	/* Choose de-pipeline mode for BCM5906 A0, A1 and A2. */
	if (sc->bnx_asicrev == BGE_ASICREV_BCM5906 &&
	    (sc->bnx_chipid == BGE_CHIPID_BCM5906_A0 ||
	     sc->bnx_chipid == BGE_CHIPID_BCM5906_A1 ||
	     sc->bnx_chipid == BGE_CHIPID_BCM5906_A2)) {
		CSR_WRITE_4(sc, BGE_ISO_PKT_TX,
		    (CSR_READ_4(sc, BGE_ISO_PKT_TX) & ~3) | 2);
	}

	/*
	 * The BD ring replenish thresholds control how often the
	 * hardware fetches new BD's from the producer rings in host
	 * memory.  Setting the value too low on a busy system can
	 * starve the hardware and reduce the throughput.
	 *
	 * Set the BD ring replenish thresholds.  The recommended
	 * values are 1/8th the number of descriptors allocated to
	 * each ring.
	 */
	val = 8;
	CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, val);
	if (BNX_IS_JUMBO_CAPABLE(sc)) {
		CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH,
		    BGE_JUMBO_RX_RING_CNT/8);
	}
	if (BNX_IS_57765_PLUS(sc)) {
		CSR_WRITE_4(sc, BGE_STD_REPLENISH_LWM, 32);
		CSR_WRITE_4(sc, BGE_JMB_REPLENISH_LWM, 16);
	}

	/*
	 * Disable all send rings by setting the 'ring disabled' bit
	 * in the flags field of all the TX send ring control blocks,
	 * located in NIC memory.
	 */
	if (BNX_IS_5717_PLUS(sc))
		limit = 4;
	else if (BNX_IS_57765_FAMILY(sc))
		limit = 2;
	else
		limit = 1;
	vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
	for (i = 0; i < limit; i++) {
		RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
		RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
		vrcb += sizeof(struct bge_rcb);
	}

	/* Configure send ring RCB 0 (we use only the first ring) */
	vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
	BGE_HOSTADDR(taddr, sc->bnx_ldata.bnx_tx_ring_paddr);
	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
	if (BNX_IS_5717_PLUS(sc)) {
		RCB_WRITE_4(sc, vrcb, bge_nicaddr, BGE_SEND_RING_5717);
	} else {
		RCB_WRITE_4(sc, vrcb, bge_nicaddr,
		    BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
	}
	RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
	    BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));

	/*
	 * Disable all receive return rings by setting the
	 * 'ring disabled' bit in the flags field of all the receive
	 * return ring control blocks, located in NIC memory.
	 */
	if (BNX_IS_5717_PLUS(sc)) {
		/* Should be 17, use 16 until we get an SRAM map. */
		limit = 16;
	} else if (BNX_IS_57765_FAMILY(sc)) {
		limit = 4;
	} else {
		limit = 1;
	}
	/* Disable all receive return rings. */
	vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
	for (i = 0; i < limit; i++) {
		RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0);
		RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0);
		RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
		    BGE_RCB_FLAG_RING_DISABLED);
		RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
		bnx_writembx(sc, BGE_MBX_RX_CONS0_LO +
		    (i * (sizeof(uint64_t))), 0);
		vrcb += sizeof(struct bge_rcb);
	}

	/*
	 * Set up receive return ring 0.  Note that the NIC address
	 * for RX return rings is 0x0.  The return rings live entirely
	 * within the host, so the nicaddr field in the RCB isn't used.
	 */
	vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
	BGE_HOSTADDR(taddr, sc->bnx_ldata.bnx_rx_return_ring_paddr);
	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
	RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
	RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
	    BGE_RCB_MAXLEN_FLAGS(sc->bnx_return_ring_cnt, 0));

	/* Set random backoff seed for TX */
	CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
	    sc->arpcom.ac_enaddr[0] + sc->arpcom.ac_enaddr[1] +
	    sc->arpcom.ac_enaddr[2] + sc->arpcom.ac_enaddr[3] +
	    sc->arpcom.ac_enaddr[4] + sc->arpcom.ac_enaddr[5] +
	    BGE_TX_BACKOFF_SEED_MASK);

	/* Set inter-packet gap */
	val = 0x2620;
	if (sc->bnx_asicrev == BGE_ASICREV_BCM5720) {
		val |= CSR_READ_4(sc, BGE_TX_LENGTHS) &
		    (BGE_TXLEN_JMB_FRM_LEN_MSK | BGE_TXLEN_CNT_DN_VAL_MSK);
	}
	CSR_WRITE_4(sc, BGE_TX_LENGTHS, val);

	/*
	 * Specify which ring to use for packets that don't match
	 * any RX rules.
	 */
	CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);

	/*
	 * Configure number of RX lists.  One interrupt distribution
	 * list, sixteen active lists, one bad frames class.
	 */
	CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);

	/* Initialize RX list placement stats mask. */
	CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
	CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);

	/* Disable host coalescing until we get it set up */
	CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);

	/* Poll to make sure it's shut down. */
	for (i = 0; i < BNX_TIMEOUT; i++) {
		if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
			break;
		DELAY(10);
	}

	if (i == BNX_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if,
		    "host coalescing engine failed to idle\n");
		return (ENXIO);
	}

	/* Set up host coalescing defaults */
	CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bnx_rx_coal_ticks);
	CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bnx_tx_coal_ticks);
	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bnx_rx_coal_bds);
	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bnx_tx_coal_bds);
	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, sc->bnx_rx_coal_bds_int);
	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, sc->bnx_tx_coal_bds_int);
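
	/*
	 * Semantics of the six writes above (these are the values the
	 * bnx_sysctl_*_coal_* handlers adjust at runtime through
	 * bnx_coal_change()): the chip coalesces until either the tick
	 * timer expires or the BD count is reached, whichever comes
	 * first, and the *_BDS_INT variants bound the additional
	 * coalescing allowed once a status block update is already
	 * pending.  Presumably this is why the polling code this
	 * commit touches must also commit parameter changes here, even
	 * though polling does not pace itself with interrupts.
	 */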

	/* Set up address of status block */
	bzero(sc->bnx_ldata.bnx_status_block, BGE_STATUS_BLK_SZ);
	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI,
	    BGE_ADDR_HI(sc->bnx_ldata.bnx_status_block_paddr));
	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
	    BGE_ADDR_LO(sc->bnx_ldata.bnx_status_block_paddr));

	/* Set up status block partial update size. */
	val = BGE_STATBLKSZ_32BYTE;
#if 0
	/*
	 * Does not seem to have visible effect in both
	 * bulk data (1472B UDP datagram) and tiny data
	 * (18B UDP datagram) TX tests.
	 */
	val |= BGE_HCCMODE_CLRTICK_TX;
#endif
	/* Turn on host coalescing state machine */
	CSR_WRITE_4(sc, BGE_HCC_MODE, val | BGE_HCCMODE_ENABLE);

	/* Turn on RX BD completion state machine and enable attentions */
	CSR_WRITE_4(sc, BGE_RBDC_MODE,
	    BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);

	/* Turn on RX list placement state machine */
	CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);

	val = BGE_MACMODE_TXDMA_ENB | BGE_MACMODE_RXDMA_ENB |
	    BGE_MACMODE_RX_STATS_CLEAR | BGE_MACMODE_TX_STATS_CLEAR |
	    BGE_MACMODE_RX_STATS_ENB | BGE_MACMODE_TX_STATS_ENB |
	    BGE_MACMODE_FRMHDR_DMA_ENB;

	if (sc->bnx_flags & BNX_FLAG_TBI)
		val |= BGE_PORTMODE_TBI;
	else if (sc->bnx_flags & BNX_FLAG_MII_SERDES)
		val |= BGE_PORTMODE_GMII;
	else
		val |= BGE_PORTMODE_MII;

	/* Turn on DMA, clear stats */
	CSR_WRITE_4(sc, BGE_MAC_MODE, val);

	/* Set misc. local control, enable interrupts on attentions */
	CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);

#ifdef notdef
	/* Assert GPIO pins for PHY reset */
	BNX_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
	    BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
	BNX_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
	    BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
#endif

	/* Turn on write DMA state machine */
	val = BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS;
	/* Enable host coalescing bug fix. */
	val |= BGE_WDMAMODE_STATUS_TAG_FIX;
	if (sc->bnx_asicrev == BGE_ASICREV_BCM5785) {
		/* Request larger DMA burst size to get better performance. */
		val |= BGE_WDMAMODE_BURST_ALL_DATA;
	}
	CSR_WRITE_4(sc, BGE_WDMA_MODE, val);
	DELAY(40);

	if (BNX_IS_57765_PLUS(sc)) {
		uint32_t dmactl;

		dmactl = CSR_READ_4(sc, BGE_RDMA_RSRVCTRL);
		/*
		 * Adjust tx margin to prevent TX data corruption and
		 * fix internal FIFO overflow.
		 */
		if (sc->bnx_asicrev == BGE_ASICREV_BCM5719 ||
		    sc->bnx_asicrev == BGE_ASICREV_BCM5720) {
			dmactl &= ~(BGE_RDMA_RSRVCTRL_FIFO_LWM_MASK |
			    BGE_RDMA_RSRVCTRL_FIFO_HWM_MASK |
			    BGE_RDMA_RSRVCTRL_TXMRGN_MASK);
			dmactl |= BGE_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
			    BGE_RDMA_RSRVCTRL_FIFO_HWM_1_5K |
			    BGE_RDMA_RSRVCTRL_TXMRGN_320B;
		}
		/*
		 * Enable fix for read DMA FIFO overruns.
		 * The fix is to limit the number of RX BDs
		 * the hardware would fetch at a time.
		 */
		CSR_WRITE_4(sc, BGE_RDMA_RSRVCTRL,
		    dmactl | BGE_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
	}

	if (sc->bnx_asicrev == BGE_ASICREV_BCM5719) {
		CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL,
		    CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) |
		    BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_4K |
		    BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K);
	} else if (sc->bnx_asicrev == BGE_ASICREV_BCM5720) {
		/*
		 * Allow 4KB burst length reads for non-LSO frames.
		 * Enable 512B burst length reads for buffer descriptors.
		 */
		CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL,
		    CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) |
		    BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_512 |
		    BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K);
	}

	/* Turn on read DMA state machine */
	val = BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS;
	if (sc->bnx_asicrev == BGE_ASICREV_BCM5717)
		val |= BGE_RDMAMODE_MULT_DMA_RD_DIS;
	if (sc->bnx_asicrev == BGE_ASICREV_BCM5784 ||
	    sc->bnx_asicrev == BGE_ASICREV_BCM5785 ||
	    sc->bnx_asicrev == BGE_ASICREV_BCM57780) {
		val |= BGE_RDMAMODE_BD_SBD_CRPT_ATTN |
		    BGE_RDMAMODE_MBUF_RBD_CRPT_ATTN |
		    BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN;
	}
	if (sc->bnx_asicrev == BGE_ASICREV_BCM5720) {
		val |= CSR_READ_4(sc, BGE_RDMA_MODE) &
		    BGE_RDMAMODE_H2BNC_VLAN_DET;
		/*
		 * Allow multiple outstanding read requests from
		 * non-LSO read DMA engine.
		 */
		val &= ~BGE_RDMAMODE_MULT_DMA_RD_DIS;
	}
	if (sc->bnx_flags & BNX_FLAG_TSO)
		val |= BGE_RDMAMODE_TSO4_ENABLE;
	val |= BGE_RDMAMODE_FIFO_LONG_BURST;
	CSR_WRITE_4(sc, BGE_RDMA_MODE, val);
	DELAY(40);

	/* Turn on RX data completion state machine */
	CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);

	/* Turn on RX BD initiator state machine */
	CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);

	/* Turn on RX data and RX BD initiator state machine */
	CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);

	/* Turn on send BD completion state machine */
	CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);

	/* Turn on send data completion state machine */
	val = BGE_SDCMODE_ENABLE;
	if (sc->bnx_asicrev == BGE_ASICREV_BCM5761)
		val |= BGE_SDCMODE_CDELAY;
	CSR_WRITE_4(sc, BGE_SDC_MODE, val);

	/* Turn on send data initiator state machine */
	if (sc->bnx_flags & BNX_FLAG_TSO) {
		CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE |
		    BGE_SDIMODE_HW_LSO_PRE_DMA);
	} else {
		CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
	}

	/* Turn on send BD initiator state machine */
	CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);

	/* Turn on send BD selector state machine */
	CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);

	CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
	CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
	    BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);

	/* ack/clear link change events */
	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
	    BGE_MACSTAT_LINK_CHANGED);
	CSR_WRITE_4(sc, BGE_MI_STS, 0);

	/*
	 * Enable attention when the link has changed state for
	 * devices that use auto polling.
	 */
	if (sc->bnx_flags & BNX_FLAG_TBI) {
		CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
	} else {
		if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) {
			CSR_WRITE_4(sc, BGE_MI_MODE, sc->bnx_mi_mode);
			DELAY(80);
		}
	}

	/*
	 * Clear any pending link state attention.
	 * Otherwise some link state change events may be lost until attention
	 * is cleared by bnx_intr() -> bnx_softc.bnx_link_upd() sequence.
	 * It's not necessary on newer BCM chips - perhaps enabling link
	 * state change attentions implies clearing pending attention.
	 */
	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
	    BGE_MACSTAT_LINK_CHANGED);

	/* Enable link state change attentions. */
	BNX_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);

	return (0);
}
1716
1717/*
1718 * Probe for a Broadcom chip. Check the PCI vendor and device IDs
 1719 * against our list and return the device name if we find a match. Note
 1720 * that since the Broadcom controller contains VPD support, the product
 1721 * name string could be read from the controller itself instead of the
 1722 * compiled-in string; this driver, however, simply reports the matching
 1723 * compiled-in name.
1724 */
1725static int
1726bnx_probe(device_t dev)
1727{
1728 const struct bnx_type *t;
1729 uint16_t product, vendor;
1730
1731 if (!pci_is_pcie(dev))
1732 return ENXIO;
1733
1734 product = pci_get_device(dev);
1735 vendor = pci_get_vendor(dev);
1736
1737 for (t = bnx_devs; t->bnx_name != NULL; t++) {
1738 if (vendor == t->bnx_vid && product == t->bnx_did)
1739 break;
1740 }
1741 if (t->bnx_name == NULL)
1742 return ENXIO;
1743
1744 device_set_desc(dev, t->bnx_name);
1745 return 0;
1746}
1747
1748static int
1749bnx_attach(device_t dev)
1750{
1751 struct ifnet *ifp;
1752 struct bnx_softc *sc;
1753 uint32_t hwcfg = 0, misccfg;
1754 int error = 0, rid, capmask;
1755 uint8_t ether_addr[ETHER_ADDR_LEN];
1756 uint16_t product, vendor;
1757 driver_intr_t *intr_func;
1758 uintptr_t mii_priv = 0;
1759 u_int intr_flags;
1760#ifdef BNX_TSO_DEBUG
1761 char desc[32];
1762 int i;
1763#endif
1764
1765 sc = device_get_softc(dev);
1766 sc->bnx_dev = dev;
 1767 callout_init_mp(&sc->bnx_stat_timer);
 1768 callout_init_mp(&sc->bnx_intr_timer);
1769 lwkt_serialize_init(&sc->bnx_jslot_serializer);
1770
1771 product = pci_get_device(dev);
1772 vendor = pci_get_vendor(dev);
1773
1774#ifndef BURN_BRIDGES
1775 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
1776 uint32_t irq, mem;
1777
1778 irq = pci_read_config(dev, PCIR_INTLINE, 4);
1779 mem = pci_read_config(dev, BGE_PCI_BAR0, 4);
1780
1781 device_printf(dev, "chip is in D%d power mode "
1782 "-- setting to D0\n", pci_get_powerstate(dev));
1783
1784 pci_set_powerstate(dev, PCI_POWERSTATE_D0);
1785
1786 pci_write_config(dev, PCIR_INTLINE, irq, 4);
1787 pci_write_config(dev, BGE_PCI_BAR0, mem, 4);
1788 }
 1789#endif /* !BURN_BRIDGES */
1790
1791 /*
1792 * Map control/status registers.
1793 */
1794 pci_enable_busmaster(dev);
1795
1796 rid = BGE_PCI_BAR0;
1797 sc->bnx_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
1798 RF_ACTIVE);
1799
1800 if (sc->bnx_res == NULL) {
1801 device_printf(dev, "couldn't map memory\n");
1802 return ENXIO;
1803 }
1804
1805 sc->bnx_btag = rman_get_bustag(sc->bnx_res);
1806 sc->bnx_bhandle = rman_get_bushandle(sc->bnx_res);
1807
1808 /* Save various chip information */
1809 sc->bnx_chipid =
1810 pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >>
1811 BGE_PCIMISCCTL_ASICREV_SHIFT;
1812 if (BGE_ASICREV(sc->bnx_chipid) == BGE_ASICREV_USE_PRODID_REG) {
1813 /* All chips having dedicated ASICREV register have CPMU */
1814 sc->bnx_flags |= BNX_FLAG_CPMU;
1815
1816 switch (product) {
1817 case PCI_PRODUCT_BROADCOM_BCM5717:
1818 case PCI_PRODUCT_BROADCOM_BCM5718:
1819 case PCI_PRODUCT_BROADCOM_BCM5719:
1820 case PCI_PRODUCT_BROADCOM_BCM5720_ALT:
1821 sc->bnx_chipid = pci_read_config(dev,
1822 BGE_PCI_GEN2_PRODID_ASICREV, 4);
1823 break;
1824
1825 case PCI_PRODUCT_BROADCOM_BCM57761:
 1826 case PCI_PRODUCT_BROADCOM_BCM57762:
 1827 case PCI_PRODUCT_BROADCOM_BCM57765:
 1828 case PCI_PRODUCT_BROADCOM_BCM57766:
 1829 case PCI_PRODUCT_BROADCOM_BCM57781:
 1830 case PCI_PRODUCT_BROADCOM_BCM57782:
 1831 case PCI_PRODUCT_BROADCOM_BCM57785:
 1832 case PCI_PRODUCT_BROADCOM_BCM57786:
1833 case PCI_PRODUCT_BROADCOM_BCM57791:
1834 case PCI_PRODUCT_BROADCOM_BCM57795:
1835 sc->bnx_chipid = pci_read_config(dev,
1836 BGE_PCI_GEN15_PRODID_ASICREV, 4);
1837 break;
1838
1839 default:
1840 sc->bnx_chipid = pci_read_config(dev,
1841 BGE_PCI_PRODID_ASICREV, 4);
1842 break;
1843 }
1844 }
1845 sc->bnx_asicrev = BGE_ASICREV(sc->bnx_chipid);
1846 sc->bnx_chiprev = BGE_CHIPREV(sc->bnx_chipid);
1847
1848 switch (sc->bnx_asicrev) {
1849 case BGE_ASICREV_BCM5717:
1850 case BGE_ASICREV_BCM5719:
1851 case BGE_ASICREV_BCM5720:
1852 sc->bnx_flags |= BNX_FLAG_5717_PLUS | BNX_FLAG_57765_PLUS;
1853 break;
1854
 1855 case BGE_ASICREV_BCM57765:
 1856 case BGE_ASICREV_BCM57766:
 1857 sc->bnx_flags |= BNX_FLAG_57765_FAMILY | BNX_FLAG_57765_PLUS;
1858 break;
1859 }
1860 sc->bnx_flags |= BNX_FLAG_SHORTDMA;
1861
1862 sc->bnx_flags |= BNX_FLAG_TSO;
1863 if (sc->bnx_asicrev == BGE_ASICREV_BCM5719 &&
1864 sc->bnx_chipid == BGE_CHIPID_BCM5719_A0)
1865 sc->bnx_flags &= ~BNX_FLAG_TSO;
1866
1867 if (sc->bnx_asicrev == BGE_ASICREV_BCM5717 ||
1868 BNX_IS_57765_FAMILY(sc)) {
 1869 /*
 1870 * All chips of the BCM57785 and BCM5718 families have a bug:
 1871 * under certain situations the interrupt will not be enabled
 1872 * even if the status tag is written to the BGE_MBX_IRQ0_LO
 1873 * mailbox.
 1874 *
 1875 * BCM5719 and BCM5720 have a hardware workaround for this
 1876 * bug; see the comment near BGE_PCIDMARWCTL_TAGGED_STATUS_WA
 1877 * in bnx_chipinit().
 1878 *
 1879 * For the rest of the chips in these two families, we have
 1880 * to poll the status block at a high rate (currently every
 1881 * 10ms) to check whether the interrupt is hosed.
 1882 * See bnx_intr_check() for details.
 1883 */
1884 sc->bnx_flags |= BNX_FLAG_STATUSTAG_BUG;
1885 }
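/*
 * Illustrative sketch of how such a status block poll can be driven
 * off the intr callout.  bnx_intr_check() lives elsewhere in this
 * driver; the body shown here is an assumption, not the real code:
 *
 *	static void
 *	bnx_intr_check(void *xsc)
 *	{
 *		struct bnx_softc *sc = xsc;
 *		struct bge_status_block *sblk =
 *		    sc->bnx_ldata.bnx_status_block;
 *
 *		// New events were posted but no interrupt fired,
 *		// i.e. the interrupt is hosed; process by hand.
 *		if (sc->bnx_status_tag != sblk->bge_status_tag)
 *			bnx_msi(sc);
 *		callout_reset(&sc->bnx_intr_timer, BNX_INTR_CKINTVL,
 *		    bnx_intr_check, sc);
 *	}
 */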
1886
1887 misccfg = CSR_READ_4(sc, BGE_MISC_CFG) & BGE_MISCCFG_BOARD_ID_MASK;
1888
1889 sc->bnx_pciecap = pci_get_pciecap_ptr(sc->bnx_dev);
1890 if (sc->bnx_asicrev == BGE_ASICREV_BCM5719 ||
1891 sc->bnx_asicrev == BGE_ASICREV_BCM5720)
1892 pcie_set_max_readrq(dev, PCIEM_DEVCTL_MAX_READRQ_2048);
1893 else
1894 pcie_set_max_readrq(dev, PCIEM_DEVCTL_MAX_READRQ_4096);
1895 device_printf(dev, "CHIP ID 0x%08x; "
1896 "ASIC REV 0x%02x; CHIP REV 0x%02x\n",
1897 sc->bnx_chipid, sc->bnx_asicrev, sc->bnx_chiprev);
1898
1899 /*
1900 * Set various PHY quirk flags.
1901 */
1902
1903 capmask = MII_CAPMASK_DEFAULT;
1904 if (product == PCI_PRODUCT_BROADCOM_BCM57791 ||
1905 product == PCI_PRODUCT_BROADCOM_BCM57795) {
1906 /* 10/100 only */
1907 capmask &= ~BMSR_EXTSTAT;
1908 }
1909
1910 mii_priv |= BRGPHY_FLAG_WIRESPEED;
1911
1912 /*
1913 * Allocate interrupt
1914 */
1915 sc->bnx_irq_type = pci_alloc_1intr(dev, bnx_msi_enable, &sc->bnx_irq_rid,
1916 &intr_flags);
1917
1918 sc->bnx_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->bnx_irq_rid,
1919 intr_flags);
1920 if (sc->bnx_irq == NULL) {
1921 device_printf(dev, "couldn't map interrupt\n");
1922 error = ENXIO;
1923 goto fail;
1924 }
1925
1926 if (sc->bnx_irq_type == PCI_INTR_TYPE_MSI) {
1927 sc->bnx_flags |= BNX_FLAG_ONESHOT_MSI;
1928 bnx_enable_msi(sc);
1929 }
1930
 1931 /* Initialize if_name early so that if_printf can be used */
1932 ifp = &sc->arpcom.ac_if;
1933 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1934
1935 /* Try to reset the chip. */
1936 bnx_reset(sc);
1937
1938 if (bnx_chipinit(sc)) {
1939 device_printf(dev, "chip initialization failed\n");
1940 error = ENXIO;
1941 goto fail;
1942 }
1943
1944 /*
1945 * Get station address
1946 */
1947 error = bnx_get_eaddr(sc, ether_addr);
1948 if (error) {
1949 device_printf(dev, "failed to read station address\n");
1950 goto fail;
1951 }
1952
 1953 if (BNX_IS_57765_PLUS(sc)) {
1954 sc->bnx_return_ring_cnt = BGE_RETURN_RING_CNT;
1955 } else {
1956 /* 5705/5750 limits RX return ring to 512 entries. */
1957 sc->bnx_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
1958 }
1959
1960 error = bnx_dma_alloc(sc);
1961 if (error)
1962 goto fail;
1963
1964 /* Set default tuneable values. */
1965 sc->bnx_rx_coal_ticks = BNX_RX_COAL_TICKS_DEF;
1966 sc->bnx_tx_coal_ticks = BNX_TX_COAL_TICKS_DEF;
1967 sc->bnx_rx_coal_bds = BNX_RX_COAL_BDS_DEF;
1968 sc->bnx_tx_coal_bds = BNX_TX_COAL_BDS_DEF;
1969 sc->bnx_rx_coal_bds_int = BNX_RX_COAL_BDS_INT_DEF;
1970 sc->bnx_tx_coal_bds_int = BNX_TX_COAL_BDS_INT_DEF;
1971
1972 /* Set up ifnet structure */
1973 ifp->if_softc = sc;
1974 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1975 ifp->if_ioctl = bnx_ioctl;
1976 ifp->if_start = bnx_start;
1977#ifdef IFPOLL_ENABLE
1978 ifp->if_npoll = bnx_npoll;
1979#endif
1980 ifp->if_watchdog = bnx_watchdog;
1981 ifp->if_init = bnx_init;
1982 ifp->if_mtu = ETHERMTU;
1983 ifp->if_capabilities = IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
1984 ifq_set_maxlen(&ifp->if_snd, BGE_TX_RING_CNT - 1);
1985 ifq_set_ready(&ifp->if_snd);
1986
1987 ifp->if_capabilities |= IFCAP_HWCSUM;
1988 ifp->if_hwassist = BNX_CSUM_FEATURES;
1989 if (sc->bnx_flags & BNX_FLAG_TSO) {
1990 ifp->if_capabilities |= IFCAP_TSO;
1991 ifp->if_hwassist |= CSUM_TSO;
1992 }
1993 ifp->if_capenable = ifp->if_capabilities;
1994
1995 /*
1996 * Figure out what sort of media we have by checking the
1997 * hardware config word in the first 32k of NIC internal memory,
1998 * or fall back to examining the EEPROM if necessary.
1999 * Note: on some BCM5700 cards, this value appears to be unset.
2000 * If that's the case, we have to rely on identifying the NIC
2001 * by its PCI subsystem ID, as we do below for the SysKonnect
2002 * SK-9D41.
2003 */
2004 if (bnx_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER) {
2005 hwcfg = bnx_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
2006 } else {
2007 if (bnx_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
2008 sizeof(hwcfg))) {
2009 device_printf(dev, "failed to read EEPROM\n");
2010 error = ENXIO;
2011 goto fail;
2012 }
2013 hwcfg = ntohl(hwcfg);
2014 }
2015
2016 /* The SysKonnect SK-9D41 is a 1000baseSX card. */
2017 if (pci_get_subvendor(dev) == PCI_PRODUCT_SCHNEIDERKOCH_SK_9D41 ||
2018 (hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER)
2019 sc->bnx_flags |= BNX_FLAG_TBI;
2020
2021 /* Setup MI MODE */
2022 if (sc->bnx_flags & BNX_FLAG_CPMU)
2023 sc->bnx_mi_mode = BGE_MIMODE_500KHZ_CONST;
2024 else
2025 sc->bnx_mi_mode = BGE_MIMODE_BASE;
2026
2027 /* Setup link status update stuffs */
2028 if (sc->bnx_flags & BNX_FLAG_TBI) {
2029 sc->bnx_link_upd = bnx_tbi_link_upd;
2030 sc->bnx_link_chg = BGE_MACSTAT_LINK_CHANGED;
2031 } else if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) {
2032 sc->bnx_link_upd = bnx_autopoll_link_upd;
2033 sc->bnx_link_chg = BGE_MACSTAT_LINK_CHANGED;
2034 } else {
2035 sc->bnx_link_upd = bnx_copper_link_upd;
2036 sc->bnx_link_chg = BGE_MACSTAT_LINK_CHANGED;
2037 }
2038
2039 /* Set default PHY address */
2040 sc->bnx_phyno = 1;
2041
2042 /*
2043 * PHY address mapping for various devices.
2044 *
2045 * | F0 Cu | F0 Sr | F1 Cu | F1 Sr |
2046 * ---------+-------+-------+-------+-------+
2047 * BCM57XX | 1 | X | X | X |
2048 * BCM5704 | 1 | X | 1 | X |
2049 * BCM5717 | 1 | 8 | 2 | 9 |
2050 * BCM5719 | 1 | 8 | 2 | 9 |
2051 * BCM5720 | 1 | 8 | 2 | 9 |
2052 *
2053 * Other addresses may respond but they are not
2054 * IEEE compliant PHYs and should be ignored.
2055 */
 2056 if (BNX_IS_5717_PLUS(sc)) {
2057 int f;
2058
2059 f = pci_get_function(dev);
2060 if (sc->bnx_chipid == BGE_CHIPID_BCM5717_A0) {
2061 if (CSR_READ_4(sc, BGE_SGDIG_STS) &
2062 BGE_SGDIGSTS_IS_SERDES)
2063 sc->bnx_phyno = f + 8;
2064 else
2065 sc->bnx_phyno = f + 1;
2066 } else {
2067 if (CSR_READ_4(sc, BGE_CPMU_PHY_STRAP) &
2068 BGE_CPMU_PHY_STRAP_IS_SERDES)
2069 sc->bnx_phyno = f + 8;
2070 else
2071 sc->bnx_phyno = f + 1;
2072 }
2073 }
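/*
 * Worked example of the table above: PCI function 1 of a BCM5719
 * strapped for SerDes reads BGE_CPMU_PHY_STRAP_IS_SERDES, so
 * bnx_phyno = 1 + 8 = 9 ("F1 Sr"); the same function on copper
 * gets 1 + 1 = 2 ("F1 Cu").
 */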
2074
2075 if (sc->bnx_flags & BNX_FLAG_TBI) {
2076 ifmedia_init(&sc->bnx_ifmedia, IFM_IMASK,
2077 bnx_ifmedia_upd, bnx_ifmedia_sts);
2078 ifmedia_add(&sc->bnx_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
2079 ifmedia_add(&sc->bnx_ifmedia,
2080 IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL);
2081 ifmedia_add(&sc->bnx_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
2082 ifmedia_set(&sc->bnx_ifmedia, IFM_ETHER|IFM_AUTO);
2083 sc->bnx_ifmedia.ifm_media = sc->bnx_ifmedia.ifm_cur->ifm_media;
2084 } else {
2085 struct mii_probe_args mii_args;
2086
2087 mii_probe_args_init(&mii_args, bnx_ifmedia_upd, bnx_ifmedia_sts);
2088 mii_args.mii_probemask = 1 << sc->bnx_phyno;
2089 mii_args.mii_capmask = capmask;
2090 mii_args.mii_privtag = MII_PRIVTAG_BRGPHY;
2091 mii_args.mii_priv = mii_priv;
2092
2093 error = mii_probe(dev, &sc->bnx_miibus, &mii_args);
2094 if (error) {
2095 device_printf(dev, "MII without any PHY!\n");
2096 goto fail;
2097 }
2098 }
2099
2100 /*
2101 * Create sysctl nodes.
2102 */
2103 sysctl_ctx_init(&sc->bnx_sysctl_ctx);
2104 sc->bnx_sysctl_tree = SYSCTL_ADD_NODE(&sc->bnx_sysctl_ctx,
2105 SYSCTL_STATIC_CHILDREN(_hw),
2106 OID_AUTO,
2107 device_get_nameunit(dev),
2108 CTLFLAG_RD, 0, "");
2109 if (sc->bnx_sysctl_tree == NULL) {
2110 device_printf(dev, "can't add sysctl node\n");
2111 error = ENXIO;
2112 goto fail;
2113 }
2114
2115 SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
2116 SYSCTL_CHILDREN(sc->bnx_sysctl_tree),
2117 OID_AUTO, "rx_coal_ticks",
2118 CTLTYPE_INT | CTLFLAG_RW,
2119 sc, 0, bnx_sysctl_rx_coal_ticks, "I",
2120 "Receive coalescing ticks (usec).");
2121 SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
2122 SYSCTL_CHILDREN(sc->bnx_sysctl_tree),
2123 OID_AUTO, "tx_coal_ticks",
2124 CTLTYPE_INT | CTLFLAG_RW,
2125 sc, 0, bnx_sysctl_tx_coal_ticks, "I",
2126 "Transmit coalescing ticks (usec).");
2127 SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
2128 SYSCTL_CHILDREN(sc->bnx_sysctl_tree),
2129 OID_AUTO, "rx_coal_bds",
2130 CTLTYPE_INT | CTLFLAG_RW,
2131 sc, 0, bnx_sysctl_rx_coal_bds, "I",
2132 "Receive max coalesced BD count.");
2133 SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
2134 SYSCTL_CHILDREN(sc->bnx_sysctl_tree),
2135 OID_AUTO, "tx_coal_bds",
2136 CTLTYPE_INT | CTLFLAG_RW,
2137 sc, 0, bnx_sysctl_tx_coal_bds, "I",
2138 "Transmit max coalesced BD count.");
2139 /*
2140 * A common design characteristic for many Broadcom
2141 * client controllers is that they only support a
2142 * single outstanding DMA read operation on the PCIe
2143 * bus. This means that it will take twice as long to
2144 * fetch a TX frame that is split into header and
2145 * payload buffers as it does to fetch a single,
2146 * contiguous TX frame (2 reads vs. 1 read). For these
2147 * controllers, coalescing buffers to reduce the number
 2148 * of memory reads is an effective way to reach maximum
 2149 * performance (about 940Mbps). Without collapsing TX
 2150 * buffers the maximum TCP bulk transfer performance
 2151 * is about 850Mbps. However, forcibly coalescing mbufs
 2152 * consumes a lot of CPU cycles, so leave it off by
 2153 * default.
2154 */
2155 SYSCTL_ADD_INT(&sc->bnx_sysctl_ctx,
2156 SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
2157 "force_defrag", CTLFLAG_RW, &sc->bnx_force_defrag, 0,
2158 "Force defragment on TX path");
2159
2160 SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
2161 SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
2162 "rx_coal_bds_int", CTLTYPE_INT | CTLFLAG_RW,
2163 sc, 0, bnx_sysctl_rx_coal_bds_int, "I",
2164 "Receive max coalesced BD count during interrupt.");
2165 SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
2166 SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
2167 "tx_coal_bds_int", CTLTYPE_INT | CTLFLAG_RW,
2168 sc, 0, bnx_sysctl_tx_coal_bds_int, "I",
2169 "Transmit max coalesced BD count during interrupt.");
2170
2171#ifdef BNX_TSO_DEBUG
2172 for (i = 0; i < BNX_TSO_NSTATS; ++i) {
2173 ksnprintf(desc, sizeof(desc), "tso%d", i + 1);
2174 SYSCTL_ADD_ULONG(&sc->bnx_sysctl_ctx,
2175 SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
2176 desc, CTLFLAG_RW, &sc->bnx_tsosegs[i], "");
2177 }
2178#endif
2179
2180 /*
2181 * Call MI attach routine.
2182 */
2183 ether_ifattach(ifp, ether_addr, NULL);
2184
2185#ifdef IFPOLL_ENABLE
2186 ifpoll_compat_setup(&sc->bnx_npoll,
2187 &sc->bnx_sysctl_ctx, sc->bnx_sysctl_tree,
2188 device_get_unit(dev), ifp->if_serializer);
2189#endif
2190
2191 if (sc->bnx_irq_type == PCI_INTR_TYPE_MSI) {
2192 if (sc->bnx_flags & BNX_FLAG_ONESHOT_MSI) {
2193 intr_func = bnx_msi_oneshot;
2194 if (bootverbose)
2195 device_printf(dev, "oneshot MSI\n");
2196 } else {
2197 intr_func = bnx_msi;
2198 }
2199 } else {
2200 intr_func = bnx_intr_legacy;
2201 }
2202 error = bus_setup_intr(dev, sc->bnx_irq, INTR_MPSAFE, intr_func, sc,
2203 &sc->bnx_intrhand, ifp->if_serializer);
2204 if (error) {
2205 ether_ifdetach(ifp);
2206 device_printf(dev, "couldn't set up irq\n");
2207 goto fail;
2208 }
2209
2210 ifp->if_cpuid = rman_get_cpuid(sc->bnx_irq);
2211 KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus);
2212
 2213 sc->bnx_stat_cpuid = ifp->if_cpuid;
 2214 sc->bnx_intr_cpuid = ifp->if_cpuid;
 2215
2216 return(0);
2217fail:
2218 bnx_detach(dev);
2219 return(error);
2220}
2221
2222static int
2223bnx_detach(device_t dev)
2224{
2225 struct bnx_softc *sc = device_get_softc(dev);
2226
2227 if (device_is_attached(dev)) {
2228 struct ifnet *ifp = &sc->arpcom.ac_if;
2229
2230 lwkt_serialize_enter(ifp->if_serializer);
2231 bnx_stop(sc);
2232 bnx_reset(sc);
2233 bus_teardown_intr(dev, sc->bnx_irq, sc->bnx_intrhand);
2234 lwkt_serialize_exit(ifp->if_serializer);
2235
2236 ether_ifdetach(ifp);
2237 }
2238
2239 if (sc->bnx_flags & BNX_FLAG_TBI)
2240 ifmedia_removeall(&sc->bnx_ifmedia);
2241 if (sc->bnx_miibus)
2242 device_delete_child(dev, sc->bnx_miibus);
2243 bus_generic_detach(dev);
2244
2245 if (sc->bnx_irq != NULL) {
2246 bus_release_resource(dev, SYS_RES_IRQ, sc->bnx_irq_rid,
2247 sc->bnx_irq);
2248 }
2249 if (sc->bnx_irq_type == PCI_INTR_TYPE_MSI)
2250 pci_release_msi(dev);
2251
2252 if (sc->bnx_res != NULL) {
2253 bus_release_resource(dev, SYS_RES_MEMORY,
2254 BGE_PCI_BAR0, sc->bnx_res);
2255 }
2256
2257 if (sc->bnx_sysctl_tree != NULL)
2258 sysctl_ctx_free(&sc->bnx_sysctl_ctx);
2259
2260 bnx_dma_free(sc);
2261
2262 return 0;
2263}
2264
2265static void
2266bnx_reset(struct bnx_softc *sc)
2267{
2268 device_t dev;
2269 uint32_t cachesize, command, pcistate, reset;
2270 void (*write_op)(struct bnx_softc *, uint32_t, uint32_t);
2271 int i, val = 0;
2272 uint16_t devctl;
2273
2274 dev = sc->bnx_dev;
2275
2276 if (sc->bnx_asicrev != BGE_ASICREV_BCM5906)
2277 write_op = bnx_writemem_direct;
2278 else
2279 write_op = bnx_writereg_ind;
2280
2281 /* Save some important PCI state. */
2282 cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
2283 command = pci_read_config(dev, BGE_PCI_CMD, 4);
2284 pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);
2285
2286 pci_write_config(dev, BGE_PCI_MISC_CTL,
2287 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2288 BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW|
2289 BGE_PCIMISCCTL_TAGGED_STATUS, 4);
2290
2291 /* Disable fastboot on controllers that support it. */
2292 if (bootverbose)
2293 if_printf(&sc->arpcom.ac_if, "Disabling fastboot\n");
2294 CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0x0);
2295
2296 /*
2297 * Write the magic number to SRAM at offset 0xB50.
2298 * When firmware finishes its initialization it will
2299 * write ~BGE_MAGIC_NUMBER to the same location.
2300 */
2301 bnx_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
2302
2303 reset = BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1);
2304
2305 /* XXX: Broadcom Linux driver. */
2306 /* Force PCI-E 1.0a mode */
 2307 if (!BNX_IS_57765_PLUS(sc) &&
2308 CSR_READ_4(sc, BGE_PCIE_PHY_TSTCTL) ==
2309 (BGE_PCIE_PHY_TSTCTL_PSCRAM |
2310 BGE_PCIE_PHY_TSTCTL_PCIE10)) {
2311 CSR_WRITE_4(sc, BGE_PCIE_PHY_TSTCTL,
2312 BGE_PCIE_PHY_TSTCTL_PSCRAM);
2313 }
2314 if (sc->bnx_chipid != BGE_CHIPID_BCM5750_A0) {
2315 /* Prevent PCIE link training during global reset */
2316 CSR_WRITE_4(sc, BGE_MISC_CFG, (1<<29));
2317 reset |= (1<<29);
2318 }
2319
2320 /*
2321 * Set GPHY Power Down Override to leave GPHY
2322 * powered up in D0 uninitialized.
2323 */
2324 if ((sc->bnx_flags & BNX_FLAG_CPMU) == 0)
2325 reset |= BGE_MISCCFG_GPHY_PD_OVERRIDE;
2326
2327 /* Issue global reset */
2328 write_op(sc, BGE_MISC_CFG, reset);
2329
2330 if (sc->bnx_asicrev == BGE_ASICREV_BCM5906) {
2331 uint32_t status, ctrl;
2332
2333 status = CSR_READ_4(sc, BGE_VCPU_STATUS);
2334 CSR_WRITE_4(sc, BGE_VCPU_STATUS,
2335 status | BGE_VCPU_STATUS_DRV_RESET);
2336 ctrl = CSR_READ_4(sc, BGE_VCPU_EXT_CTRL);
2337 CSR_WRITE_4(sc, BGE_VCPU_EXT_CTRL,
2338 ctrl & ~BGE_VCPU_EXT_CTRL_HALT_CPU);
2339 }
2340
2341 DELAY(1000);
2342
2343 /* XXX: Broadcom Linux driver. */
2344 if (sc->bnx_chipid == BGE_CHIPID_BCM5750_A0) {
2345 uint32_t v;
2346
2347 DELAY(500000); /* wait for link training to complete */
2348 v = pci_read_config(dev, 0xc4, 4);
2349 pci_write_config(dev, 0xc4, v | (1<<15), 4);
2350 }
2351
2352 devctl = pci_read_config(dev, sc->bnx_pciecap + PCIER_DEVCTRL, 2);
2353
2354 /* Disable no snoop and disable relaxed ordering. */
2355 devctl &= ~(PCIEM_DEVCTL_RELAX_ORDER | PCIEM_DEVCTL_NOSNOOP);
2356
2357 /* Old PCI-E chips only support 128 bytes Max PayLoad Size. */
2358 if ((sc->bnx_flags & BNX_FLAG_CPMU) == 0) {
2359 devctl &= ~PCIEM_DEVCTL_MAX_PAYLOAD_MASK;
2360 devctl |= PCIEM_DEVCTL_MAX_PAYLOAD_128;
2361 }
2362
2363 pci_write_config(dev, sc->bnx_pciecap + PCIER_DEVCTRL,
2364 devctl, 2);
2365
2366 /* Clear error status. */
2367 pci_write_config(dev, sc->bnx_pciecap + PCIER_DEVSTS,
2368 PCIEM_DEVSTS_CORR_ERR |
2369 PCIEM_DEVSTS_NFATAL_ERR |
2370 PCIEM_DEVSTS_FATAL_ERR |
2371 PCIEM_DEVSTS_UNSUPP_REQ, 2);
2372
2373 /* Reset some of the PCI state that got zapped by reset */
2374 pci_write_config(dev, BGE_PCI_MISC_CTL,
2375 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2376 BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW|
2377 BGE_PCIMISCCTL_TAGGED_STATUS, 4);
2378 pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
2379 pci_write_config(dev, BGE_PCI_CMD, command, 4);
2380 write_op(sc, BGE_MISC_CFG, (65 << 1));
2381
2382 /* Enable memory arbiter */
2383 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
2384
2385 if (sc->bnx_asicrev == BGE_ASICREV_BCM5906) {
2386 for (i = 0; i < BNX_TIMEOUT; i++) {
2387 val = CSR_READ_4(sc, BGE_VCPU_STATUS);
2388 if (val & BGE_VCPU_STATUS_INIT_DONE)
2389 break;
2390 DELAY(100);
2391 }
2392 if (i == BNX_TIMEOUT) {
2393 if_printf(&sc->arpcom.ac_if, "reset timed out\n");
2394 return;
2395 }
2396 } else {
2397 /*
2398 * Poll until we see the 1's complement of the magic number.
2399 * This indicates that the firmware initialization
2400 * is complete.
2401 */
2402 for (i = 0; i < BNX_FIRMWARE_TIMEOUT; i++) {
2403 val = bnx_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
2404 if (val == ~BGE_MAGIC_NUMBER)
2405 break;
2406 DELAY(10);
2407 }
2408 if (i == BNX_FIRMWARE_TIMEOUT) {
2409 if_printf(&sc->arpcom.ac_if, "firmware handshake "
2410 "timed out, found 0x%08x\n", val);
2411 }
2412
2413 /* BCM57765 A0 needs additional time before accessing. */
2414 if (sc->bnx_chipid == BGE_CHIPID_BCM57765_A0)
2415 DELAY(10 * 1000);
2416 }
2417
2418 /*
2419 * XXX Wait for the value of the PCISTATE register to
2420 * return to its original pre-reset state. This is a
2421 * fairly good indicator of reset completion. If we don't
2422 * wait for the reset to fully complete, trying to read
2423 * from the device's non-PCI registers may yield garbage
2424 * results.
2425 */
2426 for (i = 0; i < BNX_TIMEOUT; i++) {
2427 if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate)
2428 break;
2429 DELAY(10);
2430 }
2431
2432 /* Fix up byte swapping */
2433 CSR_WRITE_4(sc, BGE_MODE_CTL, bnx_dma_swap_options(sc));
2434
2435 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
2436
2437 /*
2438 * The 5704 in TBI mode apparently needs some special
 2439 * adjustment to ensure the SERDES drive level is set
2440 * to 1.2V.
2441 */
2442 if (sc->bnx_asicrev == BGE_ASICREV_BCM5704 &&
2443 (sc->bnx_flags & BNX_FLAG_TBI)) {
2444 uint32_t serdescfg;
2445
2446 serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG);
2447 serdescfg = (serdescfg & ~0xFFF) | 0x880;
2448 CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg);
2449 }
2450
2451 CSR_WRITE_4(sc, BGE_MI_MODE,
2452 sc->bnx_mi_mode & ~BGE_MIMODE_AUTOPOLL);
2453 DELAY(80);
2454
 2455 /* XXX: Broadcom Linux driver. */
 2456 if (!BNX_IS_57765_PLUS(sc)) {
2457 uint32_t v;
2458
2459 /* Enable Data FIFO protection. */
2460 v = CSR_READ_4(sc, BGE_PCIE_TLDLPL_PORT);
2461 CSR_WRITE_4(sc, BGE_PCIE_TLDLPL_PORT, v | (1 << 25));
2462 }
2463
2464 DELAY(10000);
2465
2466 if (sc->bnx_asicrev == BGE_ASICREV_BCM5720) {
2467 BNX_CLRBIT(sc, BGE_CPMU_CLCK_ORIDE,
2468 CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
2469 }
2470}
2471
2472/*
2473 * Frame reception handling. This is called if there's a frame
2474 * on the receive return list.
2475 *
2476 * Note: we have to be able to handle two possibilities here:
 2477 * 1) the frame is from the jumbo receive ring
2478 * 2) the frame is from the standard receive ring
2479 */
2480
2481static void
2482bnx_rxeof(struct bnx_softc *sc, uint16_t rx_prod)
2483{
2484 struct ifnet *ifp;
2485 int stdcnt = 0, jumbocnt = 0;
2486
2487 ifp = &sc->arpcom.ac_if;
2488
2489 while (sc->bnx_rx_saved_considx != rx_prod) {
2490 struct bge_rx_bd *cur_rx;
2491 uint32_t rxidx;
2492 struct mbuf *m = NULL;
2493 uint16_t vlan_tag = 0;
2494 int have_tag = 0;
2495
2496 cur_rx =
2497 &sc->bnx_ldata.bnx_rx_return_ring[sc->bnx_rx_saved_considx];
2498
2499 rxidx = cur_rx->bge_idx;
2500 BNX_INC(sc->bnx_rx_saved_considx, sc->bnx_return_ring_cnt);
2501
2502 if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
2503 have_tag = 1;
2504 vlan_tag = cur_rx->bge_vlan_tag;
2505 }
2506
2507 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
2508 BNX_INC(sc->bnx_jumbo, BGE_JUMBO_RX_RING_CNT);
2509 jumbocnt++;
2510
2511 if (rxidx != sc->bnx_jumbo) {
2512 ifp->if_ierrors++;
2513 if_printf(ifp, "sw jumbo index(%d) "
2514 "and hw jumbo index(%d) mismatch, drop!\n",
2515 sc->bnx_jumbo, rxidx);
2516 bnx_setup_rxdesc_jumbo(sc, rxidx);
2517 continue;
2518 }
2519
2520 m = sc->bnx_cdata.bnx_rx_jumbo_chain[rxidx].bnx_mbuf;
2521 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2522 ifp->if_ierrors++;
2523 bnx_setup_rxdesc_jumbo(sc, sc->bnx_jumbo);
2524 continue;
2525 }
2526 if (bnx_newbuf_jumbo(sc, sc->bnx_jumbo, 0)) {
2527 ifp->if_ierrors++;
2528 bnx_setup_rxdesc_jumbo(sc, sc->bnx_jumbo);
2529 continue;
2530 }
2531 } else {
2532 BNX_INC(sc->bnx_std, BGE_STD_RX_RING_CNT);
2533 stdcnt++;
2534
2535 if (rxidx != sc->bnx_std) {
2536 ifp->if_ierrors++;
2537 if_printf(ifp, "sw std index(%d) "
2538 "and hw std index(%d) mismatch, drop!\n",
2539 sc->bnx_std, rxidx);
2540 bnx_setup_rxdesc_std(sc, rxidx);
2541 continue;
2542 }
2543
2544 m = sc->bnx_cdata.bnx_rx_std_chain[rxidx].bnx_mbuf;
2545 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2546 ifp->if_ierrors++;
2547 bnx_setup_rxdesc_std(sc, sc->bnx_std);
2548 continue;
2549 }
2550 if (bnx_newbuf_std(sc, sc->bnx_std, 0)) {
2551 ifp->if_ierrors++;
2552 bnx_setup_rxdesc_std(sc, sc->bnx_std);
2553 continue;
2554 }
2555 }
2556
2557 ifp->if_ipackets++;
2558 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
2559 m->m_pkthdr.rcvif = ifp;
2560
2561 if ((ifp->if_capenable & IFCAP_RXCSUM) &&
2562 (cur_rx->bge_flags & BGE_RXBDFLAG_IPV6) == 0) {
2563 if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
2564 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2565 if ((cur_rx->bge_error_flag &
2566 BGE_RXERRFLAG_IP_CSUM_NOK) == 0)
2567 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2568 }
2569 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) {
2570 m->m_pkthdr.csum_data =
2571 cur_rx->bge_tcp_udp_csum;
2572 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
2573 CSUM_PSEUDO_HDR;
2574 }
2575 }
2576
2577 /*
2578 * If we received a packet with a vlan tag, pass it
2579 * to vlan_input() instead of ether_input().
2580 */
2581 if (have_tag) {
2582 m->m_flags |= M_VLANTAG;
2583 m->m_pkthdr.ether_vlantag = vlan_tag;
2584 have_tag = vlan_tag = 0;
2585 }
2586 ifp->if_input(ifp, m);
2587 }
2588
2589 bnx_writembx(sc, BGE_MBX_RX_CONS0_LO, sc->bnx_rx_saved_considx);
2590 if (stdcnt)
2591 bnx_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bnx_std);
2592 if (jumbocnt)
2593 bnx_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bnx_jumbo);
2594}
2595
2596static void
2597bnx_txeof(struct bnx_softc *sc, uint16_t tx_cons)
2598{
2599 struct ifnet *ifp;
2600
2601 ifp = &sc->arpcom.ac_if;
2602
2603 /*
2604 * Go through our tx ring and free mbufs for those
2605 * frames that have been sent.
2606 */
2607 while (sc->bnx_tx_saved_considx != tx_cons) {
2608 uint32_t idx = 0;
2609
2610 idx = sc->bnx_tx_saved_considx;
 2611 if (sc->bnx_cdata.bnx_tx_chain[idx] != NULL) {
 2612 ifp->if_opackets++;
2613 bus_dmamap_unload(sc->bnx_cdata.bnx_tx_mtag,
2614 sc->bnx_cdata.bnx_tx_dmamap[idx]);
2615 m_freem(sc->bnx_cdata.bnx_tx_chain[idx]);
2616 sc->bnx_cdata.bnx_tx_chain[idx] = NULL;
2617 }
2618 sc->bnx_txcnt--;
2619 BNX_INC(sc->bnx_tx_saved_considx, BGE_TX_RING_CNT);
2620 }
2621
 2622 if ((BGE_TX_RING_CNT - sc->bnx_txcnt) >=
2623 (BNX_NSEG_RSVD + BNX_NSEG_SPARE))
2624 ifp->if_flags &= ~IFF_OACTIVE;
2625
2626 if (sc->bnx_txcnt == 0)
2627 ifp->if_timer = 0;
2628
2629 if (!ifq_is_empty(&ifp->if_snd))
2630 if_devstart(ifp);
2631}
2632
 2633#ifdef IFPOLL_ENABLE
2634
2635static void
2636bnx_npoll(struct ifnet *ifp, struct ifpoll_info *info)
2637{
2638 struct bnx_softc *sc = ifp->if_softc;
2639
2640 ASSERT_SERIALIZED(ifp->if_serializer);
2641
2642 if (info != NULL) {
 2643 int cpuid = sc->bnx_npoll.ifpc_cpuid;
2644
2645 info->ifpi_rx[cpuid].poll_func = bnx_npoll_compat;
2646 info->ifpi_rx[cpuid].arg = NULL;
2647 info->ifpi_rx[cpuid].serializer = ifp->if_serializer;
2648
2649 if (ifp->if_flags & IFF_RUNNING)
2650 bnx_disable_intr(sc);
2651 ifp->if_npoll_cpuid = cpuid;
2652 } else {
2653 if (ifp->if_flags & IFF_RUNNING)
2654 bnx_enable_intr(sc);
2655 ifp->if_npoll_cpuid = -1;
2656 }
2657}
2658
2659static void
2660bnx_npoll_compat(struct ifnet *ifp, void *arg __unused, int cycle __unused)
2661{
2662 struct bnx_softc *sc = ifp->if_softc;
2663 struct bge_status_block *sblk = sc->bnx_ldata.bnx_status_block;
2664 uint16_t rx_prod, tx_cons;
2665
2666 ASSERT_SERIALIZED(ifp->if_serializer);
2667
2668 if (sc->bnx_npoll.ifpc_stcount-- == 0) {
2669 sc->bnx_npoll.ifpc_stcount = sc->bnx_npoll.ifpc_stfrac;
2670 /*
2671 * Process link state changes.
2672 */
2673 bnx_link_poll(sc);
 2674 }
2675
2676 sc->bnx_status_tag = sblk->bge_status_tag;
2677
2678 /*
2679 * Use a load fence to ensure that status_tag is saved
2680 * before rx_prod and tx_cons.
2681 */
2682 cpu_lfence();
2683
 2684 rx_prod = sblk->bge_idx[0].bge_rx_prod_idx;
 2685 tx_cons = sblk->bge_idx[0].bge_tx_cons_idx;
 2686
 2687 if (sc->bnx_rx_saved_considx != rx_prod)
 2688 bnx_rxeof(sc, rx_prod);
 2689
 2690 if (sc->bnx_tx_saved_considx != tx_cons)
 2691 bnx_txeof(sc, tx_cons);
2694
2695 if (sc->bnx_coal_chg)
2696 bnx_coal_change(sc);
2697}
2698
 2699#endif /* IFPOLL_ENABLE */
2700
2701static void
2702bnx_intr_legacy(void *xsc)
2703{
2704 struct bnx_softc *sc = xsc;
2705 struct bge_status_block *sblk = sc->bnx_ldata.bnx_status_block;
2706
2707 if (sc->bnx_status_tag == sblk->bge_status_tag) {
2708 uint32_t val;
2709
2710 val = pci_read_config(sc->bnx_dev, BGE_PCI_PCISTATE, 4);
2711 if (val & BGE_PCISTAT_INTR_NOTACT)
2712 return;
2713 }
2714
2715 /*
2716 * NOTE:
 2717 * The interrupt has to be disabled if tagged status
 2718 * is used, else the interrupt stays asserted on
 2719 * certain chips (at least on BCM5750 AX/BX).
2720 */
2721 bnx_writembx(sc, BGE_MBX_IRQ0_LO, 1);
2722
2723 bnx_intr(sc);
2724}
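/*
 * Mailbox protocol summary (as this file appears to use it; hedged
 * reading, not chip documentation): writing 1 to BGE_MBX_IRQ0_LO
 * masks the interrupt while the status block is processed;
 * bnx_intr() then writes (bnx_status_tag << 24) to ack the tagged
 * status and unmask.  With tagged status the chip is expected to
 * interrupt again if the acked tag is already stale, so events
 * posted during processing are not lost.
 */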
2725
2726static void
2727bnx_msi(void *xsc)
2728{
2729 struct bnx_softc *sc = xsc;
2730
2731 /* Disable interrupt first */
2732 bnx_writembx(sc, BGE_MBX_IRQ0_LO, 1);
2733 bnx_intr(sc);
2734}
2735
2736static void
2737bnx_msi_oneshot(void *xsc)
2738{
2739 bnx_intr(xsc);
2740}
2741
2742static void
2743bnx_intr(struct bnx_softc *sc)
2744{
2745 struct ifnet *ifp = &sc->arpcom.ac_if;
2746 struct bge_status_block *sblk = sc->bnx_ldata.bnx_status_block;
2747 uint16_t rx_prod, tx_cons;
2748 uint32_t status;
2749
2750 sc->bnx_status_tag = sblk->bge_status_tag;
2751 /*
2752 * Use a load fence to ensure that status_tag is saved
2753 * before rx_prod, tx_cons and status.
2754 */
2755 cpu_lfence();
2756
2757 rx_prod = sblk->bge_idx[0].bge_rx_prod_idx;
2758 tx_cons = sblk->bge_idx[0].bge_tx_cons_idx;
2759 status = sblk->bge_status;
2760
2761 if ((status & BGE_STATFLAG_LINKSTATE_CHANGED) || sc->bnx_link_evt)
2762 bnx_link_poll(sc);
2763
2764 if (ifp->if_flags & IFF_RUNNING) {
2765 if (sc->bnx_rx_saved_considx != rx_prod)
2766 bnx_rxeof(sc, rx_prod);
2767
2768 if (sc->bnx_tx_saved_considx != tx_cons)
2769 bnx_txeof(sc, tx_cons);
2770 }
2771
2772 bnx_writembx(sc, BGE_MBX_IRQ0_LO, sc->bnx_status_tag << 24);
2773
2774 if (sc->bnx_coal_chg)
2775 bnx_coal_change(sc);
2776}
2777
2778static void
2779bnx_tick(void *xsc)
2780{
2781 struct bnx_softc *sc = xsc;
2782 struct ifnet *ifp = &sc->arpcom.ac_if;
2783
2784 lwkt_serialize_enter(ifp->if_serializer);
2785
2786 KKASSERT(mycpuid == sc->bnx_stat_cpuid);
2787
2788 bnx_stats_update_regs(sc);
2789
2790 if (sc->bnx_flags & BNX_FLAG_TBI) {
2791 /*
2792 * Since in TBI mode auto-polling can't be used we should poll
2793 * link status manually. Here we register pending link event
2794 * and trigger interrupt.
2795 */
2796 sc->bnx_link_evt++;
2797 BNX_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
2798 } else if (!sc->bnx_link) {
2799 mii_tick(device_get_softc(sc->bnx_miibus));
2800 }
2801
2802 callout_reset(&sc->bnx_stat_timer, hz, bnx_tick, sc);
2803
2804 lwkt_serialize_exit(ifp->if_serializer);
2805}
2806
2807static void
2808bnx_stats_update_regs(struct bnx_softc *sc)
2809{
2810 struct ifnet *ifp = &sc->arpcom.ac_if;
2811 struct bge_mac_stats_regs stats;
2812 uint32_t *s;
2813 int i;
2814
2815 s = (uint32_t *)&stats;
2816 for (i = 0; i < sizeof(struct bge_mac_stats_regs); i += 4) {
2817 *s = CSR_READ_4(sc, BGE_RX_STATS + i);
2818 s++;
2819 }
2820
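/*
 * Note: the "+=" below is effectively an assignment; adding
 * (hw_total - if_collisions) just sets if_collisions to the
 * hardware's running total of collision events.
 */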
2821 ifp->if_collisions +=
2822 (stats.dot3StatsSingleCollisionFrames +
2823 stats.dot3StatsMultipleCollisionFrames +
2824 stats.dot3StatsExcessiveCollisions +
2825 stats.dot3StatsLateCollisions) -
2826 ifp->if_collisions;
2827}
2828
2829/*
2830 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
2831 * pointers to descriptors.
2832 */
2833static int
2834bnx_encap(struct bnx_softc *sc, struct mbuf **m_head0, uint32_t *txidx)
2835{
2836 struct bge_tx_bd *d = NULL;
 2837 uint16_t csum_flags = 0, vlan_tag = 0, mss = 0;
2838 bus_dma_segment_t segs[BNX_NSEG_NEW];
2839 bus_dmamap_t map;
2840 int error, maxsegs, nsegs, idx, i;
2841 struct mbuf *m_head = *m_head0, *m_new;
2842
2843 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
2844#ifdef BNX_TSO_DEBUG
2845 int tso_nsegs;
2846#endif
2847
2848 error = bnx_setup_tso(sc, m_head0, &mss, &csum_flags);
2849 if (error)
2850 return error;
2851 m_head = *m_head0;
2852
2853#ifdef BNX_TSO_DEBUG
2854 tso_nsegs = (m_head->m_pkthdr.len /
2855 m_head->m_pkthdr.tso_segsz) - 1;
2856 if (tso_nsegs > (BNX_TSO_NSTATS - 1))
2857 tso_nsegs = BNX_TSO_NSTATS - 1;
2858 else if (tso_nsegs < 0)
2859 tso_nsegs = 0;
2860 sc->bnx_tsosegs[tso_nsegs]++;
2861#endif
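/*
 * Bucket arithmetic example (header bytes ignored for clarity):
 * a 29000-byte TSO frame with tso_segsz (MSS) 1448 gives
 * 29000 / 1448 - 1 = 19, which is counted under the "tso20"
 * sysctl, since the buckets are named tso%d with i + 1.
 */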
2862 } else if (m_head->m_pkthdr.csum_flags & BNX_CSUM_FEATURES) {
2863 if (m_head->m_pkthdr.csum_flags & CSUM_IP)
2864 csum_flags |= BGE_TXBDFLAG_IP_CSUM;
2865 if (m_head->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
2866 csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
2867 if (m_head->m_flags & M_LASTFRAG)
2868 csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
2869 else if (m_head->m_flags & M_FRAG)
2870 csum_flags |= BGE_TXBDFLAG_IP_FRAG;
2871 }
2872 if (m_head->m_flags & M_VLANTAG) {
2873 csum_flags |= BGE_TXBDFLAG_VLAN_TAG;
2874 vlan_tag = m_head->m_pkthdr.ether_vlantag;
2875 }
2876
2877 idx = *txidx;
2878 map = sc->bnx_cdata.bnx_tx_dmamap[idx];
2879
2880 maxsegs = (BGE_TX_RING_CNT - sc->bnx_txcnt) - BNX_NSEG_RSVD;
2881 KASSERT(maxsegs >= BNX_NSEG_SPARE,
2882 ("not enough segments %d", maxsegs));
2883
2884 if (maxsegs > BNX_NSEG_NEW)
2885 maxsegs = BNX_NSEG_NEW;
2886
2887 /*
2888 * Pad outbound frame to BGE_MIN_FRAMELEN for an unusual reason.
2889 * The bge hardware will pad out Tx runts to BGE_MIN_FRAMELEN,
2890 * but when such padded frames employ the bge IP/TCP checksum
2891 * offload, the hardware checksum assist gives incorrect results
2892 * (possibly from incorporating its own padding into the UDP/TCP
2893 * checksum; who knows). If we pad such runts with zeros, the
2894 * onboard checksum comes out correct.
2895 */
2896 if ((csum_flags & BGE_TXBDFLAG_TCP_UDP_CSUM) &&
2897 m_head->m_pkthdr.len < BNX_MIN_FRAMELEN) {
2898 error = m_devpad(m_head, BNX_MIN_FRAMELEN);
2899 if (error)
2900 goto back;
2901 }
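/*
 * Example, assuming BNX_MIN_FRAMELEN is the usual 60-byte
 * minimum: a 46-byte UDP/IP frame gets 14 zero bytes appended
 * by m_devpad() before the checksum engine sees it.
 */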
2902
2903 if ((sc->bnx_flags & BNX_FLAG_SHORTDMA) && m_head->m_next != NULL) {
2904 m_new = bnx_defrag_shortdma(m_head);
2905 if (m_new == NULL) {
2906 error = ENOBUFS;
2907 goto back;
2908 }
2909 *m_head0 = m_head = m_new;
2910 }
2911 if ((m_head->m_pkthdr.csum_flags & CSUM_TSO) == 0 &&
2912 sc->bnx_force_defrag && m_head->m_next != NULL) {
2913 /*
 2914 * Forcefully defragment the mbuf chain to overcome the
 2915 * hardware limitation of a single outstanding DMA read
 2916 * operation. If defragmentation fails, keep going with
 2917 * the original mbuf chain.
2918 */
2919 m_new = m_defrag(m_head, MB_DONTWAIT);
2920 if (m_new != NULL)
2921 *m_head0 = m_head = m_new;
2922 }
2923
2924 error = bus_dmamap_load_mbuf_defrag(sc->bnx_cdata.bnx_tx_mtag, map,
2925 m_head0, segs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
2926 if (error)
2927 goto back;
2928
2929 m_head = *m_head0;
2930 bus_dmamap_sync(sc->bnx_cdata.bnx_tx_mtag, map, BUS_DMASYNC_PREWRITE);
2931
2932 for (i = 0; ; i++) {
2933 d = &sc->bnx_ldata.bnx_tx_ring[idx];
2934
2935 d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr);
2936 d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr);
2937 d->bge_len = segs[i].ds_len;
2938 d->bge_flags = csum_flags;
2939 d->bge_vlan_tag = vlan_tag;
2940 d->bge_mss = mss;
2941
2942 if (i == nsegs - 1)
2943 break;
2944 BNX_INC(idx, BGE_TX_RING_CNT);
2945 }
2946 /* Mark the last segment as end of packet... */
2947 d->bge_flags |= BGE_TXBDFLAG_END;
2948
 2949 /*
 2950 * Ensure that the map for this transmission is placed at the
 2951 * index of the chain's last descriptor, where bnx_txeof() looks.
 2952 */
2953 sc->bnx_cdata.bnx_tx_dmamap[*txidx] = sc->bnx_cdata.bnx_tx_dmamap[idx];
2954 sc->bnx_cdata.bnx_tx_dmamap[idx] = map;
2955 sc->bnx_cdata.bnx_tx_chain[idx] = m_head;
2956 sc->bnx_txcnt += nsegs;
2957
2958 BNX_INC(idx, BGE_TX_RING_CNT);
2959 *txidx = idx;
2960back:
2961 if (error) {
2962 m_freem(*m_head0);
2963 *m_head0 = NULL;
2964 }
2965 return error;
2966}
2967
2968/*
2969 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
2970 * to the mbuf data regions directly in the transmit descriptors.
2971 */
2972static void
2973bnx_start(struct ifnet *ifp)
2974{
2975 struct bnx_softc *sc = ifp->if_softc;
2976 struct mbuf *m_head = NULL;
2977 uint32_t prodidx;
2978 int need_trans;
2979
2980 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
2981 return;
2982
2983 prodidx = sc->bnx_tx_prodidx;
2984
2985 need_trans = 0;
2986 while (sc->bnx_cdata.bnx_tx_chain[prodidx] == NULL) {
2987 /*
 2988 * Sanity check: avoid coming within BNX_NSEG_RSVD
 2989 * descriptors of the end of the ring. Also make
 2990 * sure there are BNX_NSEG_SPARE descriptors left for
 2991 * defragmenting jumbo buffers or TSO segments.
2992 */
2993 if ((BGE_TX_RING_CNT - sc->bnx_txcnt) <
2994 (BNX_NSEG_RSVD + BNX_NSEG_SPARE)) {
2995 ifp->if_flags |= IFF_OACTIVE;
2996 break;
2997 }
2998
2999 m_head = ifq_dequeue(&ifp->if_snd, NULL);
3000 if (m_head == NULL)
3001 break;
3002
3003 /*
3004 * Pack the data into the transmit ring. If we
3005 * don't have room, set the OACTIVE flag and wait
3006 * for the NIC to drain the ring.
3007 */
3008 if (bnx_encap(sc, &m_head, &prodidx)) {
3009 ifp->if_flags |= IFF_OACTIVE;
3010 ifp->if_oerrors++;
3011 break;
3012 }
3013 need_trans = 1;
3014
3015 ETHER_BPF_MTAP(ifp, m_head);
3016 }
3017
3018 if (!need_trans)
3019 return;
3020
3021 /* Transmit */
3022 bnx_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
3023
3024 sc->bnx_tx_prodidx = prodidx;
3025
3026 /*
3027 * Set a timeout in case the chip goes out to lunch.
3028 */
3029 ifp->if_timer = 5;
3030}
3031
3032static void
3033bnx_init(void *xsc)
3034{
3035 struct bnx_softc *sc = xsc;
3036 struct ifnet *ifp = &sc->arpcom.ac_if;
3037 uint16_t *m;
3038 uint32_t mode;
3039
3040 ASSERT_SERIALIZED(ifp->if_serializer);
3041
3042 /* Cancel pending I/O and flush buffers. */
3043 bnx_stop(sc);
3044 bnx_reset(sc);
3045 bnx_chipinit(sc);
3046
3047 /*
3048 * Init the various state machines, ring
3049 * control blocks and firmware.
3050 */
3051 if (bnx_blockinit(sc)) {
3052 if_printf(ifp, "initialization failure\n");
3053 bnx_stop(sc);
3054 return;
3055 }
3056
3057 /* Specify MTU. */
3058 CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
3059 ETHER_HDR_LEN + ETHER_CRC_LEN + EVL_ENCAPLEN);
3060
3061 /* Load our MAC address. */
3062 m = (uint16_t *)&sc->arpcom.ac_enaddr[0];
3063 CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
3064 CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
3065
3066 /* Enable or disable promiscuous mode as needed. */
3067 bnx_setpromisc(sc);
3068
3069 /* Program multicast filter. */
3070 bnx_setmulti(sc);
3071
3072 /* Init RX ring. */
3073 if (bnx_init_rx_ring_std(sc)) {
3074 if_printf(ifp, "RX ring initialization failed\n");
3075 bnx_stop(sc);
3076 return;
3077 }
3078
3079 /* Init jumbo RX ring. */
3080 if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN)) {
3081 if (bnx_init_rx_ring_jumbo(sc)) {
3082 if_printf(ifp, "Jumbo RX ring initialization failed\n");
3083 bnx_stop(sc);
3084 return;
3085 }
3086 }
3087
3088 /* Init our RX return ring index */
3089 sc->bnx_rx_saved_considx = 0;
3090
3091 /* Init TX ring. */
3092 bnx_init_tx_ring(sc);
3093
3094 /* Enable TX MAC state machine lockup fix. */
3095 mode = CSR_READ_4(sc, BGE_TX_MODE);
3096 mode |= BGE_TXMODE_MBUF_LOCKUP_FIX;
3097 if (sc->bnx_asicrev == BGE_ASICREV_BCM5720) {
3098 mode &= ~(BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE);
3099 mode |= CSR_READ_4(sc, BGE_TX_MODE) &
3100 (BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE);
3101 }
3102 /* Turn on transmitter */
3103 CSR_WRITE_4(sc, BGE_TX_MODE, mode | BGE_TXMODE_ENABLE);
3104
3105 /* Turn on receiver */
3106 BNX_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
3107
3108 /*
3109 * Set the number of good frames to receive after RX MBUF
3110 * Low Watermark has been reached. After the RX MAC receives
3111 * this number of frames, it will drop subsequent incoming
3112 * frames until the MBUF High Watermark is reached.
3113 */
 3114 if (BNX_IS_57765_FAMILY(sc))
3115 CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 1);
3116 else
3117 CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 2);
3118
3119 if (sc->bnx_irq_type == PCI_INTR_TYPE_MSI) {
3120 if (bootverbose) {
3121 if_printf(ifp, "MSI_MODE: %#x\n",
3122 CSR_READ_4(sc, BGE_MSI_MODE));
3123 }
3124 }
3125
3126 /* Tell firmware we're alive. */
3127 BNX_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3128
3129 /* Enable host interrupts if polling(4) is not enabled. */
3130 PCI_SETBIT(sc->bnx_dev, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA, 4);
3131#ifdef IFPOLL_ENABLE
3132 if (ifp->if_flags & IFF_NPOLLING)
3133 bnx_disable_intr(sc);
3134 else
3135#endif
3136 bnx_enable_intr(sc);
3137
3138 bnx_ifmedia_upd(ifp);
3139
3140 ifp->if_flags |= IFF_RUNNING;
3141 ifp->if_flags &= ~IFF_OACTIVE;
3142
3143 callout_reset_bycpu(&sc->bnx_stat_timer, hz, bnx_tick, sc,
3144 sc->bnx_stat_cpuid);
3145}
3146
3147/*
3148 * Set media options.
3149 */
3150static int
3151bnx_ifmedia_upd(struct ifnet *ifp)
3152{
3153 struct bnx_softc *sc = ifp->if_softc;
3154
3155 /* If this is a 1000baseX NIC, enable the TBI port. */
3156 if (sc->bnx_flags & BNX_FLAG_TBI) {
3157 struct ifmedia *ifm = &sc->bnx_ifmedia;
3158
3159 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
3160 return(EINVAL);
3161
3162 switch(IFM_SUBTYPE(ifm->ifm_media)) {
3163 case IFM_AUTO:
3164 break;
3165
3166 case IFM_1000_SX:
3167 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
3168 BNX_CLRBIT(sc, BGE_MAC_MODE,
3169 BGE_MACMODE_HALF_DUPLEX);
3170 } else {
3171 BNX_SETBIT(sc, BGE_MAC_MODE,
3172 BGE_MACMODE_HALF_DUPLEX);
3173 }
3174 break;
3175 default:
3176 return(EINVAL);
3177 }
3178 } else {
3179 struct mii_data *mii = device_get_softc(sc->bnx_miibus);
3180
3181 sc->bnx_link_evt++;
3182 sc->bnx_link = 0;
3183 if (mii->mii_instance) {
3184 struct mii_softc *miisc;
3185
3186 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
3187 mii_phy_reset(miisc);
3188 }
3189 mii_mediachg(mii);
3190
3191 /*
3192 * Force an interrupt so that we will call bnx_link_upd
3193 * if needed and clear any pending link state attention.
 3194 * Without this we would get no further interrupts for link
 3195 * state changes, would never mark the link UP, and thus
 3196 * could not send in bnx_start; the only way to get things
 3197 * working then was to receive a packet and take an RX
 3198 * intr.
 3199 *
 3200 * bnx_tick should cover fiber cards, so this may not be
 3201 * needed when BNX_FLAG_TBI is set, but as we poll for
 3202 * fiber anyway it should not harm.
3203 */
3204 BNX_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
3205 }
3206 return(0);
3207}
3208
3209/*
3210 * Report current media status.
3211 */
3212static void
3213bnx_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
3214{
3215 struct bnx_softc *sc = ifp->if_softc;
3216
3217 if (sc->bnx_flags & BNX_FLAG_TBI) {
3218 ifmr->ifm_status = IFM_AVALID;
3219 ifmr->ifm_active = IFM_ETHER;
3220 if (CSR_READ_4(sc, BGE_MAC_STS) &
3221 BGE_MACSTAT_TBI_PCS_SYNCHED) {
3222 ifmr->ifm_status |= IFM_ACTIVE;
3223 } else {
3224 ifmr->ifm_active |= IFM_NONE;
3225 return;
3226 }
3227
3228 ifmr->ifm_active |= IFM_1000_SX;
3229 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
3230 ifmr->ifm_active |= IFM_HDX;
3231 else
3232 ifmr->ifm_active |= IFM_FDX;
3233 } else {
3234 struct mii_data *mii = device_get_softc(sc->bnx_miibus);
3235
3236 mii_pollstat(mii);
3237 ifmr->ifm_active = mii->mii_media_active;
3238 ifmr->ifm_status = mii->mii_media_status;
3239 }
3240}
3241
3242static int
3243bnx_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
3244{
3245 struct bnx_softc *sc = ifp->if_softc;
3246 struct ifreq *ifr = (struct ifreq *)data;
3247 int mask, error = 0;
3248
3249 ASSERT_SERIALIZED(ifp->if_serializer);
3250
3251 switch (command) {
3252 case SIOCSIFMTU:
3253 if ((!BNX_IS_JUMBO_CAPABLE(sc) && ifr->ifr_mtu > ETHERMTU) ||
3254 (BNX_IS_JUMBO_CAPABLE(sc) &&
3255 ifr->ifr_mtu > BNX_JUMBO_MTU)) {
3256 error = EINVAL;
3257 } else if (ifp->if_mtu != ifr->ifr_mtu) {
3258 ifp->if_mtu = ifr->ifr_mtu;
3259 if (ifp->if_flags & IFF_RUNNING)
3260 bnx_init(sc);
3261 }
3262 break;
3263 case SIOCSIFFLAGS:
3264 if (ifp->if_flags & IFF_UP) {
3265 if (ifp->if_flags & IFF_RUNNING) {
3266 mask = ifp->if_flags ^ sc->bnx_if_flags;
3267
3268 /*
3269 * If only the state of the PROMISC flag
3270 * changed, then just use the 'set promisc
3271 * mode' command instead of reinitializing
3272 * the entire NIC. Doing a full re-init
3273 * means reloading the firmware and waiting
3274 * for it to start up, which may take a
3275 * second or two. Similarly for ALLMULTI.
3276 */
3277 if (mask & IFF_PROMISC)
3278 bnx_setpromisc(sc);
3279 if (mask & IFF_ALLMULTI)
3280 bnx_setmulti(sc);
3281 } else {
3282 bnx_init(sc);
3283 }
3284 } else if (ifp->if_flags & IFF_RUNNING) {
3285 bnx_stop(sc);
3286 }
3287 sc->bnx_if_flags = ifp->if_flags;
3288 break;
3289 case SIOCADDMULTI:
3290 case SIOCDELMULTI:
3291 if (ifp->if_flags & IFF_RUNNING)
3292 bnx_setmulti(sc);
3293 break;
3294 case SIOCSIFMEDIA:
3295 case SIOCGIFMEDIA:
3296 if (sc->bnx_flags & BNX_FLAG_TBI) {
3297 error = ifmedia_ioctl(ifp, ifr,
3298 &sc->bnx_ifmedia, command);
3299 } else {
3300 struct mii_data *mii;
3301
3302 mii = device_get_softc(sc->bnx_miibus);
3303 error = ifmedia_ioctl(ifp, ifr,
3304 &mii->mii_media, command);
3305 }
3306 break;
3307 case SIOCSIFCAP:
3308 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
3309 if (mask & IFCAP_HWCSUM) {
3310 ifp->if_capenable ^= (mask & IFCAP_HWCSUM);
66deb1c1
SZ
3311 if (ifp->if_capenable & IFCAP_TXCSUM)
3312 ifp->if_hwassist |= BNX_CSUM_FEATURES;
6c8d8ecc 3313 else
66deb1c1
SZ
3314 ifp->if_hwassist &= ~BNX_CSUM_FEATURES;
3315 }
3316 if (mask & IFCAP_TSO) {
3317 ifp->if_capenable ^= (mask & IFCAP_TSO);
3318 if (ifp->if_capenable & IFCAP_TSO)
3319 ifp->if_hwassist |= CSUM_TSO;
3320 else
3321 ifp->if_hwassist &= ~CSUM_TSO;
6c8d8ecc
SZ
3322 }
3323 break;
3324 default:
3325 error = ether_ioctl(ifp, command, data);
3326 break;
3327 }
3328 return error;
3329}
3330
3331static void
3332bnx_watchdog(struct ifnet *ifp)
3333{
3334 struct bnx_softc *sc = ifp->if_softc;
3335
3336 if_printf(ifp, "watchdog timeout -- resetting\n");
3337
3338 bnx_init(sc);
3339
3340 ifp->if_oerrors++;
3341
3342 if (!ifq_is_empty(&ifp->if_snd))
3343 if_devstart(ifp);
3344}
3345
3346/*
3347 * Stop the adapter and free any mbufs allocated to the
3348 * RX and TX lists.
3349 */
3350static void
3351bnx_stop(struct bnx_softc *sc)
3352{
3353 struct ifnet *ifp = &sc->arpcom.ac_if;
3354
3355 ASSERT_SERIALIZED(ifp->if_serializer);
3356
3357 callout_stop(&sc->bnx_stat_timer);
3358
3359 /*
3360 * Disable all of the receiver blocks
3361 */
3362 bnx_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
3363 bnx_stop_block(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
3364 bnx_stop_block(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
3365 bnx_stop_block(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
3366 bnx_stop_block(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
3367 bnx_stop_block(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
3368
3369 /*
3370 * Disable all of the transmit blocks
3371 */
3372 bnx_stop_block(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
3373 bnx_stop_block(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
3374 bnx_stop_block(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
3375 bnx_stop_block(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
3376 bnx_stop_block(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
3377 bnx_stop_block(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
3378
3379 /*
3380 * Shut down all of the memory managers and related
3381 * state machines.
3382 */
3383 bnx_stop_block(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
3384 bnx_stop_block(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
3385 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
3386 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
3387
3388 /* Disable host interrupts. */
3389 bnx_disable_intr(sc);
3390
3391 /*
3392 * Tell firmware we're shutting down.
3393 */
3394 BNX_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3395
3396 /* Free the RX lists. */
3397 bnx_free_rx_ring_std(sc);
3398
3399 /* Free jumbo RX list. */
3400 if (BNX_IS_JUMBO_CAPABLE(sc))
3401 bnx_free_rx_ring_jumbo(sc);
3402
3403 /* Free TX buffers. */
3404 bnx_free_tx_ring(sc);
3405
3406 sc->bnx_status_tag = 0;
3407 sc->bnx_link = 0;
3408 sc->bnx_coal_chg = 0;
3409
3410 sc->bnx_tx_saved_considx = BNX_TXCONS_UNSET;
3411
3412 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
3413 ifp->if_timer = 0;
3414}
3415
3416/*
3417 * Stop all chip I/O so that the kernel's probe routines don't
3418 * get confused by errant DMAs when rebooting.
3419 */
3420static void
3421bnx_shutdown(device_t dev)
3422{
3423 struct bnx_softc *sc = device_get_softc(dev);
3424 struct ifnet *ifp = &sc->arpcom.ac_if;
3425
3426 lwkt_serialize_enter(ifp->if_serializer);
3427 bnx_stop(sc);
3428 bnx_reset(sc);
3429 lwkt_serialize_exit(ifp->if_serializer);
3430}
3431
3432static int
3433bnx_suspend(device_t dev)
3434{
3435 struct bnx_softc *sc = device_get_softc(dev);
3436 struct ifnet *ifp = &sc->arpcom.ac_if;
3437
3438 lwkt_serialize_enter(ifp->if_serializer);
3439 bnx_stop(sc);
3440 lwkt_serialize_exit(ifp->if_serializer);
3441
3442 return 0;
3443}
3444
3445static int
3446bnx_resume(device_t dev)
3447{
3448 struct bnx_softc *sc = device_get_softc(dev);
3449 struct ifnet *ifp = &sc->arpcom.ac_if;
3450
3451 lwkt_serialize_enter(ifp->if_serializer);
3452
3453 if (ifp->if_flags & IFF_UP) {
3454 bnx_init(sc);
3455
3456 if (!ifq_is_empty(&ifp->if_snd))
3457 if_devstart(ifp);
3458 }
3459
3460 lwkt_serialize_exit(ifp->if_serializer);
3461
3462 return 0;
3463}
3464
3465static void
3466bnx_setpromisc(struct bnx_softc *sc)
3467{
3468 struct ifnet *ifp = &sc->arpcom.ac_if;
3469
3470 if (ifp->if_flags & IFF_PROMISC)
3471 BNX_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
3472 else
3473 BNX_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
3474}
3475
3476static void
3477bnx_dma_free(struct bnx_softc *sc)
3478{
3479 int i;
3480
3481 /* Destroy RX mbuf DMA stuffs. */
3482 if (sc->bnx_cdata.bnx_rx_mtag != NULL) {
3483 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
3484 bus_dmamap_destroy(sc->bnx_cdata.bnx_rx_mtag,
3485 sc->bnx_cdata.bnx_rx_std_dmamap[i]);
3486 }
3487 bus_dmamap_destroy(sc->bnx_cdata.bnx_rx_mtag,
3488 sc->bnx_cdata.bnx_rx_tmpmap);
3489 bus_dma_tag_destroy(sc->bnx_cdata.bnx_rx_mtag);
3490 }
3491
3492 /* Destroy TX mbuf DMA stuffs. */
3493 if (sc->bnx_cdata.bnx_tx_mtag != NULL) {
3494 for (i = 0; i < BGE_TX_RING_CNT; i++) {
3495 bus_dmamap_destroy(sc->bnx_cdata.bnx_tx_mtag,
3496 sc->bnx_cdata.bnx_tx_dmamap[i]);
3497 }
3498 bus_dma_tag_destroy(sc->bnx_cdata.bnx_tx_mtag);
3499 }
3500
3501 /* Destroy standard RX ring */
3502 bnx_dma_block_free(sc->bnx_cdata.bnx_rx_std_ring_tag,
3503 sc->bnx_cdata.bnx_rx_std_ring_map,
3504 sc->bnx_ldata.bnx_rx_std_ring);
3505
3506 if (BNX_IS_JUMBO_CAPABLE(sc))
3507 bnx_free_jumbo_mem(sc);
3508
3509 /* Destroy RX return ring */
3510 bnx_dma_block_free(sc->bnx_cdata.bnx_rx_return_ring_tag,
3511 sc->bnx_cdata.bnx_rx_return_ring_map,
3512 sc->bnx_ldata.bnx_rx_return_ring);
3513
3514 /* Destroy TX ring */
3515 bnx_dma_block_free(sc->bnx_cdata.bnx_tx_ring_tag,
3516 sc->bnx_cdata.bnx_tx_ring_map,
3517 sc->bnx_ldata.bnx_tx_ring);
3518
3519 /* Destroy status block */
3520 bnx_dma_block_free(sc->bnx_cdata.bnx_status_tag,
3521 sc->bnx_cdata.bnx_status_map,
3522 sc->bnx_ldata.bnx_status_block);
3523
3524 /* Destroy the parent tag */
3525 if (sc->bnx_cdata.bnx_parent_tag != NULL)
3526 bus_dma_tag_destroy(sc->bnx_cdata.bnx_parent_tag);
3527}
3528
3529static int
3530bnx_dma_alloc(struct bnx_softc *sc)
3531{
3532 struct ifnet *ifp = &sc->arpcom.ac_if;
 3533 bus_size_t txmaxsz;
3534 int i, error;
3535
3536 /*
3537 * Allocate the parent bus DMA tag appropriate for PCI.
3538 *
 3539 * All of the NetExtreme/NetLink controllers have a 4GB
 3540 * boundary DMA bug.
 3541 * Whenever a DMA address crosses a multiple of 4GB
 3542 * (4GB, 8GB, 12GB, etc.), i.e. makes the transition
 3543 * from 0xX_FFFF_FFFF to 0x(X+1)_0000_0000, an internal DMA
 3544 * state machine locks up and causes the device to hang.
3545 */
3546 error = bus_dma_tag_create(NULL, 1, BGE_DMA_BOUNDARY_4G,
3547 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3548 NULL, NULL,
3549 BUS_SPACE_MAXSIZE_32BIT, 0,
3550 BUS_SPACE_MAXSIZE_32BIT,
3551 0, &sc->bnx_cdata.bnx_parent_tag);
3552 if (error) {
3553 if_printf(ifp, "could not allocate parent dma tag\n");
3554 return error;
3555 }
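/*
 * Boundary arithmetic example: an 8KB segment starting at
 * physical 0xFFFFF000 would end at 0x100001000 and thus cross
 * the 4GB line at 0x100000000, triggering the lockup described
 * above.  The BGE_DMA_BOUNDARY_4G argument makes busdma split
 * segments so that no single segment crosses such an address.
 */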
3556
3557 /*
3558 * Create DMA tag and maps for RX mbufs.
3559 */
3560 error = bus_dma_tag_create(sc->bnx_cdata.bnx_parent_tag, 1, 0,
3561 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3562 NULL, NULL, MCLBYTES, 1, MCLBYTES,
3563 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK,
3564 &sc->bnx_cdata.bnx_rx_mtag);
3565 if (error) {
3566 if_printf(ifp, "could not allocate RX mbuf dma tag\n");
3567 return error;
3568 }
3569
3570 error = bus_dmamap_create(sc->bnx_cdata.bnx_rx_mtag,
3571 BUS_DMA_WAITOK, &sc->bnx_cdata.bnx_rx_tmpmap);
3572 if (error) {
3573 bus_dma_tag_destroy(sc->bnx_cdata.bnx_rx_mtag);
3574 sc->bnx_cdata.bnx_rx_mtag = NULL;
3575 return error;
3576 }
3577
3578 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
3579 error = bus_dmamap_create(sc->bnx_cdata.bnx_rx_mtag,
3580 BUS_DMA_WAITOK,
3581 &sc->bnx_cdata.bnx_rx_std_dmamap[i]);
3582 if (error) {
3583 int j;
3584
3585 for (j = 0; j < i; ++j) {
3586 bus_dmamap_destroy(sc->bnx_cdata.bnx_rx_mtag,
3587 sc->bnx_cdata.bnx_rx_std_dmamap[j]);
3588 }
3589 bus_dma_tag_destroy(sc->bnx_cdata.bnx_rx_mtag);
3590 sc->bnx_cdata.bnx_rx_mtag = NULL;
3591
3592 if_printf(ifp, "could not create DMA map for RX\n");
3593 return error;
3594 }
3595 }
3596
3597 /*
3598 * Create DMA tag and maps for TX mbufs.
3599 */
3600 if (sc->bnx_flags & BNX_FLAG_TSO)
3601 txmaxsz = IP_MAXPACKET + sizeof(struct ether_vlan_header);
3602 else
3603 txmaxsz = BNX_JUMBO_FRAMELEN;
3604 error = bus_dma_tag_create(sc->bnx_cdata.bnx_parent_tag, 1, 0,
3605 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3606 NULL, NULL,
66deb1c1 3607 txmaxsz, BNX_NSEG_NEW, PAGE_SIZE,
6c8d8ecc
SZ
3608 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK |
3609 BUS_DMA_ONEBPAGE,
3610 &sc->bnx_cdata.bnx_tx_mtag);
	if (error) {
		if_printf(ifp, "could not allocate TX mbuf dma tag\n");
		return error;
	}

	for (i = 0; i < BGE_TX_RING_CNT; i++) {
		error = bus_dmamap_create(sc->bnx_cdata.bnx_tx_mtag,
		    BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
		    &sc->bnx_cdata.bnx_tx_dmamap[i]);
		if (error) {
			int j;

			for (j = 0; j < i; ++j) {
				bus_dmamap_destroy(sc->bnx_cdata.bnx_tx_mtag,
				    sc->bnx_cdata.bnx_tx_dmamap[j]);
			}
			bus_dma_tag_destroy(sc->bnx_cdata.bnx_tx_mtag);
			sc->bnx_cdata.bnx_tx_mtag = NULL;

			if_printf(ifp, "could not create DMA map for TX\n");
			return error;
		}
	}

	/*
	 * Create DMA resources for the standard RX ring.
	 */
	error = bnx_dma_block_alloc(sc, BGE_STD_RX_RING_SZ,
	    &sc->bnx_cdata.bnx_rx_std_ring_tag,
	    &sc->bnx_cdata.bnx_rx_std_ring_map,
	    (void *)&sc->bnx_ldata.bnx_rx_std_ring,
	    &sc->bnx_ldata.bnx_rx_std_ring_paddr);
	if (error) {
		if_printf(ifp, "could not create std RX ring\n");
		return error;
	}

	/*
	 * Create jumbo buffer pool.
	 */
	if (BNX_IS_JUMBO_CAPABLE(sc)) {
		error = bnx_alloc_jumbo_mem(sc);
		if (error) {
			if_printf(ifp, "could not create jumbo buffer pool\n");
			return error;
		}
	}

	/*
	 * Create DMA resources for the RX return ring.
	 */
	error = bnx_dma_block_alloc(sc,
	    BGE_RX_RTN_RING_SZ(sc->bnx_return_ring_cnt),
	    &sc->bnx_cdata.bnx_rx_return_ring_tag,
	    &sc->bnx_cdata.bnx_rx_return_ring_map,
	    (void *)&sc->bnx_ldata.bnx_rx_return_ring,
	    &sc->bnx_ldata.bnx_rx_return_ring_paddr);
	if (error) {
		if_printf(ifp, "could not create RX ret ring\n");
		return error;
	}

	/*
	 * Create DMA resources for the TX ring.
	 */
	error = bnx_dma_block_alloc(sc, BGE_TX_RING_SZ,
	    &sc->bnx_cdata.bnx_tx_ring_tag,
	    &sc->bnx_cdata.bnx_tx_ring_map,
	    (void *)&sc->bnx_ldata.bnx_tx_ring,
	    &sc->bnx_ldata.bnx_tx_ring_paddr);
	if (error) {
		if_printf(ifp, "could not create TX ring\n");
		return error;
	}

	/*
	 * Create DMA resources for the status block.
	 */
	error = bnx_dma_block_alloc(sc, BGE_STATUS_BLK_SZ,
	    &sc->bnx_cdata.bnx_status_tag,
	    &sc->bnx_cdata.bnx_status_map,
	    (void *)&sc->bnx_ldata.bnx_status_block,
	    &sc->bnx_ldata.bnx_status_block_paddr);
	if (error) {
		if_printf(ifp, "could not create status block\n");
		return error;
	}

	return 0;
}

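/*
 * Allocate one DMA-coherent memory block and return the pieces
 * (tag, map, kernel virtual address and bus address) that
 * bnx_dma_block_free() later needs to tear it down.
 */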
static int
bnx_dma_block_alloc(struct bnx_softc *sc, bus_size_t size, bus_dma_tag_t *tag,
    bus_dmamap_t *map, void **addr, bus_addr_t *paddr)
{
	bus_dmamem_t dmem;
	int error;

	error = bus_dmamem_coherent(sc->bnx_cdata.bnx_parent_tag, PAGE_SIZE, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
	    size, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
	if (error)
		return error;

	*tag = dmem.dmem_tag;
	*map = dmem.dmem_map;
	*addr = dmem.dmem_addr;
	*paddr = dmem.dmem_busaddr;

	return 0;
}

static void
bnx_dma_block_free(bus_dma_tag_t tag, bus_dmamap_t map, void *addr)
{
	if (tag != NULL) {
		bus_dmamap_unload(tag, map);
		bus_dmamem_free(tag, addr, map);
		bus_dma_tag_destroy(tag);
	}
}

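/*
 * Link state handler for fiber NICs running in TBI mode.
 */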
static void
bnx_tbi_link_upd(struct bnx_softc *sc, uint32_t status)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;

#define PCS_ENCODE_ERR	(BGE_MACSTAT_PORT_DECODE_ERROR|BGE_MACSTAT_MI_COMPLETE)

	/*
	 * Sometimes PCS encoding errors are detected in
	 * TBI mode (on fiber NICs), and for some reason
	 * the chip will signal them as link changes.
	 * If we get a link change event, but the 'PCS
	 * encoding error' bit in the MAC status register
	 * is set, don't bother doing a link check.
	 * This avoids spurious "gigabit link up" messages
	 * that sometimes appear on fiber NICs during
	 * periods of heavy traffic.
	 */
	if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
		if (!sc->bnx_link) {
			sc->bnx_link++;
			if (sc->bnx_asicrev == BGE_ASICREV_BCM5704) {
				BNX_CLRBIT(sc, BGE_MAC_MODE,
				    BGE_MACMODE_TBI_SEND_CFGS);
			}
			CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);

			if (bootverbose)
				if_printf(ifp, "link UP\n");

			ifp->if_link_state = LINK_STATE_UP;
			if_link_state_change(ifp);
		}
	} else if ((status & PCS_ENCODE_ERR) != PCS_ENCODE_ERR) {
		if (sc->bnx_link) {
			sc->bnx_link = 0;

			if (bootverbose)
				if_printf(ifp, "link DOWN\n");

			ifp->if_link_state = LINK_STATE_DOWN;
			if_link_state_change(ifp);
		}
	}

#undef PCS_ENCODE_ERR

	/* Clear the attention. */
	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
	    BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
	    BGE_MACSTAT_LINK_CHANGED);
}

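/*
 * Link state handler for copper PHYs: have mii(4) poll the PHY and
 * push the result to the MAC via bnx_miibus_statchg().
 */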
static void
bnx_copper_link_upd(struct bnx_softc *sc, uint32_t status __unused)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii = device_get_softc(sc->bnx_miibus);

	mii_pollstat(mii);
	bnx_miibus_statchg(sc->bnx_dev);

	if (bootverbose) {
		if (sc->bnx_link)
			if_printf(ifp, "link UP\n");
		else
			if_printf(ifp, "link DOWN\n");
	}

	/* Clear the attention. */
	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
	    BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
	    BGE_MACSTAT_LINK_CHANGED);
}

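/*
 * Link state handler used when the MAC autopolls the PHY: derive
 * the link state from the mii(4) media status words instead.
 */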
static void
bnx_autopoll_link_upd(struct bnx_softc *sc, uint32_t status __unused)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii = device_get_softc(sc->bnx_miibus);

	mii_pollstat(mii);

	if (!sc->bnx_link &&
	    (mii->mii_media_status & IFM_ACTIVE) &&
	    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
		sc->bnx_link++;
		if (bootverbose)
			if_printf(ifp, "link UP\n");
	} else if (sc->bnx_link &&
	    (!(mii->mii_media_status & IFM_ACTIVE) ||
	     IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
		sc->bnx_link = 0;
		if (bootverbose)
			if_printf(ifp, "link DOWN\n");
	}

	/* Clear the attention. */
	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
	    BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
	    BGE_MACSTAT_LINK_CHANGED);
}

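/*
 * The six handlers below are thin wrappers: each forwards its
 * coalescing parameter to bnx_sysctl_coal_chg() along with the
 * valid [min, max] range and the change-mask bit that marks the
 * parameter as pending.
 */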
static int
bnx_sysctl_rx_coal_ticks(SYSCTL_HANDLER_ARGS)
{
	struct bnx_softc *sc = arg1;

	return bnx_sysctl_coal_chg(oidp, arg1, arg2, req,
	    &sc->bnx_rx_coal_ticks,
	    BNX_RX_COAL_TICKS_MIN, BNX_RX_COAL_TICKS_MAX,
	    BNX_RX_COAL_TICKS_CHG);
}

static int
bnx_sysctl_tx_coal_ticks(SYSCTL_HANDLER_ARGS)
{
	struct bnx_softc *sc = arg1;

	return bnx_sysctl_coal_chg(oidp, arg1, arg2, req,
	    &sc->bnx_tx_coal_ticks,
	    BNX_TX_COAL_TICKS_MIN, BNX_TX_COAL_TICKS_MAX,
	    BNX_TX_COAL_TICKS_CHG);
}

static int
bnx_sysctl_rx_coal_bds(SYSCTL_HANDLER_ARGS)
{
	struct bnx_softc *sc = arg1;

	return bnx_sysctl_coal_chg(oidp, arg1, arg2, req,
	    &sc->bnx_rx_coal_bds,
	    BNX_RX_COAL_BDS_MIN, BNX_RX_COAL_BDS_MAX,
	    BNX_RX_COAL_BDS_CHG);
}

static int
bnx_sysctl_tx_coal_bds(SYSCTL_HANDLER_ARGS)
{
	struct bnx_softc *sc = arg1;

	return bnx_sysctl_coal_chg(oidp, arg1, arg2, req,
	    &sc->bnx_tx_coal_bds,
	    BNX_TX_COAL_BDS_MIN, BNX_TX_COAL_BDS_MAX,
	    BNX_TX_COAL_BDS_CHG);
}

static int
bnx_sysctl_rx_coal_bds_int(SYSCTL_HANDLER_ARGS)
{
	struct bnx_softc *sc = arg1;

	return bnx_sysctl_coal_chg(oidp, arg1, arg2, req,
	    &sc->bnx_rx_coal_bds_int,
	    BNX_RX_COAL_BDS_MIN, BNX_RX_COAL_BDS_MAX,
	    BNX_RX_COAL_BDS_INT_CHG);
}

static int
bnx_sysctl_tx_coal_bds_int(SYSCTL_HANDLER_ARGS)
{
	struct bnx_softc *sc = arg1;

	return bnx_sysctl_coal_chg(oidp, arg1, arg2, req,
	    &sc->bnx_tx_coal_bds_int,
	    BNX_TX_COAL_BDS_MIN, BNX_TX_COAL_BDS_MAX,
	    BNX_TX_COAL_BDS_INT_CHG);
}

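/*
 * Common handler behind the coalescing sysctls above: under the
 * interface serializer, validate the new value against
 * [coal_min, coal_max], store it, and flag the changed parameter
 * via coal_chg_mask so it can later be committed to the chip.
 */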
static int
bnx_sysctl_coal_chg(SYSCTL_HANDLER_ARGS, uint32_t *coal,
    int coal_min, int coal_max, uint32_t coal_chg_mask)
{
	struct bnx_softc *sc = arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error = 0, v;

	lwkt_serialize_enter(ifp->if_serializer);

	v = *coal;