ifpoll: Make status fraction and TX fraction easier to read
[dragonfly.git] / sys / dev / netif / bnx / if_bnx.c
CommitLineData
6c8d8ecc
SZ
1/*
2 * Copyright (c) 2001 Wind River Systems
3 * Copyright (c) 1997, 1998, 1999, 2001
4 * Bill Paul <wpaul@windriver.com>. All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 * must display the following acknowledgement:
16 * This product includes software developed by Bill Paul.
17 * 4. Neither the name of the author nor the names of any co-contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31 * THE POSSIBILITY OF SUCH DAMAGE.
32 *
33 * $FreeBSD: src/sys/dev/bge/if_bge.c,v 1.3.2.39 2005/07/03 03:41:18 silby Exp $
34 */
35
66deb1c1 36#include "opt_bnx.h"
39a8d43a 37#include "opt_ifpoll.h"
6c8d8ecc
SZ
38
39#include <sys/param.h>
40#include <sys/bus.h>
41#include <sys/endian.h>
42#include <sys/kernel.h>
43#include <sys/interrupt.h>
44#include <sys/mbuf.h>
45#include <sys/malloc.h>
46#include <sys/queue.h>
47#include <sys/rman.h>
48#include <sys/serialize.h>
49#include <sys/socket.h>
50#include <sys/sockio.h>
51#include <sys/sysctl.h>
52
66deb1c1
SZ
53#include <netinet/ip.h>
54#include <netinet/tcp.h>
55
6c8d8ecc
SZ
56#include <net/bpf.h>
57#include <net/ethernet.h>
58#include <net/if.h>
59#include <net/if_arp.h>
60#include <net/if_dl.h>
61#include <net/if_media.h>
39a8d43a 62#include <net/if_poll.h>
6c8d8ecc
SZ
63#include <net/if_types.h>
64#include <net/ifq_var.h>
65#include <net/vlan/if_vlan_var.h>
66#include <net/vlan/if_vlan_ether.h>
67
68#include <dev/netif/mii_layer/mii.h>
69#include <dev/netif/mii_layer/miivar.h>
70#include <dev/netif/mii_layer/brgphyreg.h>
71
72#include <bus/pci/pcidevs.h>
73#include <bus/pci/pcireg.h>
74#include <bus/pci/pcivar.h>
75
76#include <dev/netif/bge/if_bgereg.h>
77#include <dev/netif/bnx/if_bnxvar.h>
78
79/* "device miibus" required. See GENERIC if you get errors here. */
80#include "miibus_if.h"
81
3b18363f 82#define BNX_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP)
6c8d8ecc 83
df9ccc98
SZ
84#define BNX_INTR_CKINTVL ((10 * hz) / 1000) /* 10ms */
85
6c8d8ecc
SZ
86static const struct bnx_type {
87 uint16_t bnx_vid;
88 uint16_t bnx_did;
89 char *bnx_name;
90} bnx_devs[] = {
91 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5717,
92 "Broadcom BCM5717 Gigabit Ethernet" },
93 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5718,
94 "Broadcom BCM5718 Gigabit Ethernet" },
95 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5719,
96 "Broadcom BCM5719 Gigabit Ethernet" },
97 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5720_ALT,
98 "Broadcom BCM5720 Gigabit Ethernet" },
99
100 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57761,
101 "Broadcom BCM57761 Gigabit Ethernet" },
32ff3c80
SZ
102 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57762,
103 "Broadcom BCM57762 Gigabit Ethernet" },
6c8d8ecc
SZ
104 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57765,
105 "Broadcom BCM57765 Gigabit Ethernet" },
32ff3c80
SZ
106 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57766,
107 "Broadcom BCM57766 Gigabit Ethernet" },
108 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57781,
109 "Broadcom BCM57781 Gigabit Ethernet" },
110 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57782,
111 "Broadcom BCM57782 Gigabit Ethernet" },
6c8d8ecc
SZ
112 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57785,
113 "Broadcom BCM57785 Gigabit Ethernet" },
32ff3c80
SZ
114 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57786,
115 "Broadcom BCM57786 Gigabit Ethernet" },
116 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57791,
117 "Broadcom BCM57791 Fast Ethernet" },
6c8d8ecc
SZ
118 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57795,
119 "Broadcom BCM57795 Fast Ethernet" },
120
121 { 0, 0, NULL }
122};
123
124#define BNX_IS_JUMBO_CAPABLE(sc) ((sc)->bnx_flags & BNX_FLAG_JUMBO)
125#define BNX_IS_5717_PLUS(sc) ((sc)->bnx_flags & BNX_FLAG_5717_PLUS)
f368d0d9
SZ
126#define BNX_IS_57765_PLUS(sc) ((sc)->bnx_flags & BNX_FLAG_57765_PLUS)
127#define BNX_IS_57765_FAMILY(sc) \
128 ((sc)->bnx_flags & BNX_FLAG_57765_FAMILY)
6c8d8ecc
SZ
129
130typedef int (*bnx_eaddr_fcn_t)(struct bnx_softc *, uint8_t[]);
131
132static int bnx_probe(device_t);
133static int bnx_attach(device_t);
134static int bnx_detach(device_t);
135static void bnx_shutdown(device_t);
136static int bnx_suspend(device_t);
137static int bnx_resume(device_t);
138static int bnx_miibus_readreg(device_t, int, int);
139static int bnx_miibus_writereg(device_t, int, int, int);
140static void bnx_miibus_statchg(device_t);
141
39a8d43a
SZ
142#ifdef IFPOLL_ENABLE
143static void bnx_npoll(struct ifnet *, struct ifpoll_info *);
144static void bnx_npoll_compat(struct ifnet *, void *, int);
6c8d8ecc
SZ
145#endif
146static void bnx_intr_legacy(void *);
147static void bnx_msi(void *);
148static void bnx_msi_oneshot(void *);
149static void bnx_intr(struct bnx_softc *);
150static void bnx_enable_intr(struct bnx_softc *);
151static void bnx_disable_intr(struct bnx_softc *);
152static void bnx_txeof(struct bnx_softc *, uint16_t);
153static void bnx_rxeof(struct bnx_softc *, uint16_t);
154
155static void bnx_start(struct ifnet *);
156static int bnx_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
157static void bnx_init(void *);
158static void bnx_stop(struct bnx_softc *);
159static void bnx_watchdog(struct ifnet *);
160static int bnx_ifmedia_upd(struct ifnet *);
161static void bnx_ifmedia_sts(struct ifnet *, struct ifmediareq *);
162static void bnx_tick(void *);
163
164static int bnx_alloc_jumbo_mem(struct bnx_softc *);
165static void bnx_free_jumbo_mem(struct bnx_softc *);
166static struct bnx_jslot
167 *bnx_jalloc(struct bnx_softc *);
168static void bnx_jfree(void *);
169static void bnx_jref(void *);
170static int bnx_newbuf_std(struct bnx_softc *, int, int);
171static int bnx_newbuf_jumbo(struct bnx_softc *, int, int);
172static void bnx_setup_rxdesc_std(struct bnx_softc *, int);
173static void bnx_setup_rxdesc_jumbo(struct bnx_softc *, int);
174static int bnx_init_rx_ring_std(struct bnx_softc *);
175static void bnx_free_rx_ring_std(struct bnx_softc *);
176static int bnx_init_rx_ring_jumbo(struct bnx_softc *);
177static void bnx_free_rx_ring_jumbo(struct bnx_softc *);
178static void bnx_free_tx_ring(struct bnx_softc *);
179static int bnx_init_tx_ring(struct bnx_softc *);
180static int bnx_dma_alloc(struct bnx_softc *);
181static void bnx_dma_free(struct bnx_softc *);
182static int bnx_dma_block_alloc(struct bnx_softc *, bus_size_t,
183 bus_dma_tag_t *, bus_dmamap_t *, void **, bus_addr_t *);
184static void bnx_dma_block_free(bus_dma_tag_t, bus_dmamap_t, void *);
185static struct mbuf *
186 bnx_defrag_shortdma(struct mbuf *);
187static int bnx_encap(struct bnx_softc *, struct mbuf **, uint32_t *);
66deb1c1
SZ
188static int bnx_setup_tso(struct bnx_softc *, struct mbuf **,
189 uint16_t *, uint16_t *);
6c8d8ecc
SZ
190
191static void bnx_reset(struct bnx_softc *);
192static int bnx_chipinit(struct bnx_softc *);
193static int bnx_blockinit(struct bnx_softc *);
194static void bnx_stop_block(struct bnx_softc *, bus_size_t, uint32_t);
195static void bnx_enable_msi(struct bnx_softc *sc);
196static void bnx_setmulti(struct bnx_softc *);
197static void bnx_setpromisc(struct bnx_softc *);
198static void bnx_stats_update_regs(struct bnx_softc *);
199static uint32_t bnx_dma_swap_options(struct bnx_softc *);
200
201static uint32_t bnx_readmem_ind(struct bnx_softc *, uint32_t);
202static void bnx_writemem_ind(struct bnx_softc *, uint32_t, uint32_t);
203#ifdef notdef
204static uint32_t bnx_readreg_ind(struct bnx_softc *, uint32_t);
205#endif
206static void bnx_writereg_ind(struct bnx_softc *, uint32_t, uint32_t);
207static void bnx_writemem_direct(struct bnx_softc *, uint32_t, uint32_t);
208static void bnx_writembx(struct bnx_softc *, int, int);
209static uint8_t bnx_nvram_getbyte(struct bnx_softc *, int, uint8_t *);
210static int bnx_read_nvram(struct bnx_softc *, caddr_t, int, int);
211static uint8_t bnx_eeprom_getbyte(struct bnx_softc *, uint32_t, uint8_t *);
212static int bnx_read_eeprom(struct bnx_softc *, caddr_t, uint32_t, size_t);
213
214static void bnx_tbi_link_upd(struct bnx_softc *, uint32_t);
215static void bnx_copper_link_upd(struct bnx_softc *, uint32_t);
216static void bnx_autopoll_link_upd(struct bnx_softc *, uint32_t);
217static void bnx_link_poll(struct bnx_softc *);
218
219static int bnx_get_eaddr_mem(struct bnx_softc *, uint8_t[]);
220static int bnx_get_eaddr_nvram(struct bnx_softc *, uint8_t[]);
221static int bnx_get_eaddr_eeprom(struct bnx_softc *, uint8_t[]);
222static int bnx_get_eaddr(struct bnx_softc *, uint8_t[]);
223
224static void bnx_coal_change(struct bnx_softc *);
225static int bnx_sysctl_rx_coal_ticks(SYSCTL_HANDLER_ARGS);
226static int bnx_sysctl_tx_coal_ticks(SYSCTL_HANDLER_ARGS);
227static int bnx_sysctl_rx_coal_bds(SYSCTL_HANDLER_ARGS);
228static int bnx_sysctl_tx_coal_bds(SYSCTL_HANDLER_ARGS);
229static int bnx_sysctl_rx_coal_bds_int(SYSCTL_HANDLER_ARGS);
230static int bnx_sysctl_tx_coal_bds_int(SYSCTL_HANDLER_ARGS);
231static int bnx_sysctl_coal_chg(SYSCTL_HANDLER_ARGS, uint32_t *,
232 int, int, uint32_t);
39a8d43a
SZ
233#ifdef IFPOLL_ENABLE
234static int bnx_sysctl_npoll_stfrac(SYSCTL_HANDLER_ARGS);
235static int bnx_sysctl_npoll_cpuid(SYSCTL_HANDLER_ARGS);
236#endif
6c8d8ecc
SZ
237
238static int bnx_msi_enable = 1;
239TUNABLE_INT("hw.bnx.msi.enable", &bnx_msi_enable);
240
241static device_method_t bnx_methods[] = {
242 /* Device interface */
243 DEVMETHOD(device_probe, bnx_probe),
244 DEVMETHOD(device_attach, bnx_attach),
245 DEVMETHOD(device_detach, bnx_detach),
246 DEVMETHOD(device_shutdown, bnx_shutdown),
247 DEVMETHOD(device_suspend, bnx_suspend),
248 DEVMETHOD(device_resume, bnx_resume),
249
250 /* bus interface */
251 DEVMETHOD(bus_print_child, bus_generic_print_child),
252 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
253
254 /* MII interface */
255 DEVMETHOD(miibus_readreg, bnx_miibus_readreg),
256 DEVMETHOD(miibus_writereg, bnx_miibus_writereg),
257 DEVMETHOD(miibus_statchg, bnx_miibus_statchg),
258
259 { 0, 0 }
260};
261
262static DEFINE_CLASS_0(bnx, bnx_driver, bnx_methods, sizeof(struct bnx_softc));
263static devclass_t bnx_devclass;
264
265DECLARE_DUMMY_MODULE(if_bnx);
266DRIVER_MODULE(if_bnx, pci, bnx_driver, bnx_devclass, NULL, NULL);
267DRIVER_MODULE(miibus, bnx, miibus_driver, miibus_devclass, NULL, NULL);
268
269static uint32_t
270bnx_readmem_ind(struct bnx_softc *sc, uint32_t off)
271{
272 device_t dev = sc->bnx_dev;
273 uint32_t val;
274
275 if (sc->bnx_asicrev == BGE_ASICREV_BCM5906 &&
276 off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4)
277 return 0;
278
279 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
280 val = pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4);
281 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
282 return (val);
283}
284
285static void
286bnx_writemem_ind(struct bnx_softc *sc, uint32_t off, uint32_t val)
287{
288 device_t dev = sc->bnx_dev;
289
290 if (sc->bnx_asicrev == BGE_ASICREV_BCM5906 &&
291 off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4)
292 return;
293
294 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
295 pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
296 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
297}
298
299#ifdef notdef
300static uint32_t
301bnx_readreg_ind(struct bnx_softc *sc, uin32_t off)
302{
303 device_t dev = sc->bnx_dev;
304
305 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
306 return(pci_read_config(dev, BGE_PCI_REG_DATA, 4));
307}
308#endif
309
310static void
311bnx_writereg_ind(struct bnx_softc *sc, uint32_t off, uint32_t val)
312{
313 device_t dev = sc->bnx_dev;
314
315 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
316 pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);
317}
318
319static void
320bnx_writemem_direct(struct bnx_softc *sc, uint32_t off, uint32_t val)
321{
322 CSR_WRITE_4(sc, off, val);
323}
324
325static void
326bnx_writembx(struct bnx_softc *sc, int off, int val)
327{
328 if (sc->bnx_asicrev == BGE_ASICREV_BCM5906)
329 off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI;
330
331 CSR_WRITE_4(sc, off, val);
332}
333
334static uint8_t
335bnx_nvram_getbyte(struct bnx_softc *sc, int addr, uint8_t *dest)
336{
337 uint32_t access, byte = 0;
338 int i;
339
340 /* Lock. */
341 CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
342 for (i = 0; i < 8000; i++) {
343 if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1)
344 break;
345 DELAY(20);
346 }
347 if (i == 8000)
348 return (1);
349
350 /* Enable access. */
351 access = CSR_READ_4(sc, BGE_NVRAM_ACCESS);
352 CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access | BGE_NVRAMACC_ENABLE);
353
354 CSR_WRITE_4(sc, BGE_NVRAM_ADDR, addr & 0xfffffffc);
355 CSR_WRITE_4(sc, BGE_NVRAM_CMD, BGE_NVRAM_READCMD);
356 for (i = 0; i < BNX_TIMEOUT * 10; i++) {
357 DELAY(10);
358 if (CSR_READ_4(sc, BGE_NVRAM_CMD) & BGE_NVRAMCMD_DONE) {
359 DELAY(10);
360 break;
361 }
362 }
363
364 if (i == BNX_TIMEOUT * 10) {
365 if_printf(&sc->arpcom.ac_if, "nvram read timed out\n");
366 return (1);
367 }
368
369 /* Get result. */
370 byte = CSR_READ_4(sc, BGE_NVRAM_RDDATA);
371
372 *dest = (bswap32(byte) >> ((addr % 4) * 8)) & 0xFF;
373
374 /* Disable access. */
375 CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access);
376
377 /* Unlock. */
378 CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_CLR1);
379 CSR_READ_4(sc, BGE_NVRAM_SWARB);
380
381 return (0);
382}
383
384/*
385 * Read a sequence of bytes from NVRAM.
386 */
387static int
388bnx_read_nvram(struct bnx_softc *sc, caddr_t dest, int off, int cnt)
389{
390 int err = 0, i;
391 uint8_t byte = 0;
392
393 if (sc->bnx_asicrev != BGE_ASICREV_BCM5906)
394 return (1);
395
396 for (i = 0; i < cnt; i++) {
397 err = bnx_nvram_getbyte(sc, off + i, &byte);
398 if (err)
399 break;
400 *(dest + i) = byte;
401 }
402
403 return (err ? 1 : 0);
404}
405
406/*
407 * Read a byte of data stored in the EEPROM at address 'addr.' The
408 * BCM570x supports both the traditional bitbang interface and an
409 * auto access interface for reading the EEPROM. We use the auto
410 * access method.
411 */
412static uint8_t
413bnx_eeprom_getbyte(struct bnx_softc *sc, uint32_t addr, uint8_t *dest)
414{
415 int i;
416 uint32_t byte = 0;
417
418 /*
419 * Enable use of auto EEPROM access so we can avoid
420 * having to use the bitbang method.
421 */
422 BNX_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);
423
424 /* Reset the EEPROM, load the clock period. */
425 CSR_WRITE_4(sc, BGE_EE_ADDR,
426 BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
427 DELAY(20);
428
429 /* Issue the read EEPROM command. */
430 CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);
431
432 /* Wait for completion */
433 for(i = 0; i < BNX_TIMEOUT * 10; i++) {
434 DELAY(10);
435 if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
436 break;
437 }
438
439 if (i == BNX_TIMEOUT) {
440 if_printf(&sc->arpcom.ac_if, "eeprom read timed out\n");
441 return(1);
442 }
443
444 /* Get result. */
445 byte = CSR_READ_4(sc, BGE_EE_DATA);
446
447 *dest = (byte >> ((addr % 4) * 8)) & 0xFF;
448
449 return(0);
450}
451
452/*
453 * Read a sequence of bytes from the EEPROM.
454 */
455static int
456bnx_read_eeprom(struct bnx_softc *sc, caddr_t dest, uint32_t off, size_t len)
457{
458 size_t i;
459 int err;
460 uint8_t byte;
461
462 for (byte = 0, err = 0, i = 0; i < len; i++) {
463 err = bnx_eeprom_getbyte(sc, off + i, &byte);
464 if (err)
465 break;
466 *(dest + i) = byte;
467 }
468
469 return(err ? 1 : 0);
470}
471
472static int
473bnx_miibus_readreg(device_t dev, int phy, int reg)
474{
475 struct bnx_softc *sc = device_get_softc(dev);
476 uint32_t val;
477 int i;
478
479 KASSERT(phy == sc->bnx_phyno,
480 ("invalid phyno %d, should be %d", phy, sc->bnx_phyno));
481
482 /* Clear the autopoll bit if set, otherwise may trigger PCI errors. */
483 if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) {
484 CSR_WRITE_4(sc, BGE_MI_MODE,
485 sc->bnx_mi_mode & ~BGE_MIMODE_AUTOPOLL);
486 DELAY(80);
487 }
488
489 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ | BGE_MICOMM_BUSY |
490 BGE_MIPHY(phy) | BGE_MIREG(reg));
491
492 /* Poll for the PHY register access to complete. */
493 for (i = 0; i < BNX_TIMEOUT; i++) {
494 DELAY(10);
495 val = CSR_READ_4(sc, BGE_MI_COMM);
496 if ((val & BGE_MICOMM_BUSY) == 0) {
497 DELAY(5);
498 val = CSR_READ_4(sc, BGE_MI_COMM);
499 break;
500 }
501 }
502 if (i == BNX_TIMEOUT) {
503 if_printf(&sc->arpcom.ac_if, "PHY read timed out "
504 "(phy %d, reg %d, val 0x%08x)\n", phy, reg, val);
505 val = 0;
506 }
507
508 /* Restore the autopoll bit if necessary. */
509 if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) {
510 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bnx_mi_mode);
511 DELAY(80);
512 }
513
514 if (val & BGE_MICOMM_READFAIL)
515 return 0;
516
517 return (val & 0xFFFF);
518}
519
520static int
521bnx_miibus_writereg(device_t dev, int phy, int reg, int val)
522{
523 struct bnx_softc *sc = device_get_softc(dev);
524 int i;
525
526 KASSERT(phy == sc->bnx_phyno,
527 ("invalid phyno %d, should be %d", phy, sc->bnx_phyno));
528
529 if (sc->bnx_asicrev == BGE_ASICREV_BCM5906 &&
530 (reg == BRGPHY_MII_1000CTL || reg == BRGPHY_MII_AUXCTL))
531 return 0;
532
533 /* Clear the autopoll bit if set, otherwise may trigger PCI errors. */
534 if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) {
535 CSR_WRITE_4(sc, BGE_MI_MODE,
536 sc->bnx_mi_mode & ~BGE_MIMODE_AUTOPOLL);
537 DELAY(80);
538 }
539
540 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE | BGE_MICOMM_BUSY |
541 BGE_MIPHY(phy) | BGE_MIREG(reg) | val);
542
543 for (i = 0; i < BNX_TIMEOUT; i++) {
544 DELAY(10);
545 if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) {
546 DELAY(5);
547 CSR_READ_4(sc, BGE_MI_COMM); /* dummy read */
548 break;
549 }
550 }
551 if (i == BNX_TIMEOUT) {
552 if_printf(&sc->arpcom.ac_if, "PHY write timed out "
553 "(phy %d, reg %d, val %d)\n", phy, reg, val);
554 }
555
556 /* Restore the autopoll bit if necessary. */
557 if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) {
558 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bnx_mi_mode);
559 DELAY(80);
560 }
561
562 return 0;
563}
564
565static void
566bnx_miibus_statchg(device_t dev)
567{
568 struct bnx_softc *sc;
569 struct mii_data *mii;
570
571 sc = device_get_softc(dev);
572 mii = device_get_softc(sc->bnx_miibus);
573
574 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
575 (IFM_ACTIVE | IFM_AVALID)) {
576 switch (IFM_SUBTYPE(mii->mii_media_active)) {
577 case IFM_10_T:
578 case IFM_100_TX:
579 sc->bnx_link = 1;
580 break;
581 case IFM_1000_T:
582 case IFM_1000_SX:
583 case IFM_2500_SX:
584 if (sc->bnx_asicrev != BGE_ASICREV_BCM5906)
585 sc->bnx_link = 1;
586 else
587 sc->bnx_link = 0;
588 break;
589 default:
590 sc->bnx_link = 0;
591 break;
592 }
593 } else {
594 sc->bnx_link = 0;
595 }
596 if (sc->bnx_link == 0)
597 return;
598
599 BNX_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
600 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
601 IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) {
602 BNX_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
603 } else {
604 BNX_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
605 }
606
607 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
608 BNX_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
609 } else {
610 BNX_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
611 }
612}
613
614/*
615 * Memory management for jumbo frames.
616 */
617static int
618bnx_alloc_jumbo_mem(struct bnx_softc *sc)
619{
620 struct ifnet *ifp = &sc->arpcom.ac_if;
621 struct bnx_jslot *entry;
622 uint8_t *ptr;
623 bus_addr_t paddr;
624 int i, error;
625
626 /*
627 * Create tag for jumbo mbufs.
628 * This is really a bit of a kludge. We allocate a special
629 * jumbo buffer pool which (thanks to the way our DMA
630 * memory allocation works) will consist of contiguous
631 * pages. This means that even though a jumbo buffer might
632 * be larger than a page size, we don't really need to
633 * map it into more than one DMA segment. However, the
634 * default mbuf tag will result in multi-segment mappings,
635 * so we have to create a special jumbo mbuf tag that
636 * lets us get away with mapping the jumbo buffers as
637 * a single segment. I think eventually the driver should
638 * be changed so that it uses ordinary mbufs and cluster
639 * buffers, i.e. jumbo frames can span multiple DMA
640 * descriptors. But that's a project for another day.
641 */
642
643 /*
644 * Create DMA stuffs for jumbo RX ring.
645 */
646 error = bnx_dma_block_alloc(sc, BGE_JUMBO_RX_RING_SZ,
647 &sc->bnx_cdata.bnx_rx_jumbo_ring_tag,
648 &sc->bnx_cdata.bnx_rx_jumbo_ring_map,
649 (void *)&sc->bnx_ldata.bnx_rx_jumbo_ring,
650 &sc->bnx_ldata.bnx_rx_jumbo_ring_paddr);
651 if (error) {
652 if_printf(ifp, "could not create jumbo RX ring\n");
653 return error;
654 }
655
656 /*
657 * Create DMA stuffs for jumbo buffer block.
658 */
659 error = bnx_dma_block_alloc(sc, BNX_JMEM,
660 &sc->bnx_cdata.bnx_jumbo_tag,
661 &sc->bnx_cdata.bnx_jumbo_map,
662 (void **)&sc->bnx_ldata.bnx_jumbo_buf,
663 &paddr);
664 if (error) {
665 if_printf(ifp, "could not create jumbo buffer\n");
666 return error;
667 }
668
669 SLIST_INIT(&sc->bnx_jfree_listhead);
670
671 /*
672 * Now divide it up into 9K pieces and save the addresses
673 * in an array. Note that we play an evil trick here by using
674 * the first few bytes in the buffer to hold the the address
675 * of the softc structure for this interface. This is because
676 * bnx_jfree() needs it, but it is called by the mbuf management
677 * code which will not pass it to us explicitly.
678 */
679 for (i = 0, ptr = sc->bnx_ldata.bnx_jumbo_buf; i < BNX_JSLOTS; i++) {
680 entry = &sc->bnx_cdata.bnx_jslots[i];
681 entry->bnx_sc = sc;
682 entry->bnx_buf = ptr;
683 entry->bnx_paddr = paddr;
684 entry->bnx_inuse = 0;
685 entry->bnx_slot = i;
686 SLIST_INSERT_HEAD(&sc->bnx_jfree_listhead, entry, jslot_link);
687
688 ptr += BNX_JLEN;
689 paddr += BNX_JLEN;
690 }
691 return 0;
692}
693
694static void
695bnx_free_jumbo_mem(struct bnx_softc *sc)
696{
697 /* Destroy jumbo RX ring. */
698 bnx_dma_block_free(sc->bnx_cdata.bnx_rx_jumbo_ring_tag,
699 sc->bnx_cdata.bnx_rx_jumbo_ring_map,
700 sc->bnx_ldata.bnx_rx_jumbo_ring);
701
702 /* Destroy jumbo buffer block. */
703 bnx_dma_block_free(sc->bnx_cdata.bnx_jumbo_tag,
704 sc->bnx_cdata.bnx_jumbo_map,
705 sc->bnx_ldata.bnx_jumbo_buf);
706}
707
708/*
709 * Allocate a jumbo buffer.
710 */
711static struct bnx_jslot *
712bnx_jalloc(struct bnx_softc *sc)
713{
714 struct bnx_jslot *entry;
715
716 lwkt_serialize_enter(&sc->bnx_jslot_serializer);
717 entry = SLIST_FIRST(&sc->bnx_jfree_listhead);
718 if (entry) {
719 SLIST_REMOVE_HEAD(&sc->bnx_jfree_listhead, jslot_link);
720 entry->bnx_inuse = 1;
721 } else {
722 if_printf(&sc->arpcom.ac_if, "no free jumbo buffers\n");
723 }
724 lwkt_serialize_exit(&sc->bnx_jslot_serializer);
725 return(entry);
726}
727
728/*
729 * Adjust usage count on a jumbo buffer.
730 */
731static void
732bnx_jref(void *arg)
733{
734 struct bnx_jslot *entry = (struct bnx_jslot *)arg;
735 struct bnx_softc *sc = entry->bnx_sc;
736
737 if (sc == NULL)
738 panic("bnx_jref: can't find softc pointer!");
739
740 if (&sc->bnx_cdata.bnx_jslots[entry->bnx_slot] != entry) {
741 panic("bnx_jref: asked to reference buffer "
742 "that we don't manage!");
743 } else if (entry->bnx_inuse == 0) {
744 panic("bnx_jref: buffer already free!");
745 } else {
746 atomic_add_int(&entry->bnx_inuse, 1);
747 }
748}
749
750/*
751 * Release a jumbo buffer.
752 */
753static void
754bnx_jfree(void *arg)
755{
756 struct bnx_jslot *entry = (struct bnx_jslot *)arg;
757 struct bnx_softc *sc = entry->bnx_sc;
758
759 if (sc == NULL)
760 panic("bnx_jfree: can't find softc pointer!");
761
762 if (&sc->bnx_cdata.bnx_jslots[entry->bnx_slot] != entry) {
763 panic("bnx_jfree: asked to free buffer that we don't manage!");
764 } else if (entry->bnx_inuse == 0) {
765 panic("bnx_jfree: buffer already free!");
766 } else {
767 /*
768 * Possible MP race to 0, use the serializer. The atomic insn
769 * is still needed for races against bnx_jref().
770 */
771 lwkt_serialize_enter(&sc->bnx_jslot_serializer);
772 atomic_subtract_int(&entry->bnx_inuse, 1);
773 if (entry->bnx_inuse == 0) {
774 SLIST_INSERT_HEAD(&sc->bnx_jfree_listhead,
775 entry, jslot_link);
776 }
777 lwkt_serialize_exit(&sc->bnx_jslot_serializer);
778 }
779}
780
781
782/*
783 * Intialize a standard receive ring descriptor.
784 */
785static int
786bnx_newbuf_std(struct bnx_softc *sc, int i, int init)
787{
788 struct mbuf *m_new = NULL;
789 bus_dma_segment_t seg;
790 bus_dmamap_t map;
791 int error, nsegs;
792
793 m_new = m_getcl(init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
794 if (m_new == NULL)
795 return ENOBUFS;
796 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
797 m_adj(m_new, ETHER_ALIGN);
798
799 error = bus_dmamap_load_mbuf_segment(sc->bnx_cdata.bnx_rx_mtag,
800 sc->bnx_cdata.bnx_rx_tmpmap, m_new,
801 &seg, 1, &nsegs, BUS_DMA_NOWAIT);
802 if (error) {
803 m_freem(m_new);
804 return error;
805 }
806
807 if (!init) {
808 bus_dmamap_sync(sc->bnx_cdata.bnx_rx_mtag,
809 sc->bnx_cdata.bnx_rx_std_dmamap[i],
810 BUS_DMASYNC_POSTREAD);
811 bus_dmamap_unload(sc->bnx_cdata.bnx_rx_mtag,
812 sc->bnx_cdata.bnx_rx_std_dmamap[i]);
813 }
814
815 map = sc->bnx_cdata.bnx_rx_tmpmap;
816 sc->bnx_cdata.bnx_rx_tmpmap = sc->bnx_cdata.bnx_rx_std_dmamap[i];
817 sc->bnx_cdata.bnx_rx_std_dmamap[i] = map;
818
819 sc->bnx_cdata.bnx_rx_std_chain[i].bnx_mbuf = m_new;
820 sc->bnx_cdata.bnx_rx_std_chain[i].bnx_paddr = seg.ds_addr;
821
822 bnx_setup_rxdesc_std(sc, i);
823 return 0;
824}
825
826static void
827bnx_setup_rxdesc_std(struct bnx_softc *sc, int i)
828{
829 struct bnx_rxchain *rc;
830 struct bge_rx_bd *r;
831
832 rc = &sc->bnx_cdata.bnx_rx_std_chain[i];
833 r = &sc->bnx_ldata.bnx_rx_std_ring[i];
834
835 r->bge_addr.bge_addr_lo = BGE_ADDR_LO(rc->bnx_paddr);
836 r->bge_addr.bge_addr_hi = BGE_ADDR_HI(rc->bnx_paddr);
837 r->bge_len = rc->bnx_mbuf->m_len;
838 r->bge_idx = i;
839 r->bge_flags = BGE_RXBDFLAG_END;
840}
841
842/*
843 * Initialize a jumbo receive ring descriptor. This allocates
844 * a jumbo buffer from the pool managed internally by the driver.
845 */
846static int
847bnx_newbuf_jumbo(struct bnx_softc *sc, int i, int init)
848{
849 struct mbuf *m_new = NULL;
850 struct bnx_jslot *buf;
851 bus_addr_t paddr;
852
853 /* Allocate the mbuf. */
854 MGETHDR(m_new, init ? MB_WAIT : MB_DONTWAIT, MT_DATA);
855 if (m_new == NULL)
856 return ENOBUFS;
857
858 /* Allocate the jumbo buffer */
859 buf = bnx_jalloc(sc);
860 if (buf == NULL) {
861 m_freem(m_new);
862 return ENOBUFS;
863 }
864
865 /* Attach the buffer to the mbuf. */
866 m_new->m_ext.ext_arg = buf;
867 m_new->m_ext.ext_buf = buf->bnx_buf;
868 m_new->m_ext.ext_free = bnx_jfree;
869 m_new->m_ext.ext_ref = bnx_jref;
870 m_new->m_ext.ext_size = BNX_JUMBO_FRAMELEN;
871
872 m_new->m_flags |= M_EXT;
873
874 m_new->m_data = m_new->m_ext.ext_buf;
875 m_new->m_len = m_new->m_pkthdr.len = m_new->m_ext.ext_size;
876
877 paddr = buf->bnx_paddr;
878 m_adj(m_new, ETHER_ALIGN);
879 paddr += ETHER_ALIGN;
880
881 /* Save necessary information */
882 sc->bnx_cdata.bnx_rx_jumbo_chain[i].bnx_mbuf = m_new;
883 sc->bnx_cdata.bnx_rx_jumbo_chain[i].bnx_paddr = paddr;
884
885 /* Set up the descriptor. */
886 bnx_setup_rxdesc_jumbo(sc, i);
887 return 0;
888}
889
890static void
891bnx_setup_rxdesc_jumbo(struct bnx_softc *sc, int i)
892{
893 struct bge_rx_bd *r;
894 struct bnx_rxchain *rc;
895
896 r = &sc->bnx_ldata.bnx_rx_jumbo_ring[i];
897 rc = &sc->bnx_cdata.bnx_rx_jumbo_chain[i];
898
899 r->bge_addr.bge_addr_lo = BGE_ADDR_LO(rc->bnx_paddr);
900 r->bge_addr.bge_addr_hi = BGE_ADDR_HI(rc->bnx_paddr);
901 r->bge_len = rc->bnx_mbuf->m_len;
902 r->bge_idx = i;
903 r->bge_flags = BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING;
904}
905
906static int
907bnx_init_rx_ring_std(struct bnx_softc *sc)
908{
909 int i, error;
910
911 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
912 error = bnx_newbuf_std(sc, i, 1);
913 if (error)
914 return error;
915 };
916
917 sc->bnx_std = BGE_STD_RX_RING_CNT - 1;
918 bnx_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bnx_std);
919
920 return(0);
921}
922
923static void
924bnx_free_rx_ring_std(struct bnx_softc *sc)
925{
926 int i;
927
928 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
929 struct bnx_rxchain *rc = &sc->bnx_cdata.bnx_rx_std_chain[i];
930
931 if (rc->bnx_mbuf != NULL) {
932 bus_dmamap_unload(sc->bnx_cdata.bnx_rx_mtag,
933 sc->bnx_cdata.bnx_rx_std_dmamap[i]);
934 m_freem(rc->bnx_mbuf);
935 rc->bnx_mbuf = NULL;
936 }
937 bzero(&sc->bnx_ldata.bnx_rx_std_ring[i],
938 sizeof(struct bge_rx_bd));
939 }
940}
941
942static int
943bnx_init_rx_ring_jumbo(struct bnx_softc *sc)
944{
945 struct bge_rcb *rcb;
946 int i, error;
947
948 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
949 error = bnx_newbuf_jumbo(sc, i, 1);
950 if (error)
951 return error;
952 };
953
954 sc->bnx_jumbo = BGE_JUMBO_RX_RING_CNT - 1;
955
956 rcb = &sc->bnx_ldata.bnx_info.bnx_jumbo_rx_rcb;
957 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 0);
958 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
959
960 bnx_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bnx_jumbo);
961
962 return(0);
963}
964
965static void
966bnx_free_rx_ring_jumbo(struct bnx_softc *sc)
967{
968 int i;
969
970 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
971 struct bnx_rxchain *rc = &sc->bnx_cdata.bnx_rx_jumbo_chain[i];
972
973 if (rc->bnx_mbuf != NULL) {
974 m_freem(rc->bnx_mbuf);
975 rc->bnx_mbuf = NULL;
976 }
977 bzero(&sc->bnx_ldata.bnx_rx_jumbo_ring[i],
978 sizeof(struct bge_rx_bd));
979 }
980}
981
982static void
983bnx_free_tx_ring(struct bnx_softc *sc)
984{
985 int i;
986
987 for (i = 0; i < BGE_TX_RING_CNT; i++) {
988 if (sc->bnx_cdata.bnx_tx_chain[i] != NULL) {
989 bus_dmamap_unload(sc->bnx_cdata.bnx_tx_mtag,
990 sc->bnx_cdata.bnx_tx_dmamap[i]);
991 m_freem(sc->bnx_cdata.bnx_tx_chain[i]);
992 sc->bnx_cdata.bnx_tx_chain[i] = NULL;
993 }
994 bzero(&sc->bnx_ldata.bnx_tx_ring[i],
995 sizeof(struct bge_tx_bd));
996 }
997}
998
999static int
1000bnx_init_tx_ring(struct bnx_softc *sc)
1001{
1002 sc->bnx_txcnt = 0;
1003 sc->bnx_tx_saved_considx = 0;
1004 sc->bnx_tx_prodidx = 0;
1005
1006 /* Initialize transmit producer index for host-memory send ring. */
1007 bnx_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bnx_tx_prodidx);
1008 bnx_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1009
1010 return(0);
1011}
1012
1013static void
1014bnx_setmulti(struct bnx_softc *sc)
1015{
1016 struct ifnet *ifp;
1017 struct ifmultiaddr *ifma;
1018 uint32_t hashes[4] = { 0, 0, 0, 0 };
1019 int h, i;
1020
1021 ifp = &sc->arpcom.ac_if;
1022
1023 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
1024 for (i = 0; i < 4; i++)
1025 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
1026 return;
1027 }
1028
1029 /* First, zot all the existing filters. */
1030 for (i = 0; i < 4; i++)
1031 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);
1032
1033 /* Now program new ones. */
1034 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1035 if (ifma->ifma_addr->sa_family != AF_LINK)
1036 continue;
1037 h = ether_crc32_le(
1038 LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
1039 ETHER_ADDR_LEN) & 0x7f;
1040 hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
1041 }
1042
1043 for (i = 0; i < 4; i++)
1044 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
1045}
1046
1047/*
1048 * Do endian, PCI and DMA initialization. Also check the on-board ROM
1049 * self-test results.
1050 */
1051static int
1052bnx_chipinit(struct bnx_softc *sc)
1053{
1054 uint32_t dma_rw_ctl, mode_ctl;
1055 int i;
1056
1057 /* Set endian type before we access any non-PCI registers. */
1058 pci_write_config(sc->bnx_dev, BGE_PCI_MISC_CTL,
1059 BGE_INIT | BGE_PCIMISCCTL_TAGGED_STATUS, 4);
1060
1061 /* Clear the MAC control register */
1062 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
1063
1064 /*
1065 * Clear the MAC statistics block in the NIC's
1066 * internal memory.
1067 */
1068 for (i = BGE_STATS_BLOCK;
1069 i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t))
1070 BNX_MEMWIN_WRITE(sc, i, 0);
1071
1072 for (i = BGE_STATUS_BLOCK;
1073 i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t))
1074 BNX_MEMWIN_WRITE(sc, i, 0);
1075
d7872545
SZ
1076 if (BNX_IS_57765_FAMILY(sc)) {
1077 uint32_t val;
1078
1079 if (sc->bnx_chipid == BGE_CHIPID_BCM57765_A0) {
1080 mode_ctl = CSR_READ_4(sc, BGE_MODE_CTL);
1081 val = mode_ctl & ~BGE_MODECTL_PCIE_PORTS;
1082
1083 /* Access the lower 1K of PL PCI-E block registers. */
1084 CSR_WRITE_4(sc, BGE_MODE_CTL,
1085 val | BGE_MODECTL_PCIE_PL_SEL);
1086
1087 val = CSR_READ_4(sc, BGE_PCIE_PL_LO_PHYCTL5);
1088 val |= BGE_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ;
1089 CSR_WRITE_4(sc, BGE_PCIE_PL_LO_PHYCTL5, val);
1090
1091 CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl);
1092 }
1093 if (sc->bnx_chiprev != BGE_CHIPREV_57765_AX) {
1094 mode_ctl = CSR_READ_4(sc, BGE_MODE_CTL);
1095 val = mode_ctl & ~BGE_MODECTL_PCIE_PORTS;
1096
1097 /* Access the lower 1K of DL PCI-E block registers. */
1098 CSR_WRITE_4(sc, BGE_MODE_CTL,
1099 val | BGE_MODECTL_PCIE_DL_SEL);
1100
1101 val = CSR_READ_4(sc, BGE_PCIE_DL_LO_FTSMAX);
1102 val &= ~BGE_PCIE_DL_LO_FTSMAX_MASK;
1103 val |= BGE_PCIE_DL_LO_FTSMAX_VAL;
1104 CSR_WRITE_4(sc, BGE_PCIE_DL_LO_FTSMAX, val);
1105
1106 CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl);
1107 }
1108
1109 val = CSR_READ_4(sc, BGE_CPMU_LSPD_10MB_CLK);
1110 val &= ~BGE_CPMU_LSPD_10MB_MACCLK_MASK;
1111 val |= BGE_CPMU_LSPD_10MB_MACCLK_6_25;
1112 CSR_WRITE_4(sc, BGE_CPMU_LSPD_10MB_CLK, val);
1113 }
1114
2890cca3
SZ
1115 /*
1116 * Set up the PCI DMA control register.
1117 */
1118 dma_rw_ctl = pci_read_config(sc->bnx_dev, BGE_PCI_DMA_RW_CTL, 4);
1119 /*
1120 * Disable 32bytes cache alignment for DMA write to host memory
1121 *
1122 * NOTE:
1123 * 64bytes cache alignment for DMA write to host memory is still
1124 * enabled.
1125 */
1126 dma_rw_ctl |= BGE_PCIDMARWCTL_DIS_CACHE_ALIGNMENT;
1127 if (sc->bnx_chipid == BGE_CHIPID_BCM57765_A0)
1128 dma_rw_ctl &= ~BGE_PCIDMARWCTL_CRDRDR_RDMA_MRRS_MSK;
1129 /*
1130 * Enable HW workaround for controllers that misinterpret
1131 * a status tag update and leave interrupts permanently
1132 * disabled.
1133 */
1134 if (sc->bnx_asicrev != BGE_ASICREV_BCM5717 &&
1135 !BNX_IS_57765_FAMILY(sc))
1136 dma_rw_ctl |= BGE_PCIDMARWCTL_TAGGED_STATUS_WA;
1137 if (bootverbose) {
1138 if_printf(&sc->arpcom.ac_if, "DMA read/write %#x\n",
1139 dma_rw_ctl);
6c8d8ecc
SZ
1140 }
1141 pci_write_config(sc->bnx_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);
1142
1143 /*
1144 * Set up general mode register.
1145 */
1146 mode_ctl = bnx_dma_swap_options(sc) | BGE_MODECTL_MAC_ATTN_INTR |
1147 BGE_MODECTL_HOST_SEND_BDS | BGE_MODECTL_TX_NO_PHDR_CSUM;
1148 CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl);
1149
1150 /*
1151 * Disable memory write invalidate. Apparently it is not supported
1152 * properly by these devices. Also ensure that INTx isn't disabled,
1153 * as these chips need it even when using MSI.
1154 */
1155 PCI_CLRBIT(sc->bnx_dev, BGE_PCI_CMD,
1156 (PCIM_CMD_MWRICEN | PCIM_CMD_INTxDIS), 4);
1157
1158 /* Set the timer prescaler (always 66Mhz) */
1159 CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/);
1160
1161 if (sc->bnx_asicrev == BGE_ASICREV_BCM5906) {
1162 DELAY(40); /* XXX */
1163
1164 /* Put PHY into ready state */
1165 BNX_CLRBIT(sc, BGE_MISC_CFG, BGE_MISCCFG_EPHY_IDDQ);
1166 CSR_READ_4(sc, BGE_MISC_CFG); /* Flush */
1167 DELAY(40);
1168 }
1169
1170 return(0);
1171}
1172
1173static int
1174bnx_blockinit(struct bnx_softc *sc)
1175{
1176 struct bge_rcb *rcb;
1177 bus_size_t vrcb;
1178 bge_hostaddr taddr;
1179 uint32_t val;
1180 int i, limit;
1181
1182 /*
1183 * Initialize the memory window pointer register so that
1184 * we can access the first 32K of internal NIC RAM. This will
1185 * allow us to set up the TX send ring RCBs and the RX return
1186 * ring RCBs, plus other things which live in NIC memory.
1187 */
1188 CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);
1189
1190 /* Configure mbuf pool watermarks */
f368d0d9 1191 if (BNX_IS_57765_PLUS(sc)) {
6c8d8ecc
SZ
1192 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1193 if (sc->arpcom.ac_if.if_mtu > ETHERMTU) {
1194 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x7e);
1195 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xea);
1196 } else {
1197 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x2a);
1198 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xa0);
1199 }
1200 } else if (sc->bnx_asicrev == BGE_ASICREV_BCM5906) {
1201 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1202 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x04);
1203 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x10);
1204 } else {
1205 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1206 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
1207 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1208 }
1209
1210 /* Configure DMA resource watermarks */
1211 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
1212 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);
1213
1214 /* Enable buffer manager */
1215 val = BGE_BMANMODE_ENABLE | BGE_BMANMODE_LOMBUF_ATTN;
1216 /*
1217 * Change the arbitration algorithm of TXMBUF read request to
1218 * round-robin instead of priority based for BCM5719. When
1219 * TXFIFO is almost empty, RDMA will hold its request until
1220 * TXFIFO is not almost empty.
1221 */
1222 if (sc->bnx_asicrev == BGE_ASICREV_BCM5719)
1223 val |= BGE_BMANMODE_NO_TX_UNDERRUN;
e5eebe34
SZ
1224 if (sc->bnx_asicrev == BGE_ASICREV_BCM5717 ||
1225 sc->bnx_chipid == BGE_CHIPID_BCM5719_A0 ||
1226 sc->bnx_chipid == BGE_CHIPID_BCM5720_A0)
1227 val |= BGE_BMANMODE_LOMBUF_ATTN;
6c8d8ecc
SZ
1228 CSR_WRITE_4(sc, BGE_BMAN_MODE, val);
1229
1230 /* Poll for buffer manager start indication */
1231 for (i = 0; i < BNX_TIMEOUT; i++) {
1232 if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
1233 break;
1234 DELAY(10);
1235 }
1236
1237 if (i == BNX_TIMEOUT) {
1238 if_printf(&sc->arpcom.ac_if,
1239 "buffer manager failed to start\n");
1240 return(ENXIO);
1241 }
1242
1243 /* Enable flow-through queues */
1244 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
1245 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
1246
1247 /* Wait until queue initialization is complete */
1248 for (i = 0; i < BNX_TIMEOUT; i++) {
1249 if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
1250 break;
1251 DELAY(10);
1252 }
1253
1254 if (i == BNX_TIMEOUT) {
1255 if_printf(&sc->arpcom.ac_if,
1256 "flow-through queue init failed\n");
1257 return(ENXIO);
1258 }
1259
1260 /*
1261 * Summary of rings supported by the controller:
1262 *
1263 * Standard Receive Producer Ring
1264 * - This ring is used to feed receive buffers for "standard"
1265 * sized frames (typically 1536 bytes) to the controller.
1266 *
1267 * Jumbo Receive Producer Ring
1268 * - This ring is used to feed receive buffers for jumbo sized
1269 * frames (i.e. anything bigger than the "standard" frames)
1270 * to the controller.
1271 *
1272 * Mini Receive Producer Ring
1273 * - This ring is used to feed receive buffers for "mini"
1274 * sized frames to the controller.
1275 * - This feature required external memory for the controller
1276 * but was never used in a production system. Should always
1277 * be disabled.
1278 *
1279 * Receive Return Ring
1280 * - After the controller has placed an incoming frame into a
1281 * receive buffer that buffer is moved into a receive return
1282 * ring. The driver is then responsible to passing the
1283 * buffer up to the stack. Many versions of the controller
1284 * support multiple RR rings.
1285 *
1286 * Send Ring
1287 * - This ring is used for outgoing frames. Many versions of
1288 * the controller support multiple send rings.
1289 */
1290
1291 /* Initialize the standard receive producer ring control block. */
1292 rcb = &sc->bnx_ldata.bnx_info.bnx_std_rx_rcb;
1293 rcb->bge_hostaddr.bge_addr_lo =
1294 BGE_ADDR_LO(sc->bnx_ldata.bnx_rx_std_ring_paddr);
1295 rcb->bge_hostaddr.bge_addr_hi =
1296 BGE_ADDR_HI(sc->bnx_ldata.bnx_rx_std_ring_paddr);
f368d0d9 1297 if (BNX_IS_57765_PLUS(sc)) {
6c8d8ecc
SZ
1298 /*
1299 * Bits 31-16: Programmable ring size (2048, 1024, 512, .., 32)
1300 * Bits 15-2 : Maximum RX frame size
1301 * Bit 1 : 1 = Ring Disabled, 0 = Ring ENabled
1302 * Bit 0 : Reserved
1303 */
1304 rcb->bge_maxlen_flags =
1305 BGE_RCB_MAXLEN_FLAGS(512, BNX_MAX_FRAMELEN << 2);
1306 } else {
1307 /*
1308 * Bits 31-16: Programmable ring size (512, 256, 128, 64, 32)
1309 * Bits 15-2 : Reserved (should be 0)
1310 * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled
1311 * Bit 0 : Reserved
1312 */
1313 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
1314 }
303fdc72 1315 if (BNX_IS_5717_PLUS(sc))
6c8d8ecc
SZ
1316 rcb->bge_nicaddr = BGE_STD_RX_RINGS_5717;
1317 else
1318 rcb->bge_nicaddr = BGE_STD_RX_RINGS;
1319 /* Write the standard receive producer ring control block. */
1320 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
1321 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
1322 CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1323 CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
1324 /* Reset the standard receive producer ring producer index. */
1325 bnx_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0);
1326
1327 /*
1328 * Initialize the jumbo RX producer ring control
1329 * block. We set the 'ring disabled' bit in the
1330 * flags field until we're actually ready to start
1331 * using this ring (i.e. once we set the MTU
1332 * high enough to require it).
1333 */
1334 if (BNX_IS_JUMBO_CAPABLE(sc)) {
1335 rcb = &sc->bnx_ldata.bnx_info.bnx_jumbo_rx_rcb;
1336 /* Get the jumbo receive producer ring RCB parameters. */
1337 rcb->bge_hostaddr.bge_addr_lo =
1338 BGE_ADDR_LO(sc->bnx_ldata.bnx_rx_jumbo_ring_paddr);
1339 rcb->bge_hostaddr.bge_addr_hi =
1340 BGE_ADDR_HI(sc->bnx_ldata.bnx_rx_jumbo_ring_paddr);
1341 rcb->bge_maxlen_flags =
1342 BGE_RCB_MAXLEN_FLAGS(BNX_MAX_FRAMELEN,
1343 BGE_RCB_FLAG_RING_DISABLED);
303fdc72 1344 if (BNX_IS_5717_PLUS(sc))
6c8d8ecc
SZ
1345 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS_5717;
1346 else
1347 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
1348 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
1349 rcb->bge_hostaddr.bge_addr_hi);
1350 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
1351 rcb->bge_hostaddr.bge_addr_lo);
1352 /* Program the jumbo receive producer ring RCB parameters. */
1353 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
1354 rcb->bge_maxlen_flags);
1355 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
1356 /* Reset the jumbo receive producer ring producer index. */
1357 bnx_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
1358 }
1359
1360 /* Choose de-pipeline mode for BCM5906 A0, A1 and A2. */
1361 if (sc->bnx_asicrev == BGE_ASICREV_BCM5906 &&
1362 (sc->bnx_chipid == BGE_CHIPID_BCM5906_A0 ||
1363 sc->bnx_chipid == BGE_CHIPID_BCM5906_A1 ||
1364 sc->bnx_chipid == BGE_CHIPID_BCM5906_A2)) {
1365 CSR_WRITE_4(sc, BGE_ISO_PKT_TX,
1366 (CSR_READ_4(sc, BGE_ISO_PKT_TX) & ~3) | 2);
1367 }
1368
1369 /*
1370 * The BD ring replenish thresholds control how often the
1371 * hardware fetches new BD's from the producer rings in host
1372 * memory. Setting the value too low on a busy system can
1373 * starve the hardware and recue the throughpout.
1374 *
1375 * Set the BD ring replentish thresholds. The recommended
1376 * values are 1/8th the number of descriptors allocated to
1377 * each ring.
1378 */
1379 val = 8;
1380 CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, val);
1381 if (BNX_IS_JUMBO_CAPABLE(sc)) {
1382 CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH,
1383 BGE_JUMBO_RX_RING_CNT/8);
1384 }
f368d0d9 1385 if (BNX_IS_57765_PLUS(sc)) {
6c8d8ecc
SZ
1386 CSR_WRITE_4(sc, BGE_STD_REPLENISH_LWM, 32);
1387 CSR_WRITE_4(sc, BGE_JMB_REPLENISH_LWM, 16);
1388 }
1389
1390 /*
1391 * Disable all send rings by setting the 'ring disabled' bit
1392 * in the flags field of all the TX send ring control blocks,
1393 * located in NIC memory.
1394 */
80969639
SZ
1395 if (BNX_IS_5717_PLUS(sc))
1396 limit = 4;
4f23029e
SZ
1397 else if (BNX_IS_57765_FAMILY(sc))
1398 limit = 2;
80969639
SZ
1399 else
1400 limit = 1;
6c8d8ecc
SZ
1401 vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1402 for (i = 0; i < limit; i++) {
1403 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1404 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
1405 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1406 vrcb += sizeof(struct bge_rcb);
1407 }
1408
1409 /* Configure send ring RCB 0 (we use only the first ring) */
1410 vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1411 BGE_HOSTADDR(taddr, sc->bnx_ldata.bnx_tx_ring_paddr);
1412 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1413 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
303fdc72 1414 if (BNX_IS_5717_PLUS(sc)) {
6c8d8ecc
SZ
1415 RCB_WRITE_4(sc, vrcb, bge_nicaddr, BGE_SEND_RING_5717);
1416 } else {
1417 RCB_WRITE_4(sc, vrcb, bge_nicaddr,
1418 BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
1419 }
1420 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1421 BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
1422
1423 /*
1424 * Disable all receive return rings by setting the
1425 * 'ring disabled' bit in the flags field of all the receive
1426 * return ring control blocks, located in NIC memory.
1427 */
80969639 1428 if (BNX_IS_5717_PLUS(sc)) {
6c8d8ecc
SZ
1429 /* Should be 17, use 16 until we get an SRAM map. */
1430 limit = 16;
4f23029e 1431 } else if (BNX_IS_57765_FAMILY(sc)) {
6c8d8ecc
SZ
1432 limit = 4;
1433 } else {
1434 limit = 1;
1435 }
1436 /* Disable all receive return rings. */
1437 vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1438 for (i = 0; i < limit; i++) {
1439 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0);
1440 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0);
1441 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1442 BGE_RCB_FLAG_RING_DISABLED);
1443 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1444 bnx_writembx(sc, BGE_MBX_RX_CONS0_LO +
1445 (i * (sizeof(uint64_t))), 0);
1446 vrcb += sizeof(struct bge_rcb);
1447 }
1448
1449 /*
1450 * Set up receive return ring 0. Note that the NIC address
1451 * for RX return rings is 0x0. The return rings live entirely
1452 * within the host, so the nicaddr field in the RCB isn't used.
1453 */
1454 vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1455 BGE_HOSTADDR(taddr, sc->bnx_ldata.bnx_rx_return_ring_paddr);
1456 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1457 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1458 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1459 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1460 BGE_RCB_MAXLEN_FLAGS(sc->bnx_return_ring_cnt, 0));
1461
1462 /* Set random backoff seed for TX */
1463 CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
1464 sc->arpcom.ac_enaddr[0] + sc->arpcom.ac_enaddr[1] +
1465 sc->arpcom.ac_enaddr[2] + sc->arpcom.ac_enaddr[3] +
1466 sc->arpcom.ac_enaddr[4] + sc->arpcom.ac_enaddr[5] +
1467 BGE_TX_BACKOFF_SEED_MASK);
1468
1469 /* Set inter-packet gap */
1470 val = 0x2620;
1471 if (sc->bnx_asicrev == BGE_ASICREV_BCM5720) {
1472 val |= CSR_READ_4(sc, BGE_TX_LENGTHS) &
1473 (BGE_TXLEN_JMB_FRM_LEN_MSK | BGE_TXLEN_CNT_DN_VAL_MSK);
1474 }
1475 CSR_WRITE_4(sc, BGE_TX_LENGTHS, val);
1476
1477 /*
1478 * Specify which ring to use for packets that don't match
1479 * any RX rules.
1480 */
1481 CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
1482
1483 /*
1484 * Configure number of RX lists. One interrupt distribution
1485 * list, sixteen active lists, one bad frames class.
1486 */
1487 CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
1488
1489 /* Inialize RX list placement stats mask. */
1490 CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
1491 CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
1492
1493 /* Disable host coalescing until we get it set up */
1494 CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
1495
1496 /* Poll to make sure it's shut down. */
1497 for (i = 0; i < BNX_TIMEOUT; i++) {
1498 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
1499 break;
1500 DELAY(10);
1501 }
1502
1503 if (i == BNX_TIMEOUT) {
1504 if_printf(&sc->arpcom.ac_if,
1505 "host coalescing engine failed to idle\n");
1506 return(ENXIO);
1507 }
1508
1509 /* Set up host coalescing defaults */
1510 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bnx_rx_coal_ticks);
1511 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bnx_tx_coal_ticks);
1512 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bnx_rx_coal_bds);
1513 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bnx_tx_coal_bds);
1514 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, sc->bnx_rx_coal_bds_int);
1515 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, sc->bnx_tx_coal_bds_int);
1516
1517 /* Set up address of status block */
1518 bzero(sc->bnx_ldata.bnx_status_block, BGE_STATUS_BLK_SZ);
1519 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI,
1520 BGE_ADDR_HI(sc->bnx_ldata.bnx_status_block_paddr));
1521 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
1522 BGE_ADDR_LO(sc->bnx_ldata.bnx_status_block_paddr));
1523
1524 /* Set up status block partail update size. */
1525 val = BGE_STATBLKSZ_32BYTE;
1526#if 0
1527 /*
1528 * Does not seem to have visible effect in both
1529 * bulk data (1472B UDP datagram) and tiny data
1530 * (18B UDP datagram) TX tests.
1531 */
1532 val |= BGE_HCCMODE_CLRTICK_TX;
1533#endif
1534 /* Turn on host coalescing state machine */
1535 CSR_WRITE_4(sc, BGE_HCC_MODE, val | BGE_HCCMODE_ENABLE);
1536
1537 /* Turn on RX BD completion state machine and enable attentions */
1538 CSR_WRITE_4(sc, BGE_RBDC_MODE,
1539 BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);
1540
1541 /* Turn on RX list placement state machine */
1542 CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
1543
1544 val = BGE_MACMODE_TXDMA_ENB | BGE_MACMODE_RXDMA_ENB |
1545 BGE_MACMODE_RX_STATS_CLEAR | BGE_MACMODE_TX_STATS_CLEAR |
1546 BGE_MACMODE_RX_STATS_ENB | BGE_MACMODE_TX_STATS_ENB |
1547 BGE_MACMODE_FRMHDR_DMA_ENB;
1548
1549 if (sc->bnx_flags & BNX_FLAG_TBI)
1550 val |= BGE_PORTMODE_TBI;
1551 else if (sc->bnx_flags & BNX_FLAG_MII_SERDES)
1552 val |= BGE_PORTMODE_GMII;
1553 else
1554 val |= BGE_PORTMODE_MII;
1555
1556 /* Turn on DMA, clear stats */
1557 CSR_WRITE_4(sc, BGE_MAC_MODE, val);
1558
1559 /* Set misc. local control, enable interrupts on attentions */
1560 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);
1561
1562#ifdef notdef
1563 /* Assert GPIO pins for PHY reset */
1564 BNX_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
1565 BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
1566 BNX_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
1567 BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
1568#endif
1569
1570 /* Turn on write DMA state machine */
1571 val = BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS;
1572 /* Enable host coalescing bug fix. */
1573 val |= BGE_WDMAMODE_STATUS_TAG_FIX;
1574 if (sc->bnx_asicrev == BGE_ASICREV_BCM5785) {
1575 /* Request larger DMA burst size to get better performance. */
1576 val |= BGE_WDMAMODE_BURST_ALL_DATA;
1577 }
1578 CSR_WRITE_4(sc, BGE_WDMA_MODE, val);
1579 DELAY(40);
1580
3730a14d 1581 if (BNX_IS_57765_PLUS(sc)) {
6c8d8ecc
SZ
1582 uint32_t dmactl;
1583
1584 dmactl = CSR_READ_4(sc, BGE_RDMA_RSRVCTRL);
1585 /*
1586 * Adjust tx margin to prevent TX data corruption and
1587 * fix internal FIFO overflow.
1588 */
1589 if (sc->bnx_asicrev == BGE_ASICREV_BCM5719 ||
1590 sc->bnx_asicrev == BGE_ASICREV_BCM5720) {
1591 dmactl &= ~(BGE_RDMA_RSRVCTRL_FIFO_LWM_MASK |
1592 BGE_RDMA_RSRVCTRL_FIFO_HWM_MASK |
1593 BGE_RDMA_RSRVCTRL_TXMRGN_MASK);
1594 dmactl |= BGE_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
1595 BGE_RDMA_RSRVCTRL_FIFO_HWM_1_5K |
1596 BGE_RDMA_RSRVCTRL_TXMRGN_320B;
1597 }
1598 /*
1599 * Enable fix for read DMA FIFO overruns.
1600 * The fix is to limit the number of RX BDs
1601 * the hardware would fetch at a fime.
1602 */
1603 CSR_WRITE_4(sc, BGE_RDMA_RSRVCTRL,
1604 dmactl | BGE_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
1605 }
1606
1607 if (sc->bnx_asicrev == BGE_ASICREV_BCM5719) {
1608 CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL,
1609 CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) |
1610 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_4K |
1611 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K);
1612 } else if (sc->bnx_asicrev == BGE_ASICREV_BCM5720) {
1613 /*
1614 * Allow 4KB burst length reads for non-LSO frames.
1615 * Enable 512B burst length reads for buffer descriptors.
1616 */
1617 CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL,
1618 CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) |
1619 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_512 |
1620 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K);
1621 }
1622
1623 /* Turn on read DMA state machine */
1624 val = BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS;
1625 if (sc->bnx_asicrev == BGE_ASICREV_BCM5717)
1626 val |= BGE_RDMAMODE_MULT_DMA_RD_DIS;
1627 if (sc->bnx_asicrev == BGE_ASICREV_BCM5784 ||
1628 sc->bnx_asicrev == BGE_ASICREV_BCM5785 ||
1629 sc->bnx_asicrev == BGE_ASICREV_BCM57780) {
1630 val |= BGE_RDMAMODE_BD_SBD_CRPT_ATTN |
1631 BGE_RDMAMODE_MBUF_RBD_CRPT_ATTN |
1632 BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN;
1633 }
1634 if (sc->bnx_asicrev == BGE_ASICREV_BCM5720) {
1635 val |= CSR_READ_4(sc, BGE_RDMA_MODE) &
1636 BGE_RDMAMODE_H2BNC_VLAN_DET;
1637 /*
1638 * Allow multiple outstanding read requests from
1639 * non-LSO read DMA engine.
1640 */
1641 val &= ~BGE_RDMAMODE_MULT_DMA_RD_DIS;
1642 }
66deb1c1
SZ
1643 if (sc->bnx_flags & BNX_FLAG_TSO)
1644 val |= BGE_RDMAMODE_TSO4_ENABLE;
6c8d8ecc
SZ
1645 val |= BGE_RDMAMODE_FIFO_LONG_BURST;
1646 CSR_WRITE_4(sc, BGE_RDMA_MODE, val);
1647 DELAY(40);
1648
1649 /* Turn on RX data completion state machine */
1650 CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
1651
1652 /* Turn on RX BD initiator state machine */
1653 CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
1654
1655 /* Turn on RX data and RX BD initiator state machine */
1656 CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
1657
1658 /* Turn on send BD completion state machine */
1659 CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
1660
1661 /* Turn on send data completion state machine */
1662 val = BGE_SDCMODE_ENABLE;
1663 if (sc->bnx_asicrev == BGE_ASICREV_BCM5761)
1664 val |= BGE_SDCMODE_CDELAY;
1665 CSR_WRITE_4(sc, BGE_SDC_MODE, val);
1666
1667 /* Turn on send data initiator state machine */
66deb1c1
SZ
1668 if (sc->bnx_flags & BNX_FLAG_TSO) {
1669 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE |
1670 BGE_SDIMODE_HW_LSO_PRE_DMA);
1671 } else {
1672 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
1673 }
6c8d8ecc
SZ
1674
1675 /* Turn on send BD initiator state machine */
1676 CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
1677
1678 /* Turn on send BD selector state machine */
1679 CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
1680
1681 CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
1682 CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
1683 BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);
1684
1685 /* ack/clear link change events */
1686 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
1687 BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
1688 BGE_MACSTAT_LINK_CHANGED);
1689 CSR_WRITE_4(sc, BGE_MI_STS, 0);
1690
1691 /*
1692 * Enable attention when the link has changed state for
1693 * devices that use auto polling.
1694 */
1695 if (sc->bnx_flags & BNX_FLAG_TBI) {
1696 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
1697 } else {
1698 if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) {
1699 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bnx_mi_mode);
1700 DELAY(80);
1701 }
1702 }
1703
1704 /*
1705 * Clear any pending link state attention.
1706 * Otherwise some link state change events may be lost until attention
1707 * is cleared by bnx_intr() -> bnx_softc.bnx_link_upd() sequence.
1708 * It's not necessary on newer BCM chips - perhaps enabling link
1709 * state change attentions implies clearing pending attention.
1710 */
1711 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
1712 BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
1713 BGE_MACSTAT_LINK_CHANGED);
1714
1715 /* Enable link state change attentions. */
1716 BNX_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
1717
1718 return(0);
1719}
1720
1721/*
1722 * Probe for a Broadcom chip. Check the PCI vendor and device IDs
1723 * against our list and return its name if we find a match. Note
1724 * that since the Broadcom controller contains VPD support, we
1725 * can get the device name string from the controller itself instead
1726 * of the compiled-in string. This is a little slow, but it guarantees
1727 * we'll always announce the right product name.
1728 */
1729static int
1730bnx_probe(device_t dev)
1731{
1732 const struct bnx_type *t;
1733 uint16_t product, vendor;
1734
1735 if (!pci_is_pcie(dev))
1736 return ENXIO;
1737
1738 product = pci_get_device(dev);
1739 vendor = pci_get_vendor(dev);
1740
1741 for (t = bnx_devs; t->bnx_name != NULL; t++) {
1742 if (vendor == t->bnx_vid && product == t->bnx_did)
1743 break;
1744 }
1745 if (t->bnx_name == NULL)
1746 return ENXIO;
1747
1748 device_set_desc(dev, t->bnx_name);
1749 return 0;
1750}
1751
1752static int
1753bnx_attach(device_t dev)
1754{
1755 struct ifnet *ifp;
1756 struct bnx_softc *sc;
1757 uint32_t hwcfg = 0, misccfg;
1758 int error = 0, rid, capmask;
1759 uint8_t ether_addr[ETHER_ADDR_LEN];
1760 uint16_t product, vendor;
1761 driver_intr_t *intr_func;
1762 uintptr_t mii_priv = 0;
1763 u_int intr_flags;
66deb1c1
SZ
1764#ifdef BNX_TSO_DEBUG
1765 char desc[32];
1766 int i;
1767#endif
6c8d8ecc
SZ
1768
1769 sc = device_get_softc(dev);
1770 sc->bnx_dev = dev;
50668ed5 1771 callout_init_mp(&sc->bnx_stat_timer);
df9ccc98 1772 callout_init_mp(&sc->bnx_intr_timer);
6c8d8ecc
SZ
1773 lwkt_serialize_init(&sc->bnx_jslot_serializer);
1774
1775 product = pci_get_device(dev);
1776 vendor = pci_get_vendor(dev);
1777
1778#ifndef BURN_BRIDGES
1779 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
1780 uint32_t irq, mem;
1781
1782 irq = pci_read_config(dev, PCIR_INTLINE, 4);
1783 mem = pci_read_config(dev, BGE_PCI_BAR0, 4);
1784
1785 device_printf(dev, "chip is in D%d power mode "
1786 "-- setting to D0\n", pci_get_powerstate(dev));
1787
1788 pci_set_powerstate(dev, PCI_POWERSTATE_D0);
1789
1790 pci_write_config(dev, PCIR_INTLINE, irq, 4);
1791 pci_write_config(dev, BGE_PCI_BAR0, mem, 4);
1792 }
1793#endif /* !BURN_BRIDGE */
1794
1795 /*
1796 * Map control/status registers.
1797 */
1798 pci_enable_busmaster(dev);
1799
1800 rid = BGE_PCI_BAR0;
1801 sc->bnx_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
1802 RF_ACTIVE);
1803
1804 if (sc->bnx_res == NULL) {
1805 device_printf(dev, "couldn't map memory\n");
1806 return ENXIO;
1807 }
1808
1809 sc->bnx_btag = rman_get_bustag(sc->bnx_res);
1810 sc->bnx_bhandle = rman_get_bushandle(sc->bnx_res);
1811
1812 /* Save various chip information */
1813 sc->bnx_chipid =
1814 pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >>
1815 BGE_PCIMISCCTL_ASICREV_SHIFT;
1816 if (BGE_ASICREV(sc->bnx_chipid) == BGE_ASICREV_USE_PRODID_REG) {
1817 /* All chips having dedicated ASICREV register have CPMU */
1818 sc->bnx_flags |= BNX_FLAG_CPMU;
1819
1820 switch (product) {
1821 case PCI_PRODUCT_BROADCOM_BCM5717:
1822 case PCI_PRODUCT_BROADCOM_BCM5718:
1823 case PCI_PRODUCT_BROADCOM_BCM5719:
1824 case PCI_PRODUCT_BROADCOM_BCM5720_ALT:
1825 sc->bnx_chipid = pci_read_config(dev,
1826 BGE_PCI_GEN2_PRODID_ASICREV, 4);
1827 break;
1828
1829 case PCI_PRODUCT_BROADCOM_BCM57761:
32ff3c80 1830 case PCI_PRODUCT_BROADCOM_BCM57762:
6c8d8ecc 1831 case PCI_PRODUCT_BROADCOM_BCM57765:
32ff3c80 1832 case PCI_PRODUCT_BROADCOM_BCM57766:
6c8d8ecc 1833 case PCI_PRODUCT_BROADCOM_BCM57781:
32ff3c80 1834 case PCI_PRODUCT_BROADCOM_BCM57782:
6c8d8ecc 1835 case PCI_PRODUCT_BROADCOM_BCM57785:
32ff3c80 1836 case PCI_PRODUCT_BROADCOM_BCM57786:
6c8d8ecc
SZ
1837 case PCI_PRODUCT_BROADCOM_BCM57791:
1838 case PCI_PRODUCT_BROADCOM_BCM57795:
1839 sc->bnx_chipid = pci_read_config(dev,
1840 BGE_PCI_GEN15_PRODID_ASICREV, 4);
1841 break;
1842
1843 default:
1844 sc->bnx_chipid = pci_read_config(dev,
1845 BGE_PCI_PRODID_ASICREV, 4);
1846 break;
1847 }
1848 }
1849 sc->bnx_asicrev = BGE_ASICREV(sc->bnx_chipid);
1850 sc->bnx_chiprev = BGE_CHIPREV(sc->bnx_chipid);
1851
1852 switch (sc->bnx_asicrev) {
1853 case BGE_ASICREV_BCM5717:
1854 case BGE_ASICREV_BCM5719:
1855 case BGE_ASICREV_BCM5720:
f368d0d9
SZ
1856 sc->bnx_flags |= BNX_FLAG_5717_PLUS | BNX_FLAG_57765_PLUS;
1857 break;
1858
6c8d8ecc 1859 case BGE_ASICREV_BCM57765:
32ff3c80 1860 case BGE_ASICREV_BCM57766:
f368d0d9 1861 sc->bnx_flags |= BNX_FLAG_57765_FAMILY | BNX_FLAG_57765_PLUS;
6c8d8ecc
SZ
1862 break;
1863 }
1864 sc->bnx_flags |= BNX_FLAG_SHORTDMA;
1865
66deb1c1
SZ
1866 sc->bnx_flags |= BNX_FLAG_TSO;
1867 if (sc->bnx_asicrev == BGE_ASICREV_BCM5719 &&
1868 sc->bnx_chipid == BGE_CHIPID_BCM5719_A0)
1869 sc->bnx_flags &= ~BNX_FLAG_TSO;
1870
df9ccc98
SZ
1871 if (sc->bnx_asicrev == BGE_ASICREV_BCM5717 ||
1872 BNX_IS_57765_FAMILY(sc)) {
1873 /*
1874 * All BCM57785 and BCM5718 families chips have a bug that
1875 * under certain situation interrupt will not be enabled
1876 * even if status tag is written to BGE_MBX_IRQ0_LO mailbox.
1877 *
1878 * While BCM5719 and BCM5720 have a hardware workaround
1879 * which could fix the above bug.
1880 * See the comment near BGE_PCIDMARWCTL_TAGGED_STATUS_WA in
1881 * bnx_chipinit().
1882 *
1883 * For the rest of the chips in these two families, we will
1884 * have to poll the status block at high rate (10ms currently)
1885 * to check whether the interrupt is hosed or not.
1886 * See bnx_intr_check() for details.
1887 */
1888 sc->bnx_flags |= BNX_FLAG_STATUSTAG_BUG;
1889 }
1890
6c8d8ecc
SZ
1891 misccfg = CSR_READ_4(sc, BGE_MISC_CFG) & BGE_MISCCFG_BOARD_ID_MASK;
1892
1893 sc->bnx_pciecap = pci_get_pciecap_ptr(sc->bnx_dev);
1894 if (sc->bnx_asicrev == BGE_ASICREV_BCM5719 ||
1895 sc->bnx_asicrev == BGE_ASICREV_BCM5720)
1896 pcie_set_max_readrq(dev, PCIEM_DEVCTL_MAX_READRQ_2048);
1897 else
1898 pcie_set_max_readrq(dev, PCIEM_DEVCTL_MAX_READRQ_4096);
1899 device_printf(dev, "CHIP ID 0x%08x; "
1900 "ASIC REV 0x%02x; CHIP REV 0x%02x\n",
1901 sc->bnx_chipid, sc->bnx_asicrev, sc->bnx_chiprev);
1902
1903 /*
1904 * Set various PHY quirk flags.
1905 */
1906
1907 capmask = MII_CAPMASK_DEFAULT;
46283a40
SZ
1908 if (product == PCI_PRODUCT_BROADCOM_BCM57791 ||
1909 product == PCI_PRODUCT_BROADCOM_BCM57795) {
6c8d8ecc
SZ
1910 /* 10/100 only */
1911 capmask &= ~BMSR_EXTSTAT;
1912 }
1913
1914 mii_priv |= BRGPHY_FLAG_WIRESPEED;
1915
6c8d8ecc
SZ
1916 /*
1917 * Allocate interrupt
1918 */
1919 sc->bnx_irq_type = pci_alloc_1intr(dev, bnx_msi_enable, &sc->bnx_irq_rid,
1920 &intr_flags);
1921
1922 sc->bnx_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->bnx_irq_rid,
1923 intr_flags);
1924 if (sc->bnx_irq == NULL) {
1925 device_printf(dev, "couldn't map interrupt\n");
1926 error = ENXIO;
1927 goto fail;
1928 }
1929
1930 if (sc->bnx_irq_type == PCI_INTR_TYPE_MSI) {
1931 sc->bnx_flags |= BNX_FLAG_ONESHOT_MSI;
1932 bnx_enable_msi(sc);
1933 }
1934
1935 /* Initialize if_name earlier, so if_printf could be used */
1936 ifp = &sc->arpcom.ac_if;
1937 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1938
1939 /* Try to reset the chip. */
1940 bnx_reset(sc);
1941
1942 if (bnx_chipinit(sc)) {
1943 device_printf(dev, "chip initialization failed\n");
1944 error = ENXIO;
1945 goto fail;
1946 }
1947
1948 /*
1949 * Get station address
1950 */
1951 error = bnx_get_eaddr(sc, ether_addr);
1952 if (error) {
1953 device_printf(dev, "failed to read station address\n");
1954 goto fail;
1955 }
1956
f368d0d9 1957 if (BNX_IS_57765_PLUS(sc)) {
6c8d8ecc
SZ
1958 sc->bnx_return_ring_cnt = BGE_RETURN_RING_CNT;
1959 } else {
1960 /* 5705/5750 limits RX return ring to 512 entries. */
1961 sc->bnx_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
1962 }
1963
1964 error = bnx_dma_alloc(sc);
1965 if (error)
1966 goto fail;
1967
1968 /* Set default tuneable values. */
1969 sc->bnx_rx_coal_ticks = BNX_RX_COAL_TICKS_DEF;
1970 sc->bnx_tx_coal_ticks = BNX_TX_COAL_TICKS_DEF;
1971 sc->bnx_rx_coal_bds = BNX_RX_COAL_BDS_DEF;
1972 sc->bnx_tx_coal_bds = BNX_TX_COAL_BDS_DEF;
306e5498
SZ
1973 sc->bnx_rx_coal_bds_int = BNX_RX_COAL_BDS_INT_DEF;
1974 sc->bnx_tx_coal_bds_int = BNX_TX_COAL_BDS_INT_DEF;
6c8d8ecc
SZ
1975
1976 /* Set up ifnet structure */
1977 ifp->if_softc = sc;
1978 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1979 ifp->if_ioctl = bnx_ioctl;
1980 ifp->if_start = bnx_start;
39a8d43a
SZ
1981#ifdef IFPOLL_ENABLE
1982 ifp->if_npoll = bnx_npoll;
6c8d8ecc
SZ
1983#endif
1984 ifp->if_watchdog = bnx_watchdog;
1985 ifp->if_init = bnx_init;
1986 ifp->if_mtu = ETHERMTU;
1987 ifp->if_capabilities = IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
1988 ifq_set_maxlen(&ifp->if_snd, BGE_TX_RING_CNT - 1);
1989 ifq_set_ready(&ifp->if_snd);
1990
1991 ifp->if_capabilities |= IFCAP_HWCSUM;
1992 ifp->if_hwassist = BNX_CSUM_FEATURES;
66deb1c1
SZ
1993 if (sc->bnx_flags & BNX_FLAG_TSO) {
1994 ifp->if_capabilities |= IFCAP_TSO;
1995 ifp->if_hwassist |= CSUM_TSO;
1996 }
6c8d8ecc
SZ
1997 ifp->if_capenable = ifp->if_capabilities;
1998
1999 /*
2000 * Figure out what sort of media we have by checking the
2001 * hardware config word in the first 32k of NIC internal memory,
2002 * or fall back to examining the EEPROM if necessary.
2003 * Note: on some BCM5700 cards, this value appears to be unset.
2004 * If that's the case, we have to rely on identifying the NIC
2005 * by its PCI subsystem ID, as we do below for the SysKonnect
2006 * SK-9D41.
2007 */
2008 if (bnx_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER) {
2009 hwcfg = bnx_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
2010 } else {
2011 if (bnx_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
2012 sizeof(hwcfg))) {
2013 device_printf(dev, "failed to read EEPROM\n");
2014 error = ENXIO;
2015 goto fail;
2016 }
2017 hwcfg = ntohl(hwcfg);
2018 }
2019
2020 /* The SysKonnect SK-9D41 is a 1000baseSX card. */
2021 if (pci_get_subvendor(dev) == PCI_PRODUCT_SCHNEIDERKOCH_SK_9D41 ||
2022 (hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER)
2023 sc->bnx_flags |= BNX_FLAG_TBI;
2024
2025 /* Setup MI MODE */
2026 if (sc->bnx_flags & BNX_FLAG_CPMU)
2027 sc->bnx_mi_mode = BGE_MIMODE_500KHZ_CONST;
2028 else
2029 sc->bnx_mi_mode = BGE_MIMODE_BASE;
2030
2031 /* Setup link status update stuffs */
2032 if (sc->bnx_flags & BNX_FLAG_TBI) {
2033 sc->bnx_link_upd = bnx_tbi_link_upd;
2034 sc->bnx_link_chg = BGE_MACSTAT_LINK_CHANGED;
2035 } else if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) {
2036 sc->bnx_link_upd = bnx_autopoll_link_upd;
2037 sc->bnx_link_chg = BGE_MACSTAT_LINK_CHANGED;
2038 } else {
2039 sc->bnx_link_upd = bnx_copper_link_upd;
2040 sc->bnx_link_chg = BGE_MACSTAT_LINK_CHANGED;
2041 }
2042
2043 /* Set default PHY address */
2044 sc->bnx_phyno = 1;
2045
2046 /*
2047 * PHY address mapping for various devices.
2048 *
2049 * | F0 Cu | F0 Sr | F1 Cu | F1 Sr |
2050 * ---------+-------+-------+-------+-------+
2051 * BCM57XX | 1 | X | X | X |
2052 * BCM5704 | 1 | X | 1 | X |
2053 * BCM5717 | 1 | 8 | 2 | 9 |
2054 * BCM5719 | 1 | 8 | 2 | 9 |
2055 * BCM5720 | 1 | 8 | 2 | 9 |
2056 *
2057 * Other addresses may respond but they are not
2058 * IEEE compliant PHYs and should be ignored.
2059 */
80969639 2060 if (BNX_IS_5717_PLUS(sc)) {
6c8d8ecc
SZ
2061 int f;
2062
2063 f = pci_get_function(dev);
2064 if (sc->bnx_chipid == BGE_CHIPID_BCM5717_A0) {
2065 if (CSR_READ_4(sc, BGE_SGDIG_STS) &
2066 BGE_SGDIGSTS_IS_SERDES)
2067 sc->bnx_phyno = f + 8;
2068 else
2069 sc->bnx_phyno = f + 1;
2070 } else {
2071 if (CSR_READ_4(sc, BGE_CPMU_PHY_STRAP) &
2072 BGE_CPMU_PHY_STRAP_IS_SERDES)
2073 sc->bnx_phyno = f + 8;
2074 else
2075 sc->bnx_phyno = f + 1;
2076 }
2077 }
2078
2079 if (sc->bnx_flags & BNX_FLAG_TBI) {
2080 ifmedia_init(&sc->bnx_ifmedia, IFM_IMASK,
2081 bnx_ifmedia_upd, bnx_ifmedia_sts);
2082 ifmedia_add(&sc->bnx_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
2083 ifmedia_add(&sc->bnx_ifmedia,
2084 IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL);
2085 ifmedia_add(&sc->bnx_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
2086 ifmedia_set(&sc->bnx_ifmedia, IFM_ETHER|IFM_AUTO);
2087 sc->bnx_ifmedia.ifm_media = sc->bnx_ifmedia.ifm_cur->ifm_media;
2088 } else {
2089 struct mii_probe_args mii_args;
2090
2091 mii_probe_args_init(&mii_args, bnx_ifmedia_upd, bnx_ifmedia_sts);
2092 mii_args.mii_probemask = 1 << sc->bnx_phyno;
2093 mii_args.mii_capmask = capmask;
2094 mii_args.mii_privtag = MII_PRIVTAG_BRGPHY;
2095 mii_args.mii_priv = mii_priv;
2096
2097 error = mii_probe(dev, &sc->bnx_miibus, &mii_args);
2098 if (error) {
2099 device_printf(dev, "MII without any PHY!\n");
2100 goto fail;
2101 }
2102 }
2103
39a8d43a 2104#ifdef IFPOLL_ENABLE
0c7fdccd 2105 sc->bnx_npoll_stfrac = 40 - 1; /* 1/40 polling freq */
39a8d43a
SZ
2106 sc->bnx_npoll_cpuid = device_get_unit(dev) % ncpus2;
2107#endif
2108
6c8d8ecc
SZ
2109 /*
2110 * Create sysctl nodes.
2111 */
2112 sysctl_ctx_init(&sc->bnx_sysctl_ctx);
2113 sc->bnx_sysctl_tree = SYSCTL_ADD_NODE(&sc->bnx_sysctl_ctx,
2114 SYSCTL_STATIC_CHILDREN(_hw),
2115 OID_AUTO,
2116 device_get_nameunit(dev),
2117 CTLFLAG_RD, 0, "");
2118 if (sc->bnx_sysctl_tree == NULL) {
2119 device_printf(dev, "can't add sysctl node\n");
2120 error = ENXIO;
2121 goto fail;
2122 }
2123
2124 SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
2125 SYSCTL_CHILDREN(sc->bnx_sysctl_tree),
2126 OID_AUTO, "rx_coal_ticks",
2127 CTLTYPE_INT | CTLFLAG_RW,
2128 sc, 0, bnx_sysctl_rx_coal_ticks, "I",
2129 "Receive coalescing ticks (usec).");
2130 SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
2131 SYSCTL_CHILDREN(sc->bnx_sysctl_tree),
2132 OID_AUTO, "tx_coal_ticks",
2133 CTLTYPE_INT | CTLFLAG_RW,
2134 sc, 0, bnx_sysctl_tx_coal_ticks, "I",
2135 "Transmit coalescing ticks (usec).");
2136 SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
2137 SYSCTL_CHILDREN(sc->bnx_sysctl_tree),
2138 OID_AUTO, "rx_coal_bds",
2139 CTLTYPE_INT | CTLFLAG_RW,
2140 sc, 0, bnx_sysctl_rx_coal_bds, "I",
2141 "Receive max coalesced BD count.");
2142 SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
2143 SYSCTL_CHILDREN(sc->bnx_sysctl_tree),
2144 OID_AUTO, "tx_coal_bds",
2145 CTLTYPE_INT | CTLFLAG_RW,
2146 sc, 0, bnx_sysctl_tx_coal_bds, "I",
2147 "Transmit max coalesced BD count.");
2148 /*
2149 * A common design characteristic for many Broadcom
2150 * client controllers is that they only support a
2151 * single outstanding DMA read operation on the PCIe
2152 * bus. This means that it will take twice as long to
2153 * fetch a TX frame that is split into header and
2154 * payload buffers as it does to fetch a single,
2155 * contiguous TX frame (2 reads vs. 1 read). For these
2156 * controllers, coalescing buffers to reduce the number
2157 * of memory reads is effective way to get maximum
2158 * performance(about 940Mbps). Without collapsing TX
2159 * buffers the maximum TCP bulk transfer performance
2160 * is about 850Mbps. However forcing coalescing mbufs
2161 * consumes a lot of CPU cycles, so leave it off by
2162 * default.
2163 */
2164 SYSCTL_ADD_INT(&sc->bnx_sysctl_ctx,
2165 SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
2166 "force_defrag", CTLFLAG_RW, &sc->bnx_force_defrag, 0,
2167 "Force defragment on TX path");
2168
39a8d43a
SZ
2169#ifdef IFPOLL_ENABLE
2170 SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
2171 SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
2172 "npoll_stfrac", CTLTYPE_INT | CTLFLAG_RW,
2173 sc, 0, bnx_sysctl_npoll_stfrac, "I", "polling status frac");
2174 SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
2175 SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
2176 "npoll_cpuid", CTLTYPE_INT | CTLFLAG_RW,
2177 sc, 0, bnx_sysctl_npoll_cpuid, "I", "polling cpuid");
2178#endif
2179
6c8d8ecc
SZ
2180 SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
2181 SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
2182 "rx_coal_bds_int", CTLTYPE_INT | CTLFLAG_RW,
2183 sc, 0, bnx_sysctl_rx_coal_bds_int, "I",
2184 "Receive max coalesced BD count during interrupt.");
2185 SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
2186 SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
2187 "tx_coal_bds_int", CTLTYPE_INT | CTLFLAG_RW,
2188 sc, 0, bnx_sysctl_tx_coal_bds_int, "I",
2189 "Transmit max coalesced BD count during interrupt.");
2190
66deb1c1
SZ
2191#ifdef BNX_TSO_DEBUG
2192 for (i = 0; i < BNX_TSO_NSTATS; ++i) {
2193 ksnprintf(desc, sizeof(desc), "tso%d", i + 1);
2194 SYSCTL_ADD_ULONG(&sc->bnx_sysctl_ctx,
2195 SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
2196 desc, CTLFLAG_RW, &sc->bnx_tsosegs[i], "");
2197 }
2198#endif
2199
6c8d8ecc
SZ
2200 /*
2201 * Call MI attach routine.
2202 */
2203 ether_ifattach(ifp, ether_addr, NULL);
2204
2205 if (sc->bnx_irq_type == PCI_INTR_TYPE_MSI) {
2206 if (sc->bnx_flags & BNX_FLAG_ONESHOT_MSI) {
2207 intr_func = bnx_msi_oneshot;
2208 if (bootverbose)
2209 device_printf(dev, "oneshot MSI\n");
2210 } else {
2211 intr_func = bnx_msi;
2212 }
2213 } else {
2214 intr_func = bnx_intr_legacy;
2215 }
2216 error = bus_setup_intr(dev, sc->bnx_irq, INTR_MPSAFE, intr_func, sc,
2217 &sc->bnx_intrhand, ifp->if_serializer);
2218 if (error) {
2219 ether_ifdetach(ifp);
2220 device_printf(dev, "couldn't set up irq\n");
2221 goto fail;
2222 }
2223
2224 ifp->if_cpuid = rman_get_cpuid(sc->bnx_irq);
2225 KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus);
2226
8ca0f604 2227 sc->bnx_stat_cpuid = ifp->if_cpuid;
df9ccc98 2228 sc->bnx_intr_cpuid = ifp->if_cpuid;
8ca0f604 2229
6c8d8ecc
SZ
2230 return(0);
2231fail:
2232 bnx_detach(dev);
2233 return(error);
2234}
2235
2236static int
2237bnx_detach(device_t dev)
2238{
2239 struct bnx_softc *sc = device_get_softc(dev);
2240
2241 if (device_is_attached(dev)) {
2242 struct ifnet *ifp = &sc->arpcom.ac_if;
2243
2244 lwkt_serialize_enter(ifp->if_serializer);
2245 bnx_stop(sc);
2246 bnx_reset(sc);
2247 bus_teardown_intr(dev, sc->bnx_irq, sc->bnx_intrhand);
2248 lwkt_serialize_exit(ifp->if_serializer);
2249
2250 ether_ifdetach(ifp);
2251 }
2252
2253 if (sc->bnx_flags & BNX_FLAG_TBI)
2254 ifmedia_removeall(&sc->bnx_ifmedia);
2255 if (sc->bnx_miibus)
2256 device_delete_child(dev, sc->bnx_miibus);
2257 bus_generic_detach(dev);
2258
2259 if (sc->bnx_irq != NULL) {
2260 bus_release_resource(dev, SYS_RES_IRQ, sc->bnx_irq_rid,
2261 sc->bnx_irq);
2262 }
2263 if (sc->bnx_irq_type == PCI_INTR_TYPE_MSI)
2264 pci_release_msi(dev);
2265
2266 if (sc->bnx_res != NULL) {
2267 bus_release_resource(dev, SYS_RES_MEMORY,
2268 BGE_PCI_BAR0, sc->bnx_res);
2269 }
2270
2271 if (sc->bnx_sysctl_tree != NULL)
2272 sysctl_ctx_free(&sc->bnx_sysctl_ctx);
2273
2274 bnx_dma_free(sc);
2275
2276 return 0;
2277}
2278
2279static void
2280bnx_reset(struct bnx_softc *sc)
2281{
2282 device_t dev;
2283 uint32_t cachesize, command, pcistate, reset;
2284 void (*write_op)(struct bnx_softc *, uint32_t, uint32_t);
2285 int i, val = 0;
2286 uint16_t devctl;
2287
2288 dev = sc->bnx_dev;
2289
2290 if (sc->bnx_asicrev != BGE_ASICREV_BCM5906)
2291 write_op = bnx_writemem_direct;
2292 else
2293 write_op = bnx_writereg_ind;
2294
2295 /* Save some important PCI state. */
2296 cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
2297 command = pci_read_config(dev, BGE_PCI_CMD, 4);
2298 pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);
2299
2300 pci_write_config(dev, BGE_PCI_MISC_CTL,
2301 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2302 BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW|
2303 BGE_PCIMISCCTL_TAGGED_STATUS, 4);
2304
2305 /* Disable fastboot on controllers that support it. */
2306 if (bootverbose)
2307 if_printf(&sc->arpcom.ac_if, "Disabling fastboot\n");
2308 CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0x0);
2309
2310 /*
2311 * Write the magic number to SRAM at offset 0xB50.
2312 * When firmware finishes its initialization it will
2313 * write ~BGE_MAGIC_NUMBER to the same location.
2314 */
2315 bnx_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
2316
2317 reset = BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1);
2318
2319 /* XXX: Broadcom Linux driver. */
2320 /* Force PCI-E 1.0a mode */
3730a14d 2321 if (!BNX_IS_57765_PLUS(sc) &&
6c8d8ecc
SZ
2322 CSR_READ_4(sc, BGE_PCIE_PHY_TSTCTL) ==
2323 (BGE_PCIE_PHY_TSTCTL_PSCRAM |
2324 BGE_PCIE_PHY_TSTCTL_PCIE10)) {
2325 CSR_WRITE_4(sc, BGE_PCIE_PHY_TSTCTL,
2326 BGE_PCIE_PHY_TSTCTL_PSCRAM);
2327 }
2328 if (sc->bnx_chipid != BGE_CHIPID_BCM5750_A0) {
2329 /* Prevent PCIE link training during global reset */
2330 CSR_WRITE_4(sc, BGE_MISC_CFG, (1<<29));
2331 reset |= (1<<29);
2332 }
2333
2334 /*
2335 * Set GPHY Power Down Override to leave GPHY
2336 * powered up in D0 uninitialized.
2337 */
2338 if ((sc->bnx_flags & BNX_FLAG_CPMU) == 0)
2339 reset |= BGE_MISCCFG_GPHY_PD_OVERRIDE;
2340
2341 /* Issue global reset */
2342 write_op(sc, BGE_MISC_CFG, reset);
2343
2344 if (sc->bnx_asicrev == BGE_ASICREV_BCM5906) {
2345 uint32_t status, ctrl;
2346
2347 status = CSR_READ_4(sc, BGE_VCPU_STATUS);
2348 CSR_WRITE_4(sc, BGE_VCPU_STATUS,
2349 status | BGE_VCPU_STATUS_DRV_RESET);
2350 ctrl = CSR_READ_4(sc, BGE_VCPU_EXT_CTRL);
2351 CSR_WRITE_4(sc, BGE_VCPU_EXT_CTRL,
2352 ctrl & ~BGE_VCPU_EXT_CTRL_HALT_CPU);
2353 }
2354
2355 DELAY(1000);
2356
2357 /* XXX: Broadcom Linux driver. */
2358 if (sc->bnx_chipid == BGE_CHIPID_BCM5750_A0) {
2359 uint32_t v;
2360
2361 DELAY(500000); /* wait for link training to complete */
2362 v = pci_read_config(dev, 0xc4, 4);
2363 pci_write_config(dev, 0xc4, v | (1<<15), 4);
2364 }
2365
2366 devctl = pci_read_config(dev, sc->bnx_pciecap + PCIER_DEVCTRL, 2);
2367
2368 /* Disable no snoop and disable relaxed ordering. */
2369 devctl &= ~(PCIEM_DEVCTL_RELAX_ORDER | PCIEM_DEVCTL_NOSNOOP);
2370
2371 /* Old PCI-E chips only support 128 bytes Max PayLoad Size. */
2372 if ((sc->bnx_flags & BNX_FLAG_CPMU) == 0) {
2373 devctl &= ~PCIEM_DEVCTL_MAX_PAYLOAD_MASK;
2374 devctl |= PCIEM_DEVCTL_MAX_PAYLOAD_128;
2375 }
2376
2377 pci_write_config(dev, sc->bnx_pciecap + PCIER_DEVCTRL,
2378 devctl, 2);
2379
2380 /* Clear error status. */
2381 pci_write_config(dev, sc->bnx_pciecap + PCIER_DEVSTS,
2382 PCIEM_DEVSTS_CORR_ERR |
2383 PCIEM_DEVSTS_NFATAL_ERR |
2384 PCIEM_DEVSTS_FATAL_ERR |
2385 PCIEM_DEVSTS_UNSUPP_REQ, 2);
2386
2387 /* Reset some of the PCI state that got zapped by reset */
2388 pci_write_config(dev, BGE_PCI_MISC_CTL,
2389 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2390 BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW|
2391 BGE_PCIMISCCTL_TAGGED_STATUS, 4);
2392 pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
2393 pci_write_config(dev, BGE_PCI_CMD, command, 4);
2394 write_op(sc, BGE_MISC_CFG, (65 << 1));
2395
2396 /* Enable memory arbiter */
2397 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
2398
2399 if (sc->bnx_asicrev == BGE_ASICREV_BCM5906) {
2400 for (i = 0; i < BNX_TIMEOUT; i++) {
2401 val = CSR_READ_4(sc, BGE_VCPU_STATUS);
2402 if (val & BGE_VCPU_STATUS_INIT_DONE)
2403 break;
2404 DELAY(100);
2405 }
2406 if (i == BNX_TIMEOUT) {
2407 if_printf(&sc->arpcom.ac_if, "reset timed out\n");
2408 return;
2409 }
2410 } else {
2411 /*
2412 * Poll until we see the 1's complement of the magic number.
2413 * This indicates that the firmware initialization
2414 * is complete.
2415 */
2416 for (i = 0; i < BNX_FIRMWARE_TIMEOUT; i++) {
2417 val = bnx_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
2418 if (val == ~BGE_MAGIC_NUMBER)
2419 break;
2420 DELAY(10);
2421 }
2422 if (i == BNX_FIRMWARE_TIMEOUT) {
2423 if_printf(&sc->arpcom.ac_if, "firmware handshake "
2424 "timed out, found 0x%08x\n", val);
2425 }
2426
2427 /* BCM57765 A0 needs additional time before accessing. */
2428 if (sc->bnx_chipid == BGE_CHIPID_BCM57765_A0)
2429 DELAY(10 * 1000);
2430 }
2431
2432 /*
2433 * XXX Wait for the value of the PCISTATE register to
2434 * return to its original pre-reset state. This is a
2435 * fairly good indicator of reset completion. If we don't
2436 * wait for the reset to fully complete, trying to read
2437 * from the device's non-PCI registers may yield garbage
2438 * results.
2439 */
2440 for (i = 0; i < BNX_TIMEOUT; i++) {
2441 if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate)
2442 break;
2443 DELAY(10);
2444 }
2445
2446 /* Fix up byte swapping */
2447 CSR_WRITE_4(sc, BGE_MODE_CTL, bnx_dma_swap_options(sc));
2448
2449 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
2450
2451 /*
2452 * The 5704 in TBI mode apparently needs some special
2453 * adjustment to insure the SERDES drive level is set
2454 * to 1.2V.
2455 */
2456 if (sc->bnx_asicrev == BGE_ASICREV_BCM5704 &&
2457 (sc->bnx_flags & BNX_FLAG_TBI)) {
2458 uint32_t serdescfg;
2459
2460 serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG);
2461 serdescfg = (serdescfg & ~0xFFF) | 0x880;
2462 CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg);
2463 }
2464
7892075d
SZ
2465 CSR_WRITE_4(sc, BGE_MI_MODE,
2466 sc->bnx_mi_mode & ~BGE_MIMODE_AUTOPOLL);
2467 DELAY(80);
2468
6c8d8ecc 2469 /* XXX: Broadcom Linux driver. */
3730a14d 2470 if (!BNX_IS_57765_PLUS(sc)) {
6c8d8ecc
SZ
2471 uint32_t v;
2472
2473 /* Enable Data FIFO protection. */
f1f34fc4
SZ
2474 v = CSR_READ_4(sc, BGE_PCIE_TLDLPL_PORT);
2475 CSR_WRITE_4(sc, BGE_PCIE_TLDLPL_PORT, v | (1 << 25));
6c8d8ecc
SZ
2476 }
2477
2478 DELAY(10000);
2479
2480 if (sc->bnx_asicrev == BGE_ASICREV_BCM5720) {
2481 BNX_CLRBIT(sc, BGE_CPMU_CLCK_ORIDE,
2482 CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
2483 }
2484}
2485
2486/*
2487 * Frame reception handling. This is called if there's a frame
2488 * on the receive return list.
2489 *
2490 * Note: we have to be able to handle two possibilities here:
2491 * 1) the frame is from the jumbo recieve ring
2492 * 2) the frame is from the standard receive ring
2493 */
2494
2495static void
2496bnx_rxeof(struct bnx_softc *sc, uint16_t rx_prod)
2497{
2498 struct ifnet *ifp;
2499 int stdcnt = 0, jumbocnt = 0;
2500
2501 ifp = &sc->arpcom.ac_if;
2502
2503 while (sc->bnx_rx_saved_considx != rx_prod) {
2504 struct bge_rx_bd *cur_rx;
2505 uint32_t rxidx;
2506 struct mbuf *m = NULL;
2507 uint16_t vlan_tag = 0;
2508 int have_tag = 0;
2509
2510 cur_rx =
2511 &sc->bnx_ldata.bnx_rx_return_ring[sc->bnx_rx_saved_considx];
2512
2513 rxidx = cur_rx->bge_idx;
2514 BNX_INC(sc->bnx_rx_saved_considx, sc->bnx_return_ring_cnt);
2515
2516 if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
2517 have_tag = 1;
2518 vlan_tag = cur_rx->bge_vlan_tag;
2519 }
2520
2521 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
2522 BNX_INC(sc->bnx_jumbo, BGE_JUMBO_RX_RING_CNT);
2523 jumbocnt++;
2524
2525 if (rxidx != sc->bnx_jumbo) {
2526 ifp->if_ierrors++;
2527 if_printf(ifp, "sw jumbo index(%d) "
2528 "and hw jumbo index(%d) mismatch, drop!\n",
2529 sc->bnx_jumbo, rxidx);
2530 bnx_setup_rxdesc_jumbo(sc, rxidx);
2531 continue;
2532 }
2533
2534 m = sc->bnx_cdata.bnx_rx_jumbo_chain[rxidx].bnx_mbuf;
2535 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2536 ifp->if_ierrors++;
2537 bnx_setup_rxdesc_jumbo(sc, sc->bnx_jumbo);
2538 continue;
2539 }
2540 if (bnx_newbuf_jumbo(sc, sc->bnx_jumbo, 0)) {
2541 ifp->if_ierrors++;
2542 bnx_setup_rxdesc_jumbo(sc, sc->bnx_jumbo);
2543 continue;
2544 }
2545 } else {
2546 BNX_INC(sc->bnx_std, BGE_STD_RX_RING_CNT);
2547 stdcnt++;
2548
2549 if (rxidx != sc->bnx_std) {
2550 ifp->if_ierrors++;
2551 if_printf(ifp, "sw std index(%d) "
2552 "and hw std index(%d) mismatch, drop!\n",
2553 sc->bnx_std, rxidx);
2554 bnx_setup_rxdesc_std(sc, rxidx);
2555 continue;
2556 }
2557
2558 m = sc->bnx_cdata.bnx_rx_std_chain[rxidx].bnx_mbuf;
2559 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2560 ifp->if_ierrors++;
2561 bnx_setup_rxdesc_std(sc, sc->bnx_std);
2562 continue;
2563 }
2564 if (bnx_newbuf_std(sc, sc->bnx_std, 0)) {
2565 ifp->if_ierrors++;
2566 bnx_setup_rxdesc_std(sc, sc->bnx_std);
2567 continue;
2568 }
2569 }
2570
2571 ifp->if_ipackets++;
2572 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
2573 m->m_pkthdr.rcvif = ifp;
2574
2575 if ((ifp->if_capenable & IFCAP_RXCSUM) &&
2576 (cur_rx->bge_flags & BGE_RXBDFLAG_IPV6) == 0) {
2577 if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
2578 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2579 if ((cur_rx->bge_error_flag &
2580 BGE_RXERRFLAG_IP_CSUM_NOK) == 0)
2581 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2582 }
2583 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) {
2584 m->m_pkthdr.csum_data =
2585 cur_rx->bge_tcp_udp_csum;
2586 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
2587 CSUM_PSEUDO_HDR;
2588 }
2589 }
2590
2591 /*
2592 * If we received a packet with a vlan tag, pass it
2593 * to vlan_input() instead of ether_input().
2594 */
2595 if (have_tag) {
2596 m->m_flags |= M_VLANTAG;
2597 m->m_pkthdr.ether_vlantag = vlan_tag;
2598 have_tag = vlan_tag = 0;
2599 }
2600 ifp->if_input(ifp, m);
2601 }
2602
2603 bnx_writembx(sc, BGE_MBX_RX_CONS0_LO, sc->bnx_rx_saved_considx);
2604 if (stdcnt)
2605 bnx_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bnx_std);
2606 if (jumbocnt)
2607 bnx_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bnx_jumbo);
2608}
2609
2610static void
2611bnx_txeof(struct bnx_softc *sc, uint16_t tx_cons)
2612{
6c8d8ecc
SZ
2613 struct ifnet *ifp;
2614
2615 ifp = &sc->arpcom.ac_if;
2616
2617 /*
2618 * Go through our tx ring and free mbufs for those
2619 * frames that have been sent.
2620 */
2621 while (sc->bnx_tx_saved_considx != tx_cons) {
2622 uint32_t idx = 0;
2623
2624 idx = sc->bnx_tx_saved_considx;
6c8d8ecc 2625 if (sc->bnx_cdata.bnx_tx_chain[idx] != NULL) {
9a103adf 2626 ifp->if_opackets++;
6c8d8ecc
SZ
2627 bus_dmamap_unload(sc->bnx_cdata.bnx_tx_mtag,
2628 sc->bnx_cdata.bnx_tx_dmamap[idx]);
2629 m_freem(sc->bnx_cdata.bnx_tx_chain[idx]);
2630 sc->bnx_cdata.bnx_tx_chain[idx] = NULL;
2631 }
2632 sc->bnx_txcnt--;
2633 BNX_INC(sc->bnx_tx_saved_considx, BGE_TX_RING_CNT);
2634 }
2635
9a103adf 2636 if ((BGE_TX_RING_CNT - sc->bnx_txcnt) >=
6c8d8ecc
SZ
2637 (BNX_NSEG_RSVD + BNX_NSEG_SPARE))
2638 ifp->if_flags &= ~IFF_OACTIVE;
2639
2640 if (sc->bnx_txcnt == 0)
2641 ifp->if_timer = 0;
2642
2643 if (!ifq_is_empty(&ifp->if_snd))
2644 if_devstart(ifp);
2645}
2646
39a8d43a 2647#ifdef IFPOLL_ENABLE
6c8d8ecc
SZ
2648
2649static void
39a8d43a
SZ
2650bnx_npoll(struct ifnet *ifp, struct ifpoll_info *info)
2651{
2652 struct bnx_softc *sc = ifp->if_softc;
2653
2654 ASSERT_SERIALIZED(ifp->if_serializer);
2655
2656 if (info != NULL) {
2657 int cpuid = sc->bnx_npoll_cpuid;
2658
2659 info->ifpi_rx[cpuid].poll_func = bnx_npoll_compat;
2660 info->ifpi_rx[cpuid].arg = NULL;
2661 info->ifpi_rx[cpuid].serializer = ifp->if_serializer;
2662
2663 if (ifp->if_flags & IFF_RUNNING)
2664 bnx_disable_intr(sc);
2665 ifp->if_npoll_cpuid = cpuid;
2666 } else {
2667 if (ifp->if_flags & IFF_RUNNING)
2668 bnx_enable_intr(sc);
2669 ifp->if_npoll_cpuid = -1;
2670 }
2671}
2672
2673static void
2674bnx_npoll_compat(struct ifnet *ifp, void *arg __unused, int cycle __unused)
6c8d8ecc
SZ
2675{
2676 struct bnx_softc *sc = ifp->if_softc;
2677 struct bge_status_block *sblk = sc->bnx_ldata.bnx_status_block;
2678 uint16_t rx_prod, tx_cons;
2679
39a8d43a
SZ
2680 ASSERT_SERIALIZED(ifp->if_serializer);
2681
2682 if (sc->bnx_npoll_stcount-- == 0) {
2683 sc->bnx_npoll_stcount = sc->bnx_npoll_stfrac;
6c8d8ecc
SZ
2684 /*
2685 * Process link state changes.
2686 */
2687 bnx_link_poll(sc);
6c8d8ecc 2688 }
39a8d43a
SZ
2689
2690 sc->bnx_status_tag = sblk->bge_status_tag;
2691
2692 /*
2693 * Use a load fence to ensure that status_tag is saved
2694 * before rx_prod and tx_cons.
2695 */
2696 cpu_lfence();
2697
2698 rx_prod = sblk->bge_idx[0].bge_rx_prod_idx;
2699 tx_cons = sblk->bge_idx[0].bge_tx_cons_idx;
2700
2701 rx_prod = sblk->bge_idx[0].bge_rx_prod_idx;
2702 if (sc->bnx_rx_saved_considx != rx_prod)
2703 bnx_rxeof(sc, rx_prod);
2704
2705 tx_cons = sblk->bge_idx[0].bge_tx_cons_idx;
2706 if (sc->bnx_tx_saved_considx != tx_cons)
2707 bnx_txeof(sc, tx_cons);
6c8d8ecc
SZ
2708}
2709
39a8d43a 2710#endif /* IFPOLL_ENABLE */
6c8d8ecc
SZ
2711
2712static void
2713bnx_intr_legacy(void *xsc)
2714{
2715 struct bnx_softc *sc = xsc;
2716 struct bge_status_block *sblk = sc->bnx_ldata.bnx_status_block;
2717
2718 if (sc->bnx_status_tag == sblk->bge_status_tag) {
2719 uint32_t val;
2720
2721 val = pci_read_config(sc->bnx_dev, BGE_PCI_PCISTATE, 4);
2722 if (val & BGE_PCISTAT_INTR_NOTACT)
2723 return;
2724 }
2725
2726 /*
2727 * NOTE:
2728 * Interrupt will have to be disabled if tagged status
2729 * is used, else interrupt will always be asserted on
2730 * certain chips (at least on BCM5750 AX/BX).
2731 */
2732 bnx_writembx(sc, BGE_MBX_IRQ0_LO, 1);
2733
2734 bnx_intr(sc);
2735}
2736
2737static void
2738bnx_msi(void *xsc)
2739{
2740 struct bnx_softc *sc = xsc;
2741
2742 /* Disable interrupt first */
2743 bnx_writembx(sc, BGE_MBX_IRQ0_LO, 1);
2744 bnx_intr(sc);
2745}
2746
2747static void
2748bnx_msi_oneshot(void *xsc)
2749{
2750 bnx_intr(xsc);
2751}
2752
2753static void
2754bnx_intr(struct bnx_softc *sc)
2755{
2756 struct ifnet *ifp = &sc->arpcom.ac_if;
2757 struct bge_status_block *sblk = sc->bnx_ldata.bnx_status_block;
2758 uint16_t rx_prod, tx_cons;
2759 uint32_t status;
2760
2761 sc->bnx_status_tag = sblk->bge_status_tag;
2762 /*
2763 * Use a load fence to ensure that status_tag is saved
2764 * before rx_prod, tx_cons and status.
2765 */
2766 cpu_lfence();
2767
2768 rx_prod = sblk->bge_idx[0].bge_rx_prod_idx;
2769 tx_cons = sblk->bge_idx[0].bge_tx_cons_idx;
2770 status = sblk->bge_status;
2771
2772 if ((status & BGE_STATFLAG_LINKSTATE_CHANGED) || sc->bnx_link_evt)
2773 bnx_link_poll(sc);
2774
2775 if (ifp->if_flags & IFF_RUNNING) {
2776 if (sc->bnx_rx_saved_considx != rx_prod)
2777 bnx_rxeof(sc, rx_prod);
2778
2779 if (sc->bnx_tx_saved_considx != tx_cons)
2780 bnx_txeof(sc, tx_cons);
2781 }
2782
2783 bnx_writembx(sc, BGE_MBX_IRQ0_LO, sc->bnx_status_tag << 24);
2784
2785 if (sc->bnx_coal_chg)
2786 bnx_coal_change(sc);
2787}
2788
2789static void
2790bnx_tick(void *xsc)
2791{
2792 struct bnx_softc *sc = xsc;
2793 struct ifnet *ifp = &sc->arpcom.ac_if;
2794
2795 lwkt_serialize_enter(ifp->if_serializer);
2796
8ca0f604
SZ
2797 KKASSERT(mycpuid == sc->bnx_stat_cpuid);
2798
6c8d8ecc
SZ
2799 bnx_stats_update_regs(sc);
2800
2801 if (sc->bnx_flags & BNX_FLAG_TBI) {
2802 /*
2803 * Since in TBI mode auto-polling can't be used we should poll
2804 * link status manually. Here we register pending link event
2805 * and trigger interrupt.
2806 */
2807 sc->bnx_link_evt++;
2808 BNX_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
2809 } else if (!sc->bnx_link) {
2810 mii_tick(device_get_softc(sc->bnx_miibus));
2811 }
2812
2813 callout_reset(&sc->bnx_stat_timer, hz, bnx_tick, sc);
2814
2815 lwkt_serialize_exit(ifp->if_serializer);
2816}
2817
2818static void
2819bnx_stats_update_regs(struct bnx_softc *sc)
2820{
2821 struct ifnet *ifp = &sc->arpcom.ac_if;
2822 struct bge_mac_stats_regs stats;
2823 uint32_t *s;
2824 int i;
2825
2826 s = (uint32_t *)&stats;
2827 for (i = 0; i < sizeof(struct bge_mac_stats_regs); i += 4) {
2828 *s = CSR_READ_4(sc, BGE_RX_STATS + i);
2829 s++;
2830 }
2831
2832 ifp->if_collisions +=
2833 (stats.dot3StatsSingleCollisionFrames +
2834 stats.dot3StatsMultipleCollisionFrames +
2835 stats.dot3StatsExcessiveCollisions +
2836 stats.dot3StatsLateCollisions) -
2837 ifp->if_collisions;
2838}
2839
2840/*
2841 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
2842 * pointers to descriptors.
2843 */
2844static int
2845bnx_encap(struct bnx_softc *sc, struct mbuf **m_head0, uint32_t *txidx)
2846{
2847 struct bge_tx_bd *d = NULL;
66deb1c1 2848 uint16_t csum_flags = 0, vlan_tag = 0, mss = 0;
6c8d8ecc
SZ
2849 bus_dma_segment_t segs[BNX_NSEG_NEW];
2850 bus_dmamap_t map;
2851 int error, maxsegs, nsegs, idx, i;
2852 struct mbuf *m_head = *m_head0, *m_new;
2853
66deb1c1
SZ
2854 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
2855#ifdef BNX_TSO_DEBUG
2856 int tso_nsegs;
2857#endif
2858
2859 error = bnx_setup_tso(sc, m_head0, &mss, &csum_flags);
2860 if (error)
2861 return error;
2862 m_head = *m_head0;
2863
2864#ifdef BNX_TSO_DEBUG
f0336d39
SZ
2865 tso_nsegs = (m_head->m_pkthdr.len /
2866 m_head->m_pkthdr.tso_segsz) - 1;
66deb1c1
SZ
2867 if (tso_nsegs > (BNX_TSO_NSTATS - 1))
2868 tso_nsegs = BNX_TSO_NSTATS - 1;
2869 else if (tso_nsegs < 0)
2870 tso_nsegs = 0;
2871 sc->bnx_tsosegs[tso_nsegs]++;
2872#endif
2873 } else if (m_head->m_pkthdr.csum_flags & BNX_CSUM_FEATURES) {
6c8d8ecc
SZ
2874 if (m_head->m_pkthdr.csum_flags & CSUM_IP)
2875 csum_flags |= BGE_TXBDFLAG_IP_CSUM;
2876 if (m_head->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
2877 csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
2878 if (m_head->m_flags & M_LASTFRAG)
2879 csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
2880 else if (m_head->m_flags & M_FRAG)
2881 csum_flags |= BGE_TXBDFLAG_IP_FRAG;
2882 }
66deb1c1
SZ
2883 if (m_head->m_flags & M_VLANTAG) {
2884 csum_flags |= BGE_TXBDFLAG_VLAN_TAG;
2885 vlan_tag = m_head->m_pkthdr.ether_vlantag;
2886 }
6c8d8ecc
SZ
2887
2888 idx = *txidx;
2889 map = sc->bnx_cdata.bnx_tx_dmamap[idx];
2890
2891 maxsegs = (BGE_TX_RING_CNT - sc->bnx_txcnt) - BNX_NSEG_RSVD;
2892 KASSERT(maxsegs >= BNX_NSEG_SPARE,
2893 ("not enough segments %d", maxsegs));
2894
2895 if (maxsegs > BNX_NSEG_NEW)
2896 maxsegs = BNX_NSEG_NEW;
2897
2898 /*
2899 * Pad outbound frame to BGE_MIN_FRAMELEN for an unusual reason.
2900 * The bge hardware will pad out Tx runts to BGE_MIN_FRAMELEN,
2901 * but when such padded frames employ the bge IP/TCP checksum
2902 * offload, the hardware checksum assist gives incorrect results
2903 * (possibly from incorporating its own padding into the UDP/TCP
2904 * checksum; who knows). If we pad such runts with zeros, the
2905 * onboard checksum comes out correct.
2906 */
2907 if ((csum_flags & BGE_TXBDFLAG_TCP_UDP_CSUM) &&
2908 m_head->m_pkthdr.len < BNX_MIN_FRAMELEN) {
2909 error = m_devpad(m_head, BNX_MIN_FRAMELEN);
2910 if (error)
2911 goto back;
2912 }
2913
2914 if ((sc->bnx_flags & BNX_FLAG_SHORTDMA) && m_head->m_next != NULL) {
2915 m_new = bnx_defrag_shortdma(m_head);
2916 if (m_new == NULL) {
2917 error = ENOBUFS;
2918 goto back;
2919 }
2920 *m_head0 = m_head = m_new;
2921 }
66deb1c1
SZ
2922 if ((m_head->m_pkthdr.csum_flags & CSUM_TSO) == 0 &&
2923 sc->bnx_force_defrag && m_head->m_next != NULL) {
6c8d8ecc
SZ
2924 /*
2925 * Forcefully defragment mbuf chain to overcome hardware
2926 * limitation which only support a single outstanding
2927 * DMA read operation. If it fails, keep moving on using
2928 * the original mbuf chain.
2929 */
2930 m_new = m_defrag(m_head, MB_DONTWAIT);
2931 if (m_new != NULL)
2932 *m_head0 = m_head = m_new;
2933 }
2934
2935 error = bus_dmamap_load_mbuf_defrag(sc->bnx_cdata.bnx_tx_mtag, map,
2936 m_head0, segs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
2937 if (error)
2938 goto back;
2939
2940 m_head = *m_head0;
2941 bus_dmamap_sync(sc->bnx_cdata.bnx_tx_mtag, map, BUS_DMASYNC_PREWRITE);
2942
2943 for (i = 0; ; i++) {
2944 d = &sc->bnx_ldata.bnx_tx_ring[idx];
2945
2946 d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr);
2947 d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr);
2948 d->bge_len = segs[i].ds_len;
2949 d->bge_flags = csum_flags;
66deb1c1
SZ
2950 d->bge_vlan_tag = vlan_tag;
2951 d->bge_mss = mss;
6c8d8ecc
SZ
2952
2953 if (i == nsegs - 1)
2954 break;
2955 BNX_INC(idx, BGE_TX_RING_CNT);
2956 }
2957 /* Mark the last segment as end of packet... */
2958 d->bge_flags |= BGE_TXBDFLAG_END;
2959
6c8d8ecc
SZ
2960 /*
2961 * Insure that the map for this transmission is placed at
2962 * the array index of the last descriptor in this chain.
2963 */
2964 sc->bnx_cdata.bnx_tx_dmamap[*txidx] = sc->bnx_cdata.bnx_tx_dmamap[idx];
2965 sc->bnx_cdata.bnx_tx_dmamap[idx] = map;
2966 sc->bnx_cdata.bnx_tx_chain[idx] = m_head;
2967 sc->bnx_txcnt += nsegs;
2968
2969 BNX_INC(idx, BGE_TX_RING_CNT);
2970 *txidx = idx;
2971back:
2972 if (error) {
2973 m_freem(*m_head0);
2974 *m_head0 = NULL;
2975 }
2976 return error;
2977}
2978
2979/*
2980 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
2981 * to the mbuf data regions directly in the transmit descriptors.
2982 */
2983static void
2984bnx_start(struct ifnet *ifp)
2985{
2986 struct bnx_softc *sc = ifp->if_softc;
2987 struct mbuf *m_head = NULL;
2988 uint32_t prodidx;
2989 int need_trans;
2990
2991 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
2992 return;
2993
2994 prodidx = sc->bnx_tx_prodidx;
2995
2996 need_trans = 0;
2997 while (sc->bnx_cdata.bnx_tx_chain[prodidx] == NULL) {
6c8d8ecc
SZ
2998 /*
2999 * Sanity check: avoid coming within BGE_NSEG_RSVD
3000 * descriptors of the end of the ring. Also make
3001 * sure there are BGE_NSEG_SPARE descriptors for
a1bd58c9 3002 * jumbo buffers' or TSO segments' defragmentation.
6c8d8ecc
SZ
3003 */
3004 if ((BGE_TX_RING_CNT - sc->bnx_txcnt) <
3005 (BNX_NSEG_RSVD + BNX_NSEG_SPARE)) {
3006 ifp->if_flags |= IFF_OACTIVE;
6c8d8ecc
SZ
3007 break;
3008 }
3009
a1bd58c9
SZ
3010 m_head = ifq_dequeue(&ifp->if_snd, NULL);
3011 if (m_head == NULL)
3012 break;
3013
6c8d8ecc
SZ
3014 /*
3015 * Pack the data into the transmit ring. If we
3016 * don't have room, set the OACTIVE flag and wait
3017 * for the NIC to drain the ring.
3018 */
3019 if (bnx_encap(sc, &m_head, &prodidx)) {
3020 ifp->if_flags |= IFF_OACTIVE;
3021 ifp->if_oerrors++;
3022 break;
3023 }
3024 need_trans = 1;
3025
3026 ETHER_BPF_MTAP(ifp, m_head);
3027 }
3028
3029 if (!need_trans)
3030 return;
3031
3032 /* Transmit */
3033 bnx_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
3034
3035 sc->bnx_tx_prodidx = prodidx;
3036
3037 /*
3038 * Set a timeout in case the chip goes out to lunch.
3039 */
3040 ifp->if_timer = 5;
3041}
3042
3043static void
3044bnx_init(void *xsc)
3045{
3046 struct bnx_softc *sc = xsc;
3047 struct ifnet *ifp = &sc->arpcom.ac_if;
3048 uint16_t *m;
3049 uint32_t mode;
3050
3051 ASSERT_SERIALIZED(ifp->if_serializer);
3052
3053 /* Cancel pending I/O and flush buffers. */
3054 bnx_stop(sc);
3055 bnx_reset(sc);
3056 bnx_chipinit(sc);
3057
3058 /*
3059 * Init the various state machines, ring
3060 * control blocks and firmware.
3061 */
3062 if (bnx_blockinit(sc)) {
3063 if_printf(ifp, "initialization failure\n");
3064 bnx_stop(sc);
3065 return;
3066 }
3067
3068 /* Specify MTU. */
3069 CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
3070 ETHER_HDR_LEN + ETHER_CRC_LEN + EVL_ENCAPLEN);
3071
3072 /* Load our MAC address. */
3073 m = (uint16_t *)&sc->arpcom.ac_enaddr[0];
3074 CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
3075 CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
3076
3077 /* Enable or disable promiscuous mode as needed. */
3078 bnx_setpromisc(sc);
3079
3080 /* Program multicast filter. */
3081 bnx_setmulti(sc);
3082
3083 /* Init RX ring. */
3084 if (bnx_init_rx_ring_std(sc)) {
3085 if_printf(ifp, "RX ring initialization failed\n");
3086 bnx_stop(sc);
3087 return;
3088 }
3089
3090 /* Init jumbo RX ring. */
3091 if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN)) {
3092 if (bnx_init_rx_ring_jumbo(sc)) {
3093 if_printf(ifp, "Jumbo RX ring initialization failed\n");
3094 bnx_stop(sc);
3095 return;
3096 }
3097 }
3098
3099 /* Init our RX return ring index */
3100 sc->bnx_rx_saved_considx = 0;
3101
3102 /* Init TX ring. */
3103 bnx_init_tx_ring(sc);
3104
3105 /* Enable TX MAC state machine lockup fix. */
3106 mode = CSR_READ_4(sc, BGE_TX_MODE);
3107 mode |= BGE_TXMODE_MBUF_LOCKUP_FIX;
3108 if (sc->bnx_asicrev == BGE_ASICREV_BCM5720) {
3109 mode &= ~(BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE);
3110 mode |= CSR_READ_4(sc, BGE_TX_MODE) &
3111 (BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE);
3112 }
3113 /* Turn on transmitter */
3114 CSR_WRITE_4(sc, BGE_TX_MODE, mode | BGE_TXMODE_ENABLE);
3115
3116 /* Turn on receiver */
3117 BNX_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
3118
3119 /*
3120 * Set the number of good frames to receive after RX MBUF
3121 * Low Watermark has been reached. After the RX MAC receives
3122 * this number of frames, it will drop subsequent incoming
3123 * frames until the MBUF High Watermark is reached.
3124 */
bcb29629 3125 if (BNX_IS_57765_FAMILY(sc))
6c8d8ecc
SZ
3126 CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 1);
3127 else
3128 CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 2);
3129
3130 if (sc->bnx_irq_type == PCI_INTR_TYPE_MSI) {
3131 if (bootverbose) {
3132 if_printf(ifp, "MSI_MODE: %#x\n",
3133 CSR_READ_4(sc, BGE_MSI_MODE));
3134 }
3135 }
3136
3137 /* Tell firmware we're alive. */
3138 BNX_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3139
3140 /* Enable host interrupts if polling(4) is not enabled. */
3141 PCI_SETBIT(sc->bnx_dev, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA, 4);
39a8d43a
SZ
3142#ifdef IFPOLL_ENABLE
3143 if (ifp->if_flags & IFF_NPOLLING)
6c8d8ecc
SZ
3144 bnx_disable_intr(sc);
3145 else
3146#endif
3147 bnx_enable_intr(sc);
3148
3149 bnx_ifmedia_upd(ifp);
3150
3151 ifp->if_flags |= IFF_RUNNING;
3152 ifp->if_flags &= ~IFF_OACTIVE;
3153
8ca0f604
SZ
3154 callout_reset_bycpu(&sc->bnx_stat_timer, hz, bnx_tick, sc,
3155 sc->bnx_stat_cpuid);
6c8d8ecc
SZ
3156}
3157
3158/*
3159 * Set media options.
3160 */
3161static int
3162bnx_ifmedia_upd(struct ifnet *ifp)
3163{
3164 struct bnx_softc *sc = ifp->if_softc;
3165
3166 /* If this is a 1000baseX NIC, enable the TBI port. */
3167 if (sc->bnx_flags & BNX_FLAG_TBI) {
3168 struct ifmedia *ifm = &sc->bnx_ifmedia;
3169
3170 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
3171 return(EINVAL);
3172
3173 switch(IFM_SUBTYPE(ifm->ifm_media)) {
3174 case IFM_AUTO:
3175 break;
3176
3177 case IFM_1000_SX:
3178 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
3179 BNX_CLRBIT(sc, BGE_MAC_MODE,
3180 BGE_MACMODE_HALF_DUPLEX);
3181 } else {
3182 BNX_SETBIT(sc, BGE_MAC_MODE,
3183 BGE_MACMODE_HALF_DUPLEX);
3184 }
3185 break;
3186 default:
3187 return(EINVAL);
3188 }
3189 } else {
3190 struct mii_data *mii = device_get_softc(sc->bnx_miibus);
3191
3192 sc->bnx_link_evt++;
3193 sc->bnx_link = 0;
3194 if (mii->mii_instance) {
3195 struct mii_softc *miisc;
3196
3197 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
3198 mii_phy_reset(miisc);
3199 }
3200 mii_mediachg(mii);
3201
3202 /*
3203 * Force an interrupt so that we will call bnx_link_upd
3204 * if needed and clear any pending link state attention.
3205 * Without this we are not getting any further interrupts
3206 * for link state changes and thus will not UP the link and
3207 * not be able to send in bnx_start. The only way to get
3208 * things working was to receive a packet and get an RX
3209 * intr.
3210 *
3211 * bnx_tick should help for fiber cards and we might not
3212 * need to do this here if BNX_FLAG_TBI is set but as
3213 * we poll for fiber anyway it should not harm.
3214 */
3215 BNX_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
3216 }
3217 return(0);
3218}
3219
3220/*
3221 * Report current media status.
3222 */
3223static void
3224bnx_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
3225{
3226 struct bnx_softc *sc = ifp->if_softc;
3227
3228 if (sc->bnx_flags & BNX_FLAG_TBI) {
3229 ifmr->ifm_status = IFM_AVALID;
3230 ifmr->ifm_active = IFM_ETHER;
3231 if (CSR_READ_4(sc, BGE_MAC_STS) &
3232 BGE_MACSTAT_TBI_PCS_SYNCHED) {
3233 ifmr->ifm_status |= IFM_ACTIVE;
3234 } else {
3235 ifmr->ifm_active |= IFM_NONE;
3236 return;
3237 }
3238
3239 ifmr->ifm_active |= IFM_1000_SX;
3240 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
3241 ifmr->ifm_active |= IFM_HDX;
3242 else
3243 ifmr->ifm_active |= IFM_FDX;
3244 } else {
3245 struct mii_data *mii = device_get_softc(sc->bnx_miibus);
3246
3247 mii_pollstat(mii);
3248 ifmr->ifm_active = mii->mii_media_active;
3249 ifmr->ifm_status = mii->mii_media_status;
3250 }
3251}
3252
3253static int
3254bnx_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
3255{
3256 struct bnx_softc *sc = ifp->if_softc;
3257 struct ifreq *ifr = (struct ifreq *)data;
3258 int mask, error = 0;
3259
3260 ASSERT_SERIALIZED(ifp->if_serializer);
3261
3262 switch (command) {
3263 case SIOCSIFMTU:
3264 if ((!BNX_IS_JUMBO_CAPABLE(sc) && ifr->ifr_mtu > ETHERMTU) ||
3265 (BNX_IS_JUMBO_CAPABLE(sc) &&
3266 ifr->ifr_mtu > BNX_JUMBO_MTU)) {
3267 error = EINVAL;
3268 } else if (ifp->if_mtu != ifr->ifr_mtu) {
3269 ifp->if_mtu = ifr->ifr_mtu;
3270 if (ifp->if_flags & IFF_RUNNING)
3271 bnx_init(sc);
3272 }
3273 break;
3274 case SIOCSIFFLAGS:
3275 if (ifp->if_flags & IFF_UP) {
3276 if (ifp->if_flags & IFF_RUNNING) {
3277 mask = ifp->if_flags ^ sc->bnx_if_flags;
3278
3279 /*
3280 * If only the state of the PROMISC flag
3281 * changed, then just use the 'set promisc
3282 * mode' command instead of reinitializing
3283 * the entire NIC. Doing a full re-init
3284 * means reloading the firmware and waiting
3285 * for it to start up, which may take a
3286 * second or two. Similarly for ALLMULTI.
3287 */
3288 if (mask & IFF_PROMISC)
3289 bnx_setpromisc(sc);
3290 if (mask & IFF_ALLMULTI)
3291 bnx_setmulti(sc);
3292 } else {
3293 bnx_init(sc);
3294 }
3295 } else if (ifp->if_flags & IFF_RUNNING) {
3296 bnx_stop(sc);
3297 }
3298 sc->bnx_if_flags = ifp->if_flags;
3299 break;
3300 case SIOCADDMULTI:
3301 case SIOCDELMULTI:
3302 if (ifp->if_flags & IFF_RUNNING)
3303 bnx_setmulti(sc);
3304 break;
3305 case SIOCSIFMEDIA:
3306 case SIOCGIFMEDIA:
3307 if (sc->bnx_flags & BNX_FLAG_TBI) {
3308 error = ifmedia_ioctl(ifp, ifr,
3309 &sc->bnx_ifmedia, command);
3310 } else {
3311 struct mii_data *mii;
3312
3313 mii = device_get_softc(sc->bnx_miibus);
3314 error = ifmedia_ioctl(ifp, ifr,
3315 &mii->mii_media, command);
3316 }
3317 break;
3318 case SIOCSIFCAP:
3319 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
3320 if (mask & IFCAP_HWCSUM) {
3321 ifp->if_capenable ^= (mask & IFCAP_HWCSUM);
66deb1c1
SZ
3322 if (ifp->if_capenable & IFCAP_TXCSUM)
3323 ifp->if_hwassist |= BNX_CSUM_FEATURES;
6c8d8ecc 3324 else
66deb1c1
SZ
3325 ifp->if_hwassist &= ~BNX_CSUM_FEATURES;
3326 }
3327 if (mask & IFCAP_TSO) {
3328 ifp->if_capenable ^= (mask & IFCAP_TSO);
3329 if (ifp->if_capenable & IFCAP_TSO)
3330 ifp->if_hwassist |= CSUM_TSO;
3331 else
3332 ifp->if_hwassist &= ~CSUM_TSO;
6c8d8ecc
SZ
3333 }
3334 break;
3335 default:
3336 error = ether_ioctl(ifp, command, data);
3337 break;
3338 }
3339 return error;
3340}
3341
3342static void
3343bnx_watchdog(struct ifnet *ifp)
3344{
3345 struct bnx_softc *sc = ifp->if_softc;
3346
3347 if_printf(ifp, "watchdog timeout -- resetting\n");
3348
3349 bnx_init(sc);
3350
3351 ifp->if_oerrors++;
3352
3353 if (!ifq_is_empty(&ifp->if_snd))
3354 if_devstart(ifp);
3355}
3356
3357/*
3358 * Stop the adapter and free any mbufs allocated to the
3359 * RX and TX lists.
3360 */
3361static void
3362bnx_stop(struct bnx_softc *sc)
3363{
3364 struct ifnet *ifp = &sc->arpcom.ac_if;
3365
3366 ASSERT_SERIALIZED(ifp->if_serializer);
3367
3368 callout_stop(&sc->bnx_stat_timer);
3369
3370 /*
3371 * Disable all of the receiver blocks
3372 */
3373 bnx_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
3374 bnx_stop_block(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
3375 bnx_stop_block(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
3376 bnx_stop_block(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
3377 bnx_stop_block(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
3378 bnx_stop_block(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
3379
3380 /*
3381 * Disable all of the transmit blocks
3382 */
3383 bnx_stop_block(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
3384 bnx_stop_block(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
3385 bnx_stop_block(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
3386 bnx_stop_block(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
3387 bnx_stop_block(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
3388 bnx_stop_block(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
3389
3390 /*
3391 * Shut down all of the memory managers and related
3392 * state machines.
3393 */
3394 bnx_stop_block(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
3395 bnx_stop_block(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
3396 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
3397 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
3398
3399 /* Disable host interrupts. */
3400 bnx_disable_intr(sc);
3401
3402 /*
3403 * Tell firmware we're shutting down.
3404 */
3405 BNX_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3406
3407 /* Free the RX lists. */
3408 bnx_free_rx_ring_std(sc);
3409
3410 /* Free jumbo RX list. */
3411 if (BNX_IS_JUMBO_CAPABLE(sc))
3412 bnx_free_rx_ring_jumbo(sc);
3413
3414 /* Free TX buffers. */
3415 bnx_free_tx_ring(sc);
3416
3417 sc->bnx_status_tag = 0;
3418 sc->bnx_link = 0;
3419 sc->bnx_coal_chg = 0;
3420
3421 sc->bnx_tx_saved_considx = BNX_TXCONS_UNSET;
3422
3423 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
3424 ifp->if_timer = 0;
3425}
3426
3427/*
3428 * Stop all chip I/O so that the kernel's probe routines don't
3429 * get confused by errant DMAs when rebooting.
3430 */
3431static void
3432bnx_shutdown(device_t dev)
3433{
3434 struct bnx_softc *sc = device_get_softc(dev);
3435 struct ifnet *ifp = &sc->arpcom.ac_if;
3436
3437 lwkt_serialize_enter(ifp->if_serializer);
3438 bnx_stop(sc);
3439 bnx_reset(sc);
3440 lwkt_serialize_exit(ifp->if_serializer);
3441}
3442
3443static int
3444bnx_suspend(device_t dev)
3445{
3446 struct bnx_softc *sc = device_get_softc(dev);
3447 struct ifnet *ifp = &sc->arpcom.ac_if;
3448
3449 lwkt_serialize_enter(ifp->if_serializer);
3450 bnx_stop(sc);
3451 lwkt_serialize_exit(ifp->if_serializer);
3452
3453 return 0;
3454}
3455
3456static int
3457bnx_resume(device_t dev)
3458{
3459 struct bnx_softc *sc = device_get_softc(dev);
3460 struct ifnet *ifp = &sc->arpcom.ac_if;
3461
3462 lwkt_serialize_enter(ifp->if_serializer);
3463
3464 if (ifp->if_flags & IFF_UP) {
3465 bnx_init(sc);
3466
3467 if (!ifq_is_empty(&ifp->if_snd))
3468 if_devstart(ifp);
3469 }
3470
3471 lwkt_serialize_exit(ifp->if_serializer);
3472
3473 return 0;
3474}
3475
3476static void
3477bnx_setpromisc(struct bnx_softc *sc)
3478{
3479 struct ifnet *ifp = &sc->arpcom.ac_if;
3480
3481 if (ifp->if_flags & IFF_PROMISC)
3482 BNX_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
3483 else
3484 BNX_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
3485}
3486
3487static void
3488bnx_dma_free(struct bnx_softc *sc)
3489{
3490 int i;
3491
3492 /* Destroy RX mbuf DMA stuffs. */
3493 if (sc->bnx_cdata.bnx_rx_mtag != NULL) {
3494 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
3495 bus_dmamap_destroy(sc->bnx_cdata.bnx_rx_mtag,
3496 sc->bnx_cdata.bnx_rx_std_dmamap[i]);
3497 }
3498 bus_dmamap_destroy(sc->bnx_cdata.bnx_rx_mtag,
3499 sc->bnx_cdata.bnx_rx_tmpmap);
3500 bus_dma_tag_destroy(sc->bnx_cdata.bnx_rx_mtag);
3501 }
3502
3503 /* Destroy TX mbuf DMA stuffs. */
3504 if (sc->bnx_cdata.bnx_tx_mtag != NULL) {
3505 for (i = 0; i < BGE_TX_RING_CNT; i++) {
3506 bus_dmamap_destroy(sc->bnx_cdata.bnx_tx_mtag,
3507 sc->bnx_cdata.bnx_tx_dmamap[i]);
3508 }
3509 bus_dma_tag_destroy(sc->bnx_cdata.bnx_tx_mtag);
3510 }
3511
3512 /* Destroy standard RX ring */
3513 bnx_dma_block_free(sc->bnx_cdata.bnx_rx_std_ring_tag,
3514 sc->bnx_cdata.bnx_rx_std_ring_map,
3515 sc->bnx_ldata.bnx_rx_std_ring);
3516
3517 if (BNX_IS_JUMBO_CAPABLE(sc))
3518 bnx_free_jumbo_mem(sc);
3519
3520 /* Destroy RX return ring */
3521 bnx_dma_block_free(sc->bnx_cdata.bnx_rx_return_ring_tag,
3522 sc->bnx_cdata.bnx_rx_return_ring_map,
3523 sc->bnx_ldata.bnx_rx_return_ring);
3524
3525 /* Destroy TX ring */
3526 bnx_dma_block_free(sc->bnx_cdata.bnx_tx_ring_tag,
3527 sc->bnx_cdata.bnx_tx_ring_map,
3528 sc->bnx_ldata.bnx_tx_ring);
3529
3530 /* Destroy status block */
3531 bnx_dma_block_free(sc->bnx_cdata.bnx_status_tag,
3532 sc->bnx_cdata.bnx_status_map,
3533 sc->bnx_ldata.bnx_status_block);
3534
3535 /* Destroy the parent tag */
3536 if (sc->bnx_cdata.bnx_parent_tag != NULL)
3537 bus_dma_tag_destroy(sc->bnx_cdata.bnx_parent_tag);
3538}
3539
3540static int
3541bnx_dma_alloc(struct bnx_softc *sc)
3542{
3543 struct ifnet *ifp = &sc->arpcom.ac_if;
66deb1c1 3544 bus_size_t txmaxsz;
6c8d8ecc
SZ
3545 int i, error;
3546
3547 /*
3548 * Allocate the parent bus DMA tag appropriate for PCI.
3549 *
3550 * All of the NetExtreme/NetLink controllers have 4GB boundary
3551 * DMA bug.
3552 * Whenever an address crosses a multiple of the 4GB boundary
3553 * (including 4GB, 8Gb, 12Gb, etc.) and makes the transition
3554 * from 0xX_FFFF_FFFF to 0x(X+1)_0000_0000 an internal DMA
3555 * state machine will lockup and cause the device to hang.
3556 */
3557 error = bus_dma_tag_create(NULL, 1, BGE_DMA_BOUNDARY_4G,
3558 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3559 NULL, NULL,
3560 BUS_SPACE_MAXSIZE_32BIT, 0,
3561 BUS_SPACE_MAXSIZE_32BIT,
3562 0, &sc->bnx_cdata.bnx_parent_tag);
3563 if (error) {
3564 if_printf(ifp, "could not allocate parent dma tag\n");
3565 return error;
3566 }
3567
3568 /*
3569 * Create DMA tag and maps for RX mbufs.
3570 */
3571 error = bus_dma_tag_create(sc->bnx_cdata.bnx_parent_tag, 1, 0,
3572 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3573 NULL, NULL, MCLBYTES, 1, MCLBYTES,
3574 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK,
3575 &sc->bnx_cdata.bnx_rx_mtag);
3576 if (error) {
3577 if_printf(ifp, "could not allocate RX mbuf dma tag\n");
3578 return error;
3579 }
3580
3581 error = bus_dmamap_create(sc->bnx_cdata.bnx_rx_mtag,
3582 BUS_DMA_WAITOK, &sc->bnx_cdata.bnx_rx_tmpmap);
3583 if (error) {
3584 bus_dma_tag_destroy(sc->bnx_cdata.bnx_rx_mtag);
3585 sc->bnx_cdata.bnx_rx_mtag = NULL;
3586 return error;
3587 }
3588
3589 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
3590 error = bus_dmamap_create(sc->bnx_cdata.bnx_rx_mtag,
3591 BUS_DMA_WAITOK,
3592 &sc->bnx_cdata.bnx_rx_std_dmamap[i]);
3593 if (error) {
3594 int j;
3595
3596 for (j = 0; j < i; ++j) {
3597 bus_dmamap_destroy(sc->bnx_cdata.bnx_rx_mtag,
3598 sc->bnx_cdata.bnx_rx_std_dmamap[j]);
3599 }
3600 bus_dma_tag_destroy(sc->bnx_cdata.bnx_rx_mtag);
3601 sc->bnx_cdata.bnx_rx_mtag = NULL;
3602
3603 if_printf(ifp, "could not create DMA map for RX\n");
3604 return error;
3605 }
3606 }
3607
3608 /*
3609 * Create DMA tag and maps for TX mbufs.
3610 */
66deb1c1
SZ
3611 if (sc->bnx_flags & BNX_FLAG_TSO)
3612 txmaxsz = IP_MAXPACKET + sizeof(struct ether_vlan_header);
3613 else
3614 txmaxsz = BNX_JUMBO_FRAMELEN;
6c8d8ecc
SZ
3615 error = bus_dma_tag_create(sc->bnx_cdata.bnx_parent_tag, 1, 0,
3616 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3617 NULL, NULL,
66deb1c1 3618 txmaxsz, BNX_NSEG_NEW, PAGE_SIZE,
6c8d8ecc
SZ
3619 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK |
3620 BUS_DMA_ONEBPAGE,
3621 &sc->bnx_cdata.bnx_tx_mtag);
3622 if (error) {
3623 if_printf(ifp, "could not allocate TX mbuf dma tag\n");
3624 return error;
3625 }
3626
3627 for (i = 0; i < BGE_TX_RING_CNT; i++) {
3628 error = bus_dmamap_create(sc->bnx_cdata.bnx_tx_mtag,
3629 BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
3630 &sc->bnx_cdata.bnx_tx_dmamap[i]);
3631 if (error) {
3632 int j;
3633
3634 for (j = 0; j < i; ++j) {
3635 bus_dmamap_destroy(sc->bnx_cdata.bnx_tx_mtag,
3636 sc->bnx_cdata.bnx_tx_dmamap[j]);
3637 }
3638 bus_dma_tag_destroy(sc->bnx_cdata.bnx_tx_mtag);
3639 sc->bnx_cdata.bnx_tx_mtag = NULL;
3640
3641 if_printf(ifp, "could not create DMA map for TX\n");
3642 return error;
3643 }
3644 }
3645
3646 /*
3647 * Create DMA stuffs for standard RX ring.
3648 */
3649 error = bnx_dma_block_alloc(sc, BGE_STD_RX_RING_SZ,
3650 &sc->bnx_cdata.bnx_rx_std_ring_tag,
3651 &sc->bnx_cdata.bnx_rx_std_ring_map,
3652 (void *)&sc->bnx_ldata.bnx_rx_std_ring,
3653 &sc->bnx_ldata.bnx_rx_std_ring_paddr);
3654 if (error) {
3655 if_printf(ifp, "could not create std RX ring\n");
3656 return error;
3657 }
3658
3659 /*
3660 * Create jumbo buffer pool.
3661 */
3662 if (BNX_IS_JUMBO_CAPABLE(sc)) {
3663 error = bnx_alloc_jumbo_mem(sc);
3664 if (error) {
3665 if_printf(ifp, "could not create jumbo buffer pool\n");
3666 return error;
3667 }
3668 }
3669
3670 /*
3671 * Create DMA stuffs for RX return ring.
3672 */
3673 error = bnx_dma_block_alloc(sc,
3674 BGE_RX_RTN_RING_SZ(sc->bnx_return_ring_cnt),
3675 &sc->bnx_cdata.bnx_rx_return_ring_tag,
3676 &sc->bnx_cdata.bnx_rx_return_ring_map,
3677 (void *)&sc->bnx_ldata.bnx_rx_return_ring,
3678 &sc->bnx_ldata.bnx_rx_return_ring_paddr);
3679 if (error) {
3680 if_printf(ifp, "could not create RX ret ring\n");
3681 return error;
3682 }
3683
3684 /*
3685 * Create DMA stuffs for TX ring.
3686 */
3687 error = bnx_dma_block_alloc(sc, BGE_TX_RING_SZ,
3688 &sc->bnx_cdata.bnx_tx_ring_tag,
3689 &sc->bnx_cdata.bnx_tx_ring_map,
3690 (void *)&sc->bnx_ldata.bnx_tx_ring,
3691 &sc->bnx_ldata.bnx_tx_ring_paddr);
3692 if (error) {
3693 if_printf(ifp, "could not create TX ring\n");
3694 return error;
3695 }
3696
3697 /*
3698 * Create DMA stuffs for status block.
3699 */
3700 error = bnx_dma_block_alloc(sc, BGE_STATUS_BLK_SZ,
3701 &sc->bnx_cdata.bnx_status_tag,
3702 &sc->bnx_cdata.bnx_status_map,
3703 (void *)&sc->bnx_ldata.bnx_status_block,
3704 &sc->bnx_ldata.bnx_status_block_paddr);
3705 if (error) {
3706 if_printf(ifp, "could not create status block\n");
3707 return error;
3708 }
3709
3710 return 0;
3711}
3712
3713static int
3714bnx_dma_block_alloc(struct bnx_softc *sc, bus_size_t size, bus_dma_tag_t *tag,
3715 bus_dmamap_t *map, void **addr, bus_addr_t *paddr)
3716{
3717 bus_dmamem_t dmem;
3718 int error;
3719
3720 error = bus_dmamem_coherent(sc->bnx_cdata.bnx_parent_tag, PAGE_SIZE, 0,
3721 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3722 size, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
3723 if (error)
3724 return error;
3725
3726 *tag = dmem.dmem_tag;
3727 *map = dmem.dmem_map;
3728 *addr = dmem.dmem_addr;
3729 *paddr = dmem.dmem_busaddr;
3730
3731 return 0;
3732}
3733
3734static void
3735bnx_dma_block_free(bus_dma_tag_t tag, bus_dmamap_t map, void *addr)
3736{
3737 if (tag != NULL) {
3738 bus_dmamap_unload(tag, map);
3739 bus_dmamem_free(tag, addr, map);
3740 bus_dma_tag_destroy(tag);
3741 }
3742}
3743
3744static void
3745bnx_tbi_link_upd(struct bnx_softc *sc, uint32_t status)
3746{
3747 struct ifnet *ifp = &sc->arpcom.ac_if;
3748
3749#define PCS_ENCODE_ERR (BGE_MACSTAT_PORT_DECODE_ERROR|BGE_MACSTAT_MI_COMPLETE)
3750
3751 /*
3752 * Sometimes PCS encoding errors are detected in
3753 * TBI mode (on fiber NICs), and for some reason
3754 * the chip will signal them as link changes.
3755 * If we get a link change event, but the 'PCS
3756 * encoding error' bit in the MAC status register
3757 * is set, don't bother doing a link check.
3758 * This avoids spurious "gigabit link up" messages
3759 * that sometimes appear on fiber NICs during
3760 * periods of heavy traffic.
3761 */
3762 if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
3763 if (!sc->bnx_link) {
3764 sc->bnx_link++;
3765 if (sc->bnx_asicrev == BGE_ASICREV_BCM5704) {
3766 BNX_CLRBIT(sc, BGE_MAC_MODE,
3767 BGE_MACMODE_TBI_SEND_CFGS);
3768 }
3769 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
3770
3771 if (bootverbose)
3772 if_printf(ifp, "link UP\n");
3773
3774 ifp->if_link_state = LINK_STATE_UP;
3775 if_link_state_change(ifp);
3776 }
3777 } else if ((status & PCS_ENCODE_ERR) != PCS_ENCODE_ERR) {
3778 if (sc->bnx_link) {
3779 sc->bnx_link = 0;
3780
3781 if (bootverbose)
3782 if_printf(ifp, "link DOWN\n");
3783
3784 ifp->if_link_state = LINK_STATE_DOWN;
3785 if_link_state_change(ifp);
3786 }
3787 }
3788
3789#undef PCS_ENCODE_ERR
3790
3791 /* Clear the attention. */
3792 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
3793 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
3794 BGE_MACSTAT_LINK_CHANGED);
3795}
3796
3797static void
3798bnx_copper_link_upd(struct bnx_softc *sc, uint32_t status __unused)
3799{
3800 struct ifnet *ifp = &sc->arpcom.ac_if;
3801 struct mii_data *mii = device_get_softc(sc->bnx_miibus);
3802
3803 mii_pollstat(mii);
3804 bnx_miibus_statchg(sc->bnx_dev);
3805
3806 if (bootverbose) {
3807 if (sc->bnx_link)
3808 if_printf(ifp, "link UP\n");
3809 else
3810 if_printf(ifp, "link DOWN\n");
3811 }
3812
3813 /* Clear the attention. */
3814 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
3815 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
3816 BGE_MACSTAT_LINK_CHANGED);
3817}
3818
3819static void
3820bnx_autopoll_link_upd(struct bnx_softc *sc, uint32_t status __unused)
3821{
3822 struct ifnet *ifp = &sc->arpcom.ac_if;
3823 struct mii_data *mii = device_get_softc(sc->bnx_miibus);
3824
3825 mii_pollstat(mii);
3826
3827 if (!sc->bnx_link &&
3828 (mii->mii_media_status & IFM_ACTIVE) &&
3829 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
3830 sc->bnx_link++;
3831 if (bootverbose)
3832 if_printf(ifp, "link UP\n");
3833 } else if (sc->bnx_link &&
3834 (!(mii->mii_media_status & IFM_ACTIVE) ||
3835 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
3836 sc->bnx_link = 0;
3837 if (bootverbose)
3838 if_printf(ifp, "link DOWN\n");
3839 }
3840
3841 /* Clear the attention. */
3842 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
3843 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
3844 BGE_MACSTAT_LINK_CHANGED);
3845}
3846
3847static int
3848bnx_sysctl_rx_coal_ticks(SYSCTL_HANDLER_ARGS)
3849{
3850 struct bnx_softc *sc = arg1;
3851
3852 return bnx_sysctl_coal_chg(oidp, arg1, arg2, req,
3853 &sc->bnx_rx_coal_ticks,
3854 BNX_RX_COAL_TICKS_MIN, BNX_RX_COAL_TICKS_MAX,
3855 BNX_RX_COAL_TICKS_CHG);
3856}
3857
3858static int
3859bnx_sysctl_tx_coal_ticks(SYSCTL_HANDLER_ARGS)
3860{
3861 struct bnx_softc *sc = arg1;
3862
3863 return bnx_sysctl_coal_chg(oidp, arg1, arg2, req,
3864 &sc->bnx_tx_coal_ticks,
3865 BNX_TX_COAL_TICKS_MIN, BNX_TX_COAL_TICKS_MAX,
3866 BNX_TX_COAL_TICKS_CHG);
3867}
3868
3869static int
3870bnx_sysctl_rx_coal_bds(SYSCTL_HANDLER_ARGS)
3871{
3872 struct bnx_softc *sc = arg1;
3873
3874 return bnx_sysctl_coal_chg(oidp, arg1, arg2, req,
3875 &sc->bnx_rx_coal_bds,
3876 BNX_RX_COAL_BDS_MIN, BNX_RX_COAL_BDS_MAX,
3877 BNX_RX_COAL_BDS_CHG);
3878}
3879
3880static int
3881bnx_sysctl_tx_coal_bds(SYSCTL_HANDLER_ARGS)
3882{
3883 struct bnx_softc *sc = arg1;
3884
3885 return bnx_sysctl_coal_chg(oidp, arg1, arg2, req,
3886 &sc->bnx_tx_coal_bds,
3887 BNX_TX_COAL_BDS_MIN, BNX_TX_COAL_BDS_MAX,
3888 BNX_TX_COAL_BDS_CHG);
3889}
3890
3891static int
3892bnx_sysctl_rx_coal_bds_int(SYSCTL_HANDLER_ARGS)
3893{
3894 struct bnx_softc *sc = arg1;
3895
3896 return bnx_sysctl_coal_chg(oidp, arg1, arg2, req,
3897 &sc->bnx_rx_coal_bds_int,
3898 BNX_RX_COAL_BDS_MIN, BNX_RX_COAL_BDS_MAX,
3899 BNX_RX_COAL_BDS_INT_CHG);
3900}
3901
3902static int
3903bnx_sysctl_tx_coal_bds_int(SYSCTL_HANDLER_ARGS)
3904{
3905 struct bnx_softc *sc = arg1;
3906
3907 return bnx_sysctl_coal_chg(oidp, arg1, arg2, req,
3908 &sc->bnx_tx_coal_bds_int,
3909 BNX_TX_COAL_BDS_MIN, BNX_TX_COAL_BDS_MAX,
3910 BNX_TX_COAL_BDS_INT_CHG);
3911}
3912
3913static int
3914bnx_sysctl_coal_chg(SYSCTL_HANDLER_ARGS, uint32_t *coal,
3915 int coal_min, int coal_max, uint32_t coal_chg_mask)
3916{
3917 struct bnx_softc *sc = arg1;
3918 struct ifnet *ifp = &sc->arpcom.ac_if;
3919 int error = 0, v;
3920
3921 lwkt_serialize_enter(ifp->if_serializer);
3922
3923 v = *coal;
3924 error = sysctl_handle_int(oidp, &v, 0, req);
3925 if (!error && req->newptr != NULL) {
3926 if (v < coal_min || v > coal_max) {
3927 error = EINVAL;
3928 } else {
3929 *coal = v;
3930 sc->bnx_coal_chg |= coal_chg_mask;
3931 }
3932 }
3933
3934 lwkt_serialize_exit(ifp->if_serializer);
3935 return error;
3936}
3937
39a8d43a
SZ
3938#ifdef IFPOLL_ENABLE
3939
3940static int
3941bnx_sysctl_npoll_stfrac(SYSCTL_HANDLER_ARGS)
3942{
3943 struct bnx_softc *sc = arg1;
3944 struct ifnet *ifp = &sc->arpcom.ac_if;
3945 int error = 0, stfrac;
3946
3947 lwkt_serialize_enter(ifp->if_serializer);
3948
0c7fdccd 3949 stfrac = sc->bnx_npoll_stfrac + 1;
39a8d43a
SZ
3950 error = sysctl_handle_int(oidp, &stfrac, 0, req);
3951 if (!error && req->newptr != NULL) {
0c7fdccd 3952 if (stfrac < 1) {
39a8d43a
SZ
3953 error = EINVAL;
3954 } else {
0c7fdccd 3955 sc->bnx_npoll_stfrac = stfrac - 1;
39a8d43a
SZ
3956 if (sc->bnx_npoll_stcount > sc->bnx_npoll_stfrac)
3957 sc->bnx_npoll_stcount = sc->bnx_npoll_stfrac;
3958 }
3959 }
3960
3961 lwkt_serialize_exit(ifp->if_serializer);
3962 return error;
3963}
3964
3965static int
3966bnx_sysctl_npoll_cpuid(SYSCTL_HANDLER_ARGS)
3967{
3968 struct bnx_softc *sc = arg1;
3969 struct ifnet *ifp = &sc->arpcom.ac_if;
3970 int error = 0, cpuid;
3971
3972 lwkt_serialize_enter(ifp->if_serializer);
3973
3974 cpuid = sc->bnx_npoll_cpuid;
3975 error = sysctl_handle_int(oidp, &cpuid, 0, req);
3976 if (!error && req->newptr != NULL) {
3977 if (cpuid < 0 || cpuid >= ncpus2)
3978 error = EINVAL;
3979 else
3980 sc->bnx_npoll_cpuid = cpuid;
3981 }
3982
3983 lwkt_serialize_exit(ifp->if_serializer);
3984 return error;
3985}
3986
3987#endif /* IFPOLL_ENABLE */
3988
6c8d8ecc
SZ
3989static void
3990bnx_coal_change(struct bnx_softc *sc)
3991{
3992 struct ifnet *ifp = &sc->arpcom.ac_if;
3993 uint32_t val;
3994
3995 ASSERT_SERIALIZED(ifp->if_serializer);
3996
3997 if (sc->bnx_coal_chg & BNX_RX_COAL_TICKS_CHG) {
3998 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS,
3999 sc->bnx_rx_coal_ticks);
4000 DELAY(10);
4001 val = CSR_READ_4(sc, BGE_HCC_RX_COAL_TICKS);
4002
4003 if (bootverbose) {
4004 if_printf(ifp, "rx_coal_ticks -> %u\n",
4005 sc->bnx_rx_coal_ticks);
4006 }
4007 }
4008
4009 if (sc->bnx_coal_chg & BNX_TX_COAL_TICKS_CHG) {
4010 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS,
4011 sc->bnx_tx_coal_ticks);
4012 DELAY(10);
4013 val = CSR_READ_4(sc, BGE_HCC_TX_COAL_TICKS);
4014
4015 if (bootverbose) {
4016 if_printf(ifp, "tx_coal_ticks -> %u\n",
4017 sc->bnx_tx_coal_ticks);
4018 }
4019 }
4020
4021 if (sc->bnx_coal_chg & BNX_RX_COAL_BDS_CHG) {
4022 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS,
4023 sc->bnx_rx_coal_bds);
4024 DELAY(10);
4025 val = CSR_READ_4(sc, BGE_HCC_RX_MAX_COAL_BDS);
4026
4027 if (bootverbose) {
4028 if_printf(ifp, "rx_coal_bds -> %u\n",
4029 sc->bnx_rx_coal_bds);
4030 }
4031 }
4032
4033 if (sc->bnx_coal_chg & BNX_TX_COAL_BDS_CHG) {
4034 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS,
4035 sc->bnx_tx_coal_bds);
4036 DELAY(10);
4037 val = CSR_READ_4(sc, BGE_HCC_TX_MAX_COAL_BDS);
4038
4039 if (bootverbose) {
cc98a7c2 4040 if_printf(ifp, "tx_coal_bds -> %u\n",
6c8d8ecc
SZ
4041 sc->bnx_tx_coal_bds);
4042 }
4043 }
4044
4045 if (sc->bnx_coal_chg & BNX_RX_COAL_BDS_INT_CHG) {
4046 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT,
4047 sc->bnx_rx_coal_bds_int);
4048 DELAY(10);
4049 val = CSR_READ_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT);
4050
4051 if (bootverbose) {
4052 if_printf(ifp, "rx_coal_bds_int -> %u\n",
4053 sc->bnx_rx_coal_bds_int);
4054 }
4055 }
4056