bnx: Dispatch state timer to the same CPU as MSI/legacy interrupt CPU
[dragonfly.git] / sys / dev / netif / bnx / if_bnx.c
1/*
2 * Copyright (c) 2001 Wind River Systems
3 * Copyright (c) 1997, 1998, 1999, 2001
4 * Bill Paul <wpaul@windriver.com>. All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 * must display the following acknowledgement:
16 * This product includes software developed by Bill Paul.
17 * 4. Neither the name of the author nor the names of any co-contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31 * THE POSSIBILITY OF SUCH DAMAGE.
32 *
33 * $FreeBSD: src/sys/dev/bge/if_bge.c,v 1.3.2.39 2005/07/03 03:41:18 silby Exp $
34 */
35
36
37#include "opt_polling.h"
38
39#include <sys/param.h>
40#include <sys/bus.h>
41#include <sys/endian.h>
42#include <sys/kernel.h>
43#include <sys/interrupt.h>
44#include <sys/mbuf.h>
45#include <sys/malloc.h>
46#include <sys/queue.h>
47#include <sys/rman.h>
48#include <sys/serialize.h>
49#include <sys/socket.h>
50#include <sys/sockio.h>
51#include <sys/sysctl.h>
52
53#include <net/bpf.h>
54#include <net/ethernet.h>
55#include <net/if.h>
56#include <net/if_arp.h>
57#include <net/if_dl.h>
58#include <net/if_media.h>
59#include <net/if_types.h>
60#include <net/ifq_var.h>
61#include <net/vlan/if_vlan_var.h>
62#include <net/vlan/if_vlan_ether.h>
63
64#include <dev/netif/mii_layer/mii.h>
65#include <dev/netif/mii_layer/miivar.h>
66#include <dev/netif/mii_layer/brgphyreg.h>
67
68#include <bus/pci/pcidevs.h>
69#include <bus/pci/pcireg.h>
70#include <bus/pci/pcivar.h>
71
72#include <dev/netif/bge/if_bgereg.h>
73#include <dev/netif/bnx/if_bnxvar.h>
74
75/* "device miibus" required. See GENERIC if you get errors here. */
76#include "miibus_if.h"
77
78#define BNX_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)
79
80static const struct bnx_type {
81 uint16_t bnx_vid;
82 uint16_t bnx_did;
83 char *bnx_name;
84} bnx_devs[] = {
85 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5717,
86 "Broadcom BCM5717 Gigabit Ethernet" },
87 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5718,
88 "Broadcom BCM5718 Gigabit Ethernet" },
89 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5719,
90 "Broadcom BCM5719 Gigabit Ethernet" },
91 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5720_ALT,
92 "Broadcom BCM5720 Gigabit Ethernet" },
93
94 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57761,
95 "Broadcom BCM57761 Gigabit Ethernet" },
96 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57762,
97 "Broadcom BCM57762 Gigabit Ethernet" },
98 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57765,
99 "Broadcom BCM57765 Gigabit Ethernet" },
100 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57766,
101 "Broadcom BCM57766 Gigabit Ethernet" },
102 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57781,
103 "Broadcom BCM57781 Gigabit Ethernet" },
104 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57782,
105 "Broadcom BCM57782 Gigabit Ethernet" },
106 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57785,
107 "Broadcom BCM57785 Gigabit Ethernet" },
108 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57786,
109 "Broadcom BCM57786 Gigabit Ethernet" },
110 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57791,
111 "Broadcom BCM57791 Fast Ethernet" },
112 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57795,
113 "Broadcom BCM57795 Fast Ethernet" },
114
115 { 0, 0, NULL }
116};
117
118#define BNX_IS_JUMBO_CAPABLE(sc) ((sc)->bnx_flags & BNX_FLAG_JUMBO)
119#define BNX_IS_5717_PLUS(sc) ((sc)->bnx_flags & BNX_FLAG_5717_PLUS)
120#define BNX_IS_57765_PLUS(sc) ((sc)->bnx_flags & BNX_FLAG_57765_PLUS)
121#define BNX_IS_57765_FAMILY(sc) \
122 ((sc)->bnx_flags & BNX_FLAG_57765_FAMILY)
123
124typedef int (*bnx_eaddr_fcn_t)(struct bnx_softc *, uint8_t[]);
125
126static int bnx_probe(device_t);
127static int bnx_attach(device_t);
128static int bnx_detach(device_t);
129static void bnx_shutdown(device_t);
130static int bnx_suspend(device_t);
131static int bnx_resume(device_t);
132static int bnx_miibus_readreg(device_t, int, int);
133static int bnx_miibus_writereg(device_t, int, int, int);
134static void bnx_miibus_statchg(device_t);
135
136#ifdef DEVICE_POLLING
137static void bnx_poll(struct ifnet *ifp, enum poll_cmd cmd, int count);
138#endif
139static void bnx_intr_legacy(void *);
140static void bnx_msi(void *);
141static void bnx_msi_oneshot(void *);
142static void bnx_intr(struct bnx_softc *);
143static void bnx_enable_intr(struct bnx_softc *);
144static void bnx_disable_intr(struct bnx_softc *);
145static void bnx_txeof(struct bnx_softc *, uint16_t);
146static void bnx_rxeof(struct bnx_softc *, uint16_t);
147
148static void bnx_start(struct ifnet *);
149static int bnx_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
150static void bnx_init(void *);
151static void bnx_stop(struct bnx_softc *);
152static void bnx_watchdog(struct ifnet *);
153static int bnx_ifmedia_upd(struct ifnet *);
154static void bnx_ifmedia_sts(struct ifnet *, struct ifmediareq *);
155static void bnx_tick(void *);
156
157static int bnx_alloc_jumbo_mem(struct bnx_softc *);
158static void bnx_free_jumbo_mem(struct bnx_softc *);
159static struct bnx_jslot
160 *bnx_jalloc(struct bnx_softc *);
161static void bnx_jfree(void *);
162static void bnx_jref(void *);
163static int bnx_newbuf_std(struct bnx_softc *, int, int);
164static int bnx_newbuf_jumbo(struct bnx_softc *, int, int);
165static void bnx_setup_rxdesc_std(struct bnx_softc *, int);
166static void bnx_setup_rxdesc_jumbo(struct bnx_softc *, int);
167static int bnx_init_rx_ring_std(struct bnx_softc *);
168static void bnx_free_rx_ring_std(struct bnx_softc *);
169static int bnx_init_rx_ring_jumbo(struct bnx_softc *);
170static void bnx_free_rx_ring_jumbo(struct bnx_softc *);
171static void bnx_free_tx_ring(struct bnx_softc *);
172static int bnx_init_tx_ring(struct bnx_softc *);
173static int bnx_dma_alloc(struct bnx_softc *);
174static void bnx_dma_free(struct bnx_softc *);
175static int bnx_dma_block_alloc(struct bnx_softc *, bus_size_t,
176 bus_dma_tag_t *, bus_dmamap_t *, void **, bus_addr_t *);
177static void bnx_dma_block_free(bus_dma_tag_t, bus_dmamap_t, void *);
178static struct mbuf *
179 bnx_defrag_shortdma(struct mbuf *);
180static int bnx_encap(struct bnx_softc *, struct mbuf **, uint32_t *);
181
182static void bnx_reset(struct bnx_softc *);
183static int bnx_chipinit(struct bnx_softc *);
184static int bnx_blockinit(struct bnx_softc *);
185static void bnx_stop_block(struct bnx_softc *, bus_size_t, uint32_t);
186static void bnx_enable_msi(struct bnx_softc *sc);
187static void bnx_setmulti(struct bnx_softc *);
188static void bnx_setpromisc(struct bnx_softc *);
189static void bnx_stats_update_regs(struct bnx_softc *);
190static uint32_t bnx_dma_swap_options(struct bnx_softc *);
191
192static uint32_t bnx_readmem_ind(struct bnx_softc *, uint32_t);
193static void bnx_writemem_ind(struct bnx_softc *, uint32_t, uint32_t);
194#ifdef notdef
195static uint32_t bnx_readreg_ind(struct bnx_softc *, uint32_t);
196#endif
197static void bnx_writereg_ind(struct bnx_softc *, uint32_t, uint32_t);
198static void bnx_writemem_direct(struct bnx_softc *, uint32_t, uint32_t);
199static void bnx_writembx(struct bnx_softc *, int, int);
200static uint8_t bnx_nvram_getbyte(struct bnx_softc *, int, uint8_t *);
201static int bnx_read_nvram(struct bnx_softc *, caddr_t, int, int);
202static uint8_t bnx_eeprom_getbyte(struct bnx_softc *, uint32_t, uint8_t *);
203static int bnx_read_eeprom(struct bnx_softc *, caddr_t, uint32_t, size_t);
204
205static void bnx_tbi_link_upd(struct bnx_softc *, uint32_t);
206static void bnx_copper_link_upd(struct bnx_softc *, uint32_t);
207static void bnx_autopoll_link_upd(struct bnx_softc *, uint32_t);
208static void bnx_link_poll(struct bnx_softc *);
209
210static int bnx_get_eaddr_mem(struct bnx_softc *, uint8_t[]);
211static int bnx_get_eaddr_nvram(struct bnx_softc *, uint8_t[]);
212static int bnx_get_eaddr_eeprom(struct bnx_softc *, uint8_t[]);
213static int bnx_get_eaddr(struct bnx_softc *, uint8_t[]);
214
215static void bnx_coal_change(struct bnx_softc *);
216static int bnx_sysctl_rx_coal_ticks(SYSCTL_HANDLER_ARGS);
217static int bnx_sysctl_tx_coal_ticks(SYSCTL_HANDLER_ARGS);
218static int bnx_sysctl_rx_coal_bds(SYSCTL_HANDLER_ARGS);
219static int bnx_sysctl_tx_coal_bds(SYSCTL_HANDLER_ARGS);
220static int bnx_sysctl_rx_coal_bds_int(SYSCTL_HANDLER_ARGS);
221static int bnx_sysctl_tx_coal_bds_int(SYSCTL_HANDLER_ARGS);
222static int bnx_sysctl_coal_chg(SYSCTL_HANDLER_ARGS, uint32_t *,
223 int, int, uint32_t);
224
225static int bnx_msi_enable = 1;
226TUNABLE_INT("hw.bnx.msi.enable", &bnx_msi_enable);
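/*
 * The tunable can be set from the loader, e.g. in /boot/loader.conf:
 *	hw.bnx.msi.enable="0"
 */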
227
228static device_method_t bnx_methods[] = {
229 /* Device interface */
230 DEVMETHOD(device_probe, bnx_probe),
231 DEVMETHOD(device_attach, bnx_attach),
232 DEVMETHOD(device_detach, bnx_detach),
233 DEVMETHOD(device_shutdown, bnx_shutdown),
234 DEVMETHOD(device_suspend, bnx_suspend),
235 DEVMETHOD(device_resume, bnx_resume),
236
237 /* bus interface */
238 DEVMETHOD(bus_print_child, bus_generic_print_child),
239 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
240
241 /* MII interface */
242 DEVMETHOD(miibus_readreg, bnx_miibus_readreg),
243 DEVMETHOD(miibus_writereg, bnx_miibus_writereg),
244 DEVMETHOD(miibus_statchg, bnx_miibus_statchg),
245
246 { 0, 0 }
247};
248
249static DEFINE_CLASS_0(bnx, bnx_driver, bnx_methods, sizeof(struct bnx_softc));
250static devclass_t bnx_devclass;
251
252DECLARE_DUMMY_MODULE(if_bnx);
253DRIVER_MODULE(if_bnx, pci, bnx_driver, bnx_devclass, NULL, NULL);
254DRIVER_MODULE(miibus, bnx, miibus_driver, miibus_devclass, NULL, NULL);
255
256static uint32_t
257bnx_readmem_ind(struct bnx_softc *sc, uint32_t off)
258{
259 device_t dev = sc->bnx_dev;
260 uint32_t val;
261
262 if (sc->bnx_asicrev == BGE_ASICREV_BCM5906 &&
263 off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4)
264 return 0;
265
266 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
267 val = pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4);
268 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
269 return (val);
270}
271
272static void
273bnx_writemem_ind(struct bnx_softc *sc, uint32_t off, uint32_t val)
274{
275 device_t dev = sc->bnx_dev;
276
277 if (sc->bnx_asicrev == BGE_ASICREV_BCM5906 &&
278 off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4)
279 return;
280
281 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
282 pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
283 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
284}
285
286#ifdef notdef
287static uint32_t
288bnx_readreg_ind(struct bnx_softc *sc, uint32_t off)
289{
290 device_t dev = sc->bnx_dev;
291
292 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
293 return(pci_read_config(dev, BGE_PCI_REG_DATA, 4));
294}
295#endif
296
297static void
298bnx_writereg_ind(struct bnx_softc *sc, uint32_t off, uint32_t val)
299{
300 device_t dev = sc->bnx_dev;
301
302 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
303 pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);
304}
305
306static void
307bnx_writemem_direct(struct bnx_softc *sc, uint32_t off, uint32_t val)
308{
309 CSR_WRITE_4(sc, off, val);
310}
311
312static void
313bnx_writembx(struct bnx_softc *sc, int off, int val)
314{
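	/*
	 * The BCM5906 keeps its mailbox registers at a different
	 * offset; rebase the standard mailbox offset by the distance
	 * between the two IRQ0_HI mailboxes before writing.
	 */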
315 if (sc->bnx_asicrev == BGE_ASICREV_BCM5906)
316 off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI;
317
318 CSR_WRITE_4(sc, off, val);
319}
320
321static uint8_t
322bnx_nvram_getbyte(struct bnx_softc *sc, int addr, uint8_t *dest)
323{
324 uint32_t access, byte = 0;
325 int i;
326
327 /* Lock. */
328 CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
329 for (i = 0; i < 8000; i++) {
330 if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1)
331 break;
332 DELAY(20);
333 }
334 if (i == 8000)
335 return (1);
336
337 /* Enable access. */
338 access = CSR_READ_4(sc, BGE_NVRAM_ACCESS);
339 CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access | BGE_NVRAMACC_ENABLE);
340
341 CSR_WRITE_4(sc, BGE_NVRAM_ADDR, addr & 0xfffffffc);
342 CSR_WRITE_4(sc, BGE_NVRAM_CMD, BGE_NVRAM_READCMD);
343 for (i = 0; i < BNX_TIMEOUT * 10; i++) {
344 DELAY(10);
345 if (CSR_READ_4(sc, BGE_NVRAM_CMD) & BGE_NVRAMCMD_DONE) {
346 DELAY(10);
347 break;
348 }
349 }
350
351 if (i == BNX_TIMEOUT * 10) {
352 if_printf(&sc->arpcom.ac_if, "nvram read timed out\n");
353 return (1);
354 }
355
356 /* Get result. */
357 byte = CSR_READ_4(sc, BGE_NVRAM_RDDATA);
358
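	/*
	 * The NVRAM data register returns the 32-bit word with its
	 * first byte in the MSB; swap it and shift to extract the
	 * byte at offset (addr % 4).
	 */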
359 *dest = (bswap32(byte) >> ((addr % 4) * 8)) & 0xFF;
360
361 /* Disable access. */
362 CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access);
363
364 /* Unlock. */
365 CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_CLR1);
366 CSR_READ_4(sc, BGE_NVRAM_SWARB);
367
368 return (0);
369}
370
371/*
372 * Read a sequence of bytes from NVRAM.
373 */
374static int
375bnx_read_nvram(struct bnx_softc *sc, caddr_t dest, int off, int cnt)
376{
377 int err = 0, i;
378 uint8_t byte = 0;
379
380 if (sc->bnx_asicrev != BGE_ASICREV_BCM5906)
381 return (1);
382
383 for (i = 0; i < cnt; i++) {
384 err = bnx_nvram_getbyte(sc, off + i, &byte);
385 if (err)
386 break;
387 *(dest + i) = byte;
388 }
389
390 return (err ? 1 : 0);
391}
392
393/*
394 * Read a byte of data stored in the EEPROM at address 'addr.' The
395 * BCM570x supports both the traditional bitbang interface and an
396 * auto access interface for reading the EEPROM. We use the auto
397 * access method.
398 */
399static uint8_t
400bnx_eeprom_getbyte(struct bnx_softc *sc, uint32_t addr, uint8_t *dest)
401{
402 int i;
403 uint32_t byte = 0;
404
405 /*
406 * Enable use of auto EEPROM access so we can avoid
407 * having to use the bitbang method.
408 */
409 BNX_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);
410
411 /* Reset the EEPROM, load the clock period. */
412 CSR_WRITE_4(sc, BGE_EE_ADDR,
413 BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
414 DELAY(20);
415
416 /* Issue the read EEPROM command. */
417 CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);
418
419 /* Wait for completion */
420 for(i = 0; i < BNX_TIMEOUT * 10; i++) {
421 DELAY(10);
422 if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
423 break;
424 }
425
426	if (i == BNX_TIMEOUT * 10) {
427 if_printf(&sc->arpcom.ac_if, "eeprom read timed out\n");
428 return(1);
429 }
430
431 /* Get result. */
432 byte = CSR_READ_4(sc, BGE_EE_DATA);
433
434 *dest = (byte >> ((addr % 4) * 8)) & 0xFF;
435
436 return(0);
437}
438
439/*
440 * Read a sequence of bytes from the EEPROM.
441 */
442static int
443bnx_read_eeprom(struct bnx_softc *sc, caddr_t dest, uint32_t off, size_t len)
444{
445 size_t i;
446 int err;
447 uint8_t byte;
448
449 for (byte = 0, err = 0, i = 0; i < len; i++) {
450 err = bnx_eeprom_getbyte(sc, off + i, &byte);
451 if (err)
452 break;
453 *(dest + i) = byte;
454 }
455
456 return(err ? 1 : 0);
457}
458
459static int
460bnx_miibus_readreg(device_t dev, int phy, int reg)
461{
462 struct bnx_softc *sc = device_get_softc(dev);
463 uint32_t val;
464 int i;
465
466 KASSERT(phy == sc->bnx_phyno,
467 ("invalid phyno %d, should be %d", phy, sc->bnx_phyno));
468
469 /* Clear the autopoll bit if set, otherwise may trigger PCI errors. */
470 if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) {
471 CSR_WRITE_4(sc, BGE_MI_MODE,
472 sc->bnx_mi_mode & ~BGE_MIMODE_AUTOPOLL);
473 DELAY(80);
474 }
475
476 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ | BGE_MICOMM_BUSY |
477 BGE_MIPHY(phy) | BGE_MIREG(reg));
478
479 /* Poll for the PHY register access to complete. */
480 for (i = 0; i < BNX_TIMEOUT; i++) {
481 DELAY(10);
482 val = CSR_READ_4(sc, BGE_MI_COMM);
483 if ((val & BGE_MICOMM_BUSY) == 0) {
484 DELAY(5);
485 val = CSR_READ_4(sc, BGE_MI_COMM);
486 break;
487 }
488 }
489 if (i == BNX_TIMEOUT) {
490 if_printf(&sc->arpcom.ac_if, "PHY read timed out "
491 "(phy %d, reg %d, val 0x%08x)\n", phy, reg, val);
492 val = 0;
493 }
494
495 /* Restore the autopoll bit if necessary. */
496 if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) {
497 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bnx_mi_mode);
498 DELAY(80);
499 }
500
501 if (val & BGE_MICOMM_READFAIL)
502 return 0;
503
504 return (val & 0xFFFF);
505}
506
507static int
508bnx_miibus_writereg(device_t dev, int phy, int reg, int val)
509{
510 struct bnx_softc *sc = device_get_softc(dev);
511 int i;
512
513 KASSERT(phy == sc->bnx_phyno,
514 ("invalid phyno %d, should be %d", phy, sc->bnx_phyno));
515
516 if (sc->bnx_asicrev == BGE_ASICREV_BCM5906 &&
517 (reg == BRGPHY_MII_1000CTL || reg == BRGPHY_MII_AUXCTL))
518 return 0;
519
520 /* Clear the autopoll bit if set, otherwise may trigger PCI errors. */
521 if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) {
522 CSR_WRITE_4(sc, BGE_MI_MODE,
523 sc->bnx_mi_mode & ~BGE_MIMODE_AUTOPOLL);
524 DELAY(80);
525 }
526
527 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE | BGE_MICOMM_BUSY |
528 BGE_MIPHY(phy) | BGE_MIREG(reg) | val);
529
530 for (i = 0; i < BNX_TIMEOUT; i++) {
531 DELAY(10);
532 if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) {
533 DELAY(5);
534 CSR_READ_4(sc, BGE_MI_COMM); /* dummy read */
535 break;
536 }
537 }
538 if (i == BNX_TIMEOUT) {
539 if_printf(&sc->arpcom.ac_if, "PHY write timed out "
540 "(phy %d, reg %d, val %d)\n", phy, reg, val);
541 }
542
543 /* Restore the autopoll bit if necessary. */
544 if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) {
545 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bnx_mi_mode);
546 DELAY(80);
547 }
548
549 return 0;
550}
551
552static void
553bnx_miibus_statchg(device_t dev)
554{
555 struct bnx_softc *sc;
556 struct mii_data *mii;
557
558 sc = device_get_softc(dev);
559 mii = device_get_softc(sc->bnx_miibus);
560
561 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
562 (IFM_ACTIVE | IFM_AVALID)) {
563 switch (IFM_SUBTYPE(mii->mii_media_active)) {
564 case IFM_10_T:
565 case IFM_100_TX:
566 sc->bnx_link = 1;
567 break;
568 case IFM_1000_T:
569 case IFM_1000_SX:
570 case IFM_2500_SX:
571 if (sc->bnx_asicrev != BGE_ASICREV_BCM5906)
572 sc->bnx_link = 1;
573 else
574 sc->bnx_link = 0;
575 break;
576 default:
577 sc->bnx_link = 0;
578 break;
579 }
580 } else {
581 sc->bnx_link = 0;
582 }
583 if (sc->bnx_link == 0)
584 return;
585
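	/*
	 * Link is up: reprogram the MAC port mode (GMII for gigabit,
	 * MII otherwise) and the duplex setting to match what the PHY
	 * negotiated.
	 */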
586 BNX_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
587 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
588 IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) {
589 BNX_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
590 } else {
591 BNX_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
592 }
593
594 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
595 BNX_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
596 } else {
597 BNX_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
598 }
599}
600
601/*
602 * Memory management for jumbo frames.
603 */
604static int
605bnx_alloc_jumbo_mem(struct bnx_softc *sc)
606{
607 struct ifnet *ifp = &sc->arpcom.ac_if;
608 struct bnx_jslot *entry;
609 uint8_t *ptr;
610 bus_addr_t paddr;
611 int i, error;
612
613 /*
614 * Create tag for jumbo mbufs.
615 * This is really a bit of a kludge. We allocate a special
616 * jumbo buffer pool which (thanks to the way our DMA
617 * memory allocation works) will consist of contiguous
618 * pages. This means that even though a jumbo buffer might
619 * be larger than a page size, we don't really need to
620 * map it into more than one DMA segment. However, the
621 * default mbuf tag will result in multi-segment mappings,
622 * so we have to create a special jumbo mbuf tag that
623 * lets us get away with mapping the jumbo buffers as
624 * a single segment. I think eventually the driver should
625 * be changed so that it uses ordinary mbufs and cluster
626 * buffers, i.e. jumbo frames can span multiple DMA
627 * descriptors. But that's a project for another day.
628 */
629
630 /*
631 * Create DMA stuffs for jumbo RX ring.
632 */
633 error = bnx_dma_block_alloc(sc, BGE_JUMBO_RX_RING_SZ,
634 &sc->bnx_cdata.bnx_rx_jumbo_ring_tag,
635 &sc->bnx_cdata.bnx_rx_jumbo_ring_map,
636 (void *)&sc->bnx_ldata.bnx_rx_jumbo_ring,
637 &sc->bnx_ldata.bnx_rx_jumbo_ring_paddr);
638 if (error) {
639 if_printf(ifp, "could not create jumbo RX ring\n");
640 return error;
641 }
642
643 /*
644 * Create DMA stuffs for jumbo buffer block.
645 */
646 error = bnx_dma_block_alloc(sc, BNX_JMEM,
647 &sc->bnx_cdata.bnx_jumbo_tag,
648 &sc->bnx_cdata.bnx_jumbo_map,
649 (void **)&sc->bnx_ldata.bnx_jumbo_buf,
650 &paddr);
651 if (error) {
652 if_printf(ifp, "could not create jumbo buffer\n");
653 return error;
654 }
655
656 SLIST_INIT(&sc->bnx_jfree_listhead);
657
658 /*
659 * Now divide it up into 9K pieces and save the addresses
660 * in an array. Note that we play an evil trick here by using
661	 * the first few bytes in the buffer to hold the address
662 * of the softc structure for this interface. This is because
663 * bnx_jfree() needs it, but it is called by the mbuf management
664 * code which will not pass it to us explicitly.
665 */
666 for (i = 0, ptr = sc->bnx_ldata.bnx_jumbo_buf; i < BNX_JSLOTS; i++) {
667 entry = &sc->bnx_cdata.bnx_jslots[i];
668 entry->bnx_sc = sc;
669 entry->bnx_buf = ptr;
670 entry->bnx_paddr = paddr;
671 entry->bnx_inuse = 0;
672 entry->bnx_slot = i;
673 SLIST_INSERT_HEAD(&sc->bnx_jfree_listhead, entry, jslot_link);
674
675 ptr += BNX_JLEN;
676 paddr += BNX_JLEN;
677 }
678 return 0;
679}
680
681static void
682bnx_free_jumbo_mem(struct bnx_softc *sc)
683{
684 /* Destroy jumbo RX ring. */
685 bnx_dma_block_free(sc->bnx_cdata.bnx_rx_jumbo_ring_tag,
686 sc->bnx_cdata.bnx_rx_jumbo_ring_map,
687 sc->bnx_ldata.bnx_rx_jumbo_ring);
688
689 /* Destroy jumbo buffer block. */
690 bnx_dma_block_free(sc->bnx_cdata.bnx_jumbo_tag,
691 sc->bnx_cdata.bnx_jumbo_map,
692 sc->bnx_ldata.bnx_jumbo_buf);
693}
694
695/*
696 * Allocate a jumbo buffer.
697 */
698static struct bnx_jslot *
699bnx_jalloc(struct bnx_softc *sc)
700{
701 struct bnx_jslot *entry;
702
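	/*
	 * bnx_jfree() also manipulates the free list from the mbuf
	 * free path, so take the jumbo slot serializer.
	 */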
703 lwkt_serialize_enter(&sc->bnx_jslot_serializer);
704 entry = SLIST_FIRST(&sc->bnx_jfree_listhead);
705 if (entry) {
706 SLIST_REMOVE_HEAD(&sc->bnx_jfree_listhead, jslot_link);
707 entry->bnx_inuse = 1;
708 } else {
709 if_printf(&sc->arpcom.ac_if, "no free jumbo buffers\n");
710 }
711 lwkt_serialize_exit(&sc->bnx_jslot_serializer);
712 return(entry);
713}
714
715/*
716 * Adjust usage count on a jumbo buffer.
717 */
718static void
719bnx_jref(void *arg)
720{
721 struct bnx_jslot *entry = (struct bnx_jslot *)arg;
722 struct bnx_softc *sc = entry->bnx_sc;
723
724 if (sc == NULL)
725 panic("bnx_jref: can't find softc pointer!");
726
727 if (&sc->bnx_cdata.bnx_jslots[entry->bnx_slot] != entry) {
728 panic("bnx_jref: asked to reference buffer "
729 "that we don't manage!");
730 } else if (entry->bnx_inuse == 0) {
731 panic("bnx_jref: buffer already free!");
732 } else {
733 atomic_add_int(&entry->bnx_inuse, 1);
734 }
735}
736
737/*
738 * Release a jumbo buffer.
739 */
740static void
741bnx_jfree(void *arg)
742{
743 struct bnx_jslot *entry = (struct bnx_jslot *)arg;
744 struct bnx_softc *sc = entry->bnx_sc;
745
746 if (sc == NULL)
747 panic("bnx_jfree: can't find softc pointer!");
748
749 if (&sc->bnx_cdata.bnx_jslots[entry->bnx_slot] != entry) {
750 panic("bnx_jfree: asked to free buffer that we don't manage!");
751 } else if (entry->bnx_inuse == 0) {
752 panic("bnx_jfree: buffer already free!");
753 } else {
754 /*
755 * Possible MP race to 0, use the serializer. The atomic insn
756 * is still needed for races against bnx_jref().
757 */
758 lwkt_serialize_enter(&sc->bnx_jslot_serializer);
759 atomic_subtract_int(&entry->bnx_inuse, 1);
760 if (entry->bnx_inuse == 0) {
761 SLIST_INSERT_HEAD(&sc->bnx_jfree_listhead,
762 entry, jslot_link);
763 }
764 lwkt_serialize_exit(&sc->bnx_jslot_serializer);
765 }
766}
767
768
769/*
770 * Initialize a standard receive ring descriptor.
771 */
772static int
773bnx_newbuf_std(struct bnx_softc *sc, int i, int init)
774{
775 struct mbuf *m_new = NULL;
776 bus_dma_segment_t seg;
777 bus_dmamap_t map;
778 int error, nsegs;
779
780 m_new = m_getcl(init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
781 if (m_new == NULL)
782 return ENOBUFS;
783 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
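	/*
	 * Leave ETHER_ALIGN (2) bytes of leading pad so the IP header
	 * behind the 14-byte Ethernet header ends up 32-bit aligned.
	 */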
784 m_adj(m_new, ETHER_ALIGN);
785
786 error = bus_dmamap_load_mbuf_segment(sc->bnx_cdata.bnx_rx_mtag,
787 sc->bnx_cdata.bnx_rx_tmpmap, m_new,
788 &seg, 1, &nsegs, BUS_DMA_NOWAIT);
789 if (error) {
790 m_freem(m_new);
791 return error;
792 }
793
794 if (!init) {
795 bus_dmamap_sync(sc->bnx_cdata.bnx_rx_mtag,
796 sc->bnx_cdata.bnx_rx_std_dmamap[i],
797 BUS_DMASYNC_POSTREAD);
798 bus_dmamap_unload(sc->bnx_cdata.bnx_rx_mtag,
799 sc->bnx_cdata.bnx_rx_std_dmamap[i]);
800 }
801
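	/*
	 * Swap the spare DMA map with the slot's map: the freshly
	 * loaded map stays with the new mbuf and the old map becomes
	 * the spare for the next replacement.
	 */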
802 map = sc->bnx_cdata.bnx_rx_tmpmap;
803 sc->bnx_cdata.bnx_rx_tmpmap = sc->bnx_cdata.bnx_rx_std_dmamap[i];
804 sc->bnx_cdata.bnx_rx_std_dmamap[i] = map;
805
806 sc->bnx_cdata.bnx_rx_std_chain[i].bnx_mbuf = m_new;
807 sc->bnx_cdata.bnx_rx_std_chain[i].bnx_paddr = seg.ds_addr;
808
809 bnx_setup_rxdesc_std(sc, i);
810 return 0;
811}
812
813static void
814bnx_setup_rxdesc_std(struct bnx_softc *sc, int i)
815{
816 struct bnx_rxchain *rc;
817 struct bge_rx_bd *r;
818
819 rc = &sc->bnx_cdata.bnx_rx_std_chain[i];
820 r = &sc->bnx_ldata.bnx_rx_std_ring[i];
821
822 r->bge_addr.bge_addr_lo = BGE_ADDR_LO(rc->bnx_paddr);
823 r->bge_addr.bge_addr_hi = BGE_ADDR_HI(rc->bnx_paddr);
824 r->bge_len = rc->bnx_mbuf->m_len;
825 r->bge_idx = i;
826 r->bge_flags = BGE_RXBDFLAG_END;
827}
828
829/*
830 * Initialize a jumbo receive ring descriptor. This allocates
831 * a jumbo buffer from the pool managed internally by the driver.
832 */
833static int
834bnx_newbuf_jumbo(struct bnx_softc *sc, int i, int init)
835{
836 struct mbuf *m_new = NULL;
837 struct bnx_jslot *buf;
838 bus_addr_t paddr;
839
840 /* Allocate the mbuf. */
841 MGETHDR(m_new, init ? MB_WAIT : MB_DONTWAIT, MT_DATA);
842 if (m_new == NULL)
843 return ENOBUFS;
844
845 /* Allocate the jumbo buffer */
846 buf = bnx_jalloc(sc);
847 if (buf == NULL) {
848 m_freem(m_new);
849 return ENOBUFS;
850 }
851
852 /* Attach the buffer to the mbuf. */
853 m_new->m_ext.ext_arg = buf;
854 m_new->m_ext.ext_buf = buf->bnx_buf;
855 m_new->m_ext.ext_free = bnx_jfree;
856 m_new->m_ext.ext_ref = bnx_jref;
857 m_new->m_ext.ext_size = BNX_JUMBO_FRAMELEN;
858
859 m_new->m_flags |= M_EXT;
860
861 m_new->m_data = m_new->m_ext.ext_buf;
862 m_new->m_len = m_new->m_pkthdr.len = m_new->m_ext.ext_size;
863
864 paddr = buf->bnx_paddr;
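	/*
	 * Apply the same alignment offset to the DMA address so it
	 * stays in sync with m_data.
	 */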
865 m_adj(m_new, ETHER_ALIGN);
866 paddr += ETHER_ALIGN;
867
868 /* Save necessary information */
869 sc->bnx_cdata.bnx_rx_jumbo_chain[i].bnx_mbuf = m_new;
870 sc->bnx_cdata.bnx_rx_jumbo_chain[i].bnx_paddr = paddr;
871
872 /* Set up the descriptor. */
873 bnx_setup_rxdesc_jumbo(sc, i);
874 return 0;
875}
876
877static void
878bnx_setup_rxdesc_jumbo(struct bnx_softc *sc, int i)
879{
880 struct bge_rx_bd *r;
881 struct bnx_rxchain *rc;
882
883 r = &sc->bnx_ldata.bnx_rx_jumbo_ring[i];
884 rc = &sc->bnx_cdata.bnx_rx_jumbo_chain[i];
885
886 r->bge_addr.bge_addr_lo = BGE_ADDR_LO(rc->bnx_paddr);
887 r->bge_addr.bge_addr_hi = BGE_ADDR_HI(rc->bnx_paddr);
888 r->bge_len = rc->bnx_mbuf->m_len;
889 r->bge_idx = i;
890 r->bge_flags = BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING;
891}
892
893static int
894bnx_init_rx_ring_std(struct bnx_softc *sc)
895{
896 int i, error;
897
898 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
899 error = bnx_newbuf_std(sc, i, 1);
900 if (error)
901 return error;
902	}
903
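	/*
	 * Every descriptor in the standard ring now has a buffer;
	 * publish the last index as the producer index.
	 */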
904 sc->bnx_std = BGE_STD_RX_RING_CNT - 1;
905 bnx_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bnx_std);
906
907 return(0);
908}
909
910static void
911bnx_free_rx_ring_std(struct bnx_softc *sc)
912{
913 int i;
914
915 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
916 struct bnx_rxchain *rc = &sc->bnx_cdata.bnx_rx_std_chain[i];
917
918 if (rc->bnx_mbuf != NULL) {
919 bus_dmamap_unload(sc->bnx_cdata.bnx_rx_mtag,
920 sc->bnx_cdata.bnx_rx_std_dmamap[i]);
921 m_freem(rc->bnx_mbuf);
922 rc->bnx_mbuf = NULL;
923 }
924 bzero(&sc->bnx_ldata.bnx_rx_std_ring[i],
925 sizeof(struct bge_rx_bd));
926 }
927}
928
929static int
930bnx_init_rx_ring_jumbo(struct bnx_softc *sc)
931{
932 struct bge_rcb *rcb;
933 int i, error;
934
935 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
936 error = bnx_newbuf_jumbo(sc, i, 1);
937 if (error)
938 return error;
939	}
940
941 sc->bnx_jumbo = BGE_JUMBO_RX_RING_CNT - 1;
942
943 rcb = &sc->bnx_ldata.bnx_info.bnx_jumbo_rx_rcb;
944 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 0);
945 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
946
947 bnx_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bnx_jumbo);
948
949 return(0);
950}
951
952static void
953bnx_free_rx_ring_jumbo(struct bnx_softc *sc)
954{
955 int i;
956
957 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
958 struct bnx_rxchain *rc = &sc->bnx_cdata.bnx_rx_jumbo_chain[i];
959
960 if (rc->bnx_mbuf != NULL) {
961 m_freem(rc->bnx_mbuf);
962 rc->bnx_mbuf = NULL;
963 }
964 bzero(&sc->bnx_ldata.bnx_rx_jumbo_ring[i],
965 sizeof(struct bge_rx_bd));
966 }
967}
968
969static void
970bnx_free_tx_ring(struct bnx_softc *sc)
971{
972 int i;
973
974 for (i = 0; i < BGE_TX_RING_CNT; i++) {
975 if (sc->bnx_cdata.bnx_tx_chain[i] != NULL) {
976 bus_dmamap_unload(sc->bnx_cdata.bnx_tx_mtag,
977 sc->bnx_cdata.bnx_tx_dmamap[i]);
978 m_freem(sc->bnx_cdata.bnx_tx_chain[i]);
979 sc->bnx_cdata.bnx_tx_chain[i] = NULL;
980 }
981 bzero(&sc->bnx_ldata.bnx_tx_ring[i],
982 sizeof(struct bge_tx_bd));
983 }
984}
985
986static int
987bnx_init_tx_ring(struct bnx_softc *sc)
988{
989 sc->bnx_txcnt = 0;
990 sc->bnx_tx_saved_considx = 0;
991 sc->bnx_tx_prodidx = 0;
992
993 /* Initialize transmit producer index for host-memory send ring. */
994 bnx_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bnx_tx_prodidx);
995 bnx_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
996
997 return(0);
998}
999
1000static void
1001bnx_setmulti(struct bnx_softc *sc)
1002{
1003 struct ifnet *ifp;
1004 struct ifmultiaddr *ifma;
1005 uint32_t hashes[4] = { 0, 0, 0, 0 };
1006 int h, i;
1007
1008 ifp = &sc->arpcom.ac_if;
1009
1010 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
1011 for (i = 0; i < 4; i++)
1012 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
1013 return;
1014 }
1015
1016 /* First, zot all the existing filters. */
1017 for (i = 0; i < 4; i++)
1018 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);
1019
1020 /* Now program new ones. */
1021 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1022 if (ifma->ifma_addr->sa_family != AF_LINK)
1023 continue;
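		/*
		 * Hash the address down to 7 bits: bits 6-5 select one
		 * of the four MAR registers, bits 4-0 the bit within it.
		 */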
1024 h = ether_crc32_le(
1025 LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
1026 ETHER_ADDR_LEN) & 0x7f;
1027 hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
1028 }
1029
1030 for (i = 0; i < 4; i++)
1031 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
1032}
1033
1034/*
1035 * Do endian, PCI and DMA initialization. Also check the on-board ROM
1036 * self-test results.
1037 */
1038static int
1039bnx_chipinit(struct bnx_softc *sc)
1040{
1041 uint32_t dma_rw_ctl, mode_ctl;
1042 int i;
1043
1044 /* Set endian type before we access any non-PCI registers. */
1045 pci_write_config(sc->bnx_dev, BGE_PCI_MISC_CTL,
1046 BGE_INIT | BGE_PCIMISCCTL_TAGGED_STATUS, 4);
1047
1048 /* Clear the MAC control register */
1049 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
1050
1051 /*
1052 * Clear the MAC statistics block in the NIC's
1053 * internal memory.
1054 */
1055 for (i = BGE_STATS_BLOCK;
1056 i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t))
1057 BNX_MEMWIN_WRITE(sc, i, 0);
1058
1059 for (i = BGE_STATUS_BLOCK;
1060 i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t))
1061 BNX_MEMWIN_WRITE(sc, i, 0);
1062
1063 if (BNX_IS_57765_FAMILY(sc)) {
1064 uint32_t val;
1065
1066 if (sc->bnx_chipid == BGE_CHIPID_BCM57765_A0) {
1067 mode_ctl = CSR_READ_4(sc, BGE_MODE_CTL);
1068 val = mode_ctl & ~BGE_MODECTL_PCIE_PORTS;
1069
1070 /* Access the lower 1K of PL PCI-E block registers. */
1071 CSR_WRITE_4(sc, BGE_MODE_CTL,
1072 val | BGE_MODECTL_PCIE_PL_SEL);
1073
1074 val = CSR_READ_4(sc, BGE_PCIE_PL_LO_PHYCTL5);
1075 val |= BGE_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ;
1076 CSR_WRITE_4(sc, BGE_PCIE_PL_LO_PHYCTL5, val);
1077
1078 CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl);
1079 }
1080 if (sc->bnx_chiprev != BGE_CHIPREV_57765_AX) {
1081 mode_ctl = CSR_READ_4(sc, BGE_MODE_CTL);
1082 val = mode_ctl & ~BGE_MODECTL_PCIE_PORTS;
1083
1084 /* Access the lower 1K of DL PCI-E block registers. */
1085 CSR_WRITE_4(sc, BGE_MODE_CTL,
1086 val | BGE_MODECTL_PCIE_DL_SEL);
1087
1088 val = CSR_READ_4(sc, BGE_PCIE_DL_LO_FTSMAX);
1089 val &= ~BGE_PCIE_DL_LO_FTSMAX_MASK;
1090 val |= BGE_PCIE_DL_LO_FTSMAX_VAL;
1091 CSR_WRITE_4(sc, BGE_PCIE_DL_LO_FTSMAX, val);
1092
1093 CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl);
1094 }
1095
1096 val = CSR_READ_4(sc, BGE_CPMU_LSPD_10MB_CLK);
1097 val &= ~BGE_CPMU_LSPD_10MB_MACCLK_MASK;
1098 val |= BGE_CPMU_LSPD_10MB_MACCLK_6_25;
1099 CSR_WRITE_4(sc, BGE_CPMU_LSPD_10MB_CLK, val);
1100 }
1101
1102 /*
1103 * Set up the PCI DMA control register.
1104 */
1105 dma_rw_ctl = pci_read_config(sc->bnx_dev, BGE_PCI_DMA_RW_CTL, 4);
1106 /*
1107 * Disable 32bytes cache alignment for DMA write to host memory
1108 *
1109 * NOTE:
1110 * 64bytes cache alignment for DMA write to host memory is still
1111 * enabled.
1112 */
1113 dma_rw_ctl |= BGE_PCIDMARWCTL_DIS_CACHE_ALIGNMENT;
1114 if (sc->bnx_chipid == BGE_CHIPID_BCM57765_A0)
1115 dma_rw_ctl &= ~BGE_PCIDMARWCTL_CRDRDR_RDMA_MRRS_MSK;
1116 /*
1117 * Enable HW workaround for controllers that misinterpret
1118 * a status tag update and leave interrupts permanently
1119 * disabled.
1120 */
1121 if (sc->bnx_asicrev != BGE_ASICREV_BCM5717 &&
1122 !BNX_IS_57765_FAMILY(sc))
1123 dma_rw_ctl |= BGE_PCIDMARWCTL_TAGGED_STATUS_WA;
1124 if (bootverbose) {
1125 if_printf(&sc->arpcom.ac_if, "DMA read/write %#x\n",
1126 dma_rw_ctl);
1127 }
1128 pci_write_config(sc->bnx_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);
1129
1130 /*
1131 * Set up general mode register.
1132 */
1133 mode_ctl = bnx_dma_swap_options(sc) | BGE_MODECTL_MAC_ATTN_INTR |
1134 BGE_MODECTL_HOST_SEND_BDS | BGE_MODECTL_TX_NO_PHDR_CSUM;
1135 CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl);
1136
1137 /*
1138 * Disable memory write invalidate. Apparently it is not supported
1139 * properly by these devices. Also ensure that INTx isn't disabled,
1140 * as these chips need it even when using MSI.
1141 */
1142 PCI_CLRBIT(sc->bnx_dev, BGE_PCI_CMD,
1143 (PCIM_CMD_MWRICEN | PCIM_CMD_INTxDIS), 4);
1144
1145	/* Set the timer prescaler (always 66MHz) */
1146 CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/);
1147
1148 if (sc->bnx_asicrev == BGE_ASICREV_BCM5906) {
1149 DELAY(40); /* XXX */
1150
1151 /* Put PHY into ready state */
1152 BNX_CLRBIT(sc, BGE_MISC_CFG, BGE_MISCCFG_EPHY_IDDQ);
1153 CSR_READ_4(sc, BGE_MISC_CFG); /* Flush */
1154 DELAY(40);
1155 }
1156
1157 return(0);
1158}
1159
1160static int
1161bnx_blockinit(struct bnx_softc *sc)
1162{
1163 struct bge_rcb *rcb;
1164 bus_size_t vrcb;
1165 bge_hostaddr taddr;
1166 uint32_t val;
1167 int i, limit;
1168
1169 /*
1170 * Initialize the memory window pointer register so that
1171 * we can access the first 32K of internal NIC RAM. This will
1172 * allow us to set up the TX send ring RCBs and the RX return
1173 * ring RCBs, plus other things which live in NIC memory.
1174 */
1175 CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);
1176
1177 /* Configure mbuf pool watermarks */
1178	if (BNX_IS_57765_PLUS(sc)) {
1179 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1180 if (sc->arpcom.ac_if.if_mtu > ETHERMTU) {
1181 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x7e);
1182 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xea);
1183 } else {
1184 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x2a);
1185 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xa0);
1186 }
1187 } else if (sc->bnx_asicrev == BGE_ASICREV_BCM5906) {
1188 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1189 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x04);
1190 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x10);
1191 } else {
1192 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1193 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
1194 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1195 }
1196
1197 /* Configure DMA resource watermarks */
1198 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
1199 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);
1200
1201 /* Enable buffer manager */
1202 val = BGE_BMANMODE_ENABLE | BGE_BMANMODE_LOMBUF_ATTN;
1203 /*
1204 * Change the arbitration algorithm of TXMBUF read request to
1205 * round-robin instead of priority based for BCM5719. When
1206 * TXFIFO is almost empty, RDMA will hold its request until
1207 * TXFIFO is not almost empty.
1208 */
1209 if (sc->bnx_asicrev == BGE_ASICREV_BCM5719)
1210 val |= BGE_BMANMODE_NO_TX_UNDERRUN;
1211 if (sc->bnx_asicrev == BGE_ASICREV_BCM5717 ||
1212 sc->bnx_chipid == BGE_CHIPID_BCM5719_A0 ||
1213 sc->bnx_chipid == BGE_CHIPID_BCM5720_A0)
1214 val |= BGE_BMANMODE_LOMBUF_ATTN;
1215 CSR_WRITE_4(sc, BGE_BMAN_MODE, val);
1216
1217 /* Poll for buffer manager start indication */
1218 for (i = 0; i < BNX_TIMEOUT; i++) {
1219 if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
1220 break;
1221 DELAY(10);
1222 }
1223
1224 if (i == BNX_TIMEOUT) {
1225 if_printf(&sc->arpcom.ac_if,
1226 "buffer manager failed to start\n");
1227 return(ENXIO);
1228 }
1229
1230 /* Enable flow-through queues */
1231 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
1232 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
1233
1234 /* Wait until queue initialization is complete */
1235 for (i = 0; i < BNX_TIMEOUT; i++) {
1236 if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
1237 break;
1238 DELAY(10);
1239 }
1240
1241 if (i == BNX_TIMEOUT) {
1242 if_printf(&sc->arpcom.ac_if,
1243 "flow-through queue init failed\n");
1244 return(ENXIO);
1245 }
1246
1247 /*
1248 * Summary of rings supported by the controller:
1249 *
1250 * Standard Receive Producer Ring
1251 * - This ring is used to feed receive buffers for "standard"
1252 * sized frames (typically 1536 bytes) to the controller.
1253 *
1254 * Jumbo Receive Producer Ring
1255 * - This ring is used to feed receive buffers for jumbo sized
1256 * frames (i.e. anything bigger than the "standard" frames)
1257 * to the controller.
1258 *
1259 * Mini Receive Producer Ring
1260 * - This ring is used to feed receive buffers for "mini"
1261 * sized frames to the controller.
1262 * - This feature required external memory for the controller
1263 * but was never used in a production system. Should always
1264 * be disabled.
1265 *
1266 * Receive Return Ring
1267 * - After the controller has placed an incoming frame into a
1268 * receive buffer that buffer is moved into a receive return
1269	 * ring. The driver is then responsible for passing the
1270 * buffer up to the stack. Many versions of the controller
1271 * support multiple RR rings.
1272 *
1273 * Send Ring
1274 * - This ring is used for outgoing frames. Many versions of
1275 * the controller support multiple send rings.
1276 */
1277
1278 /* Initialize the standard receive producer ring control block. */
1279 rcb = &sc->bnx_ldata.bnx_info.bnx_std_rx_rcb;
1280 rcb->bge_hostaddr.bge_addr_lo =
1281 BGE_ADDR_LO(sc->bnx_ldata.bnx_rx_std_ring_paddr);
1282 rcb->bge_hostaddr.bge_addr_hi =
1283 BGE_ADDR_HI(sc->bnx_ldata.bnx_rx_std_ring_paddr);
1284	if (BNX_IS_57765_PLUS(sc)) {
1285 /*
1286 * Bits 31-16: Programmable ring size (2048, 1024, 512, .., 32)
1287 * Bits 15-2 : Maximum RX frame size
1288		 * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled
1289 * Bit 0 : Reserved
1290 */
1291 rcb->bge_maxlen_flags =
1292 BGE_RCB_MAXLEN_FLAGS(512, BNX_MAX_FRAMELEN << 2);
1293 } else {
1294 /*
1295 * Bits 31-16: Programmable ring size (512, 256, 128, 64, 32)
1296 * Bits 15-2 : Reserved (should be 0)
1297 * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled
1298 * Bit 0 : Reserved
1299 */
1300 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
1301 }
1302 if (sc->bnx_asicrev == BGE_ASICREV_BCM5717 ||
1303 sc->bnx_asicrev == BGE_ASICREV_BCM5719 ||
1304 sc->bnx_asicrev == BGE_ASICREV_BCM5720)
1305 rcb->bge_nicaddr = BGE_STD_RX_RINGS_5717;
1306 else
1307 rcb->bge_nicaddr = BGE_STD_RX_RINGS;
1308 /* Write the standard receive producer ring control block. */
1309 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
1310 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
1311 CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1312 CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
1313 /* Reset the standard receive producer ring producer index. */
1314 bnx_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0);
1315
1316 /*
1317 * Initialize the jumbo RX producer ring control
1318 * block. We set the 'ring disabled' bit in the
1319 * flags field until we're actually ready to start
1320 * using this ring (i.e. once we set the MTU
1321 * high enough to require it).
1322 */
1323 if (BNX_IS_JUMBO_CAPABLE(sc)) {
1324 rcb = &sc->bnx_ldata.bnx_info.bnx_jumbo_rx_rcb;
1325 /* Get the jumbo receive producer ring RCB parameters. */
1326 rcb->bge_hostaddr.bge_addr_lo =
1327 BGE_ADDR_LO(sc->bnx_ldata.bnx_rx_jumbo_ring_paddr);
1328 rcb->bge_hostaddr.bge_addr_hi =
1329 BGE_ADDR_HI(sc->bnx_ldata.bnx_rx_jumbo_ring_paddr);
1330 rcb->bge_maxlen_flags =
1331 BGE_RCB_MAXLEN_FLAGS(BNX_MAX_FRAMELEN,
1332 BGE_RCB_FLAG_RING_DISABLED);
1333 if (sc->bnx_asicrev == BGE_ASICREV_BCM5717 ||
1334 sc->bnx_asicrev == BGE_ASICREV_BCM5719 ||
1335 sc->bnx_asicrev == BGE_ASICREV_BCM5720)
1336 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS_5717;
1337 else
1338 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
1339 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
1340 rcb->bge_hostaddr.bge_addr_hi);
1341 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
1342 rcb->bge_hostaddr.bge_addr_lo);
1343 /* Program the jumbo receive producer ring RCB parameters. */
1344 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
1345 rcb->bge_maxlen_flags);
1346 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
1347 /* Reset the jumbo receive producer ring producer index. */
1348 bnx_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
1349 }
1350
1351 /* Choose de-pipeline mode for BCM5906 A0, A1 and A2. */
1352 if (sc->bnx_asicrev == BGE_ASICREV_BCM5906 &&
1353 (sc->bnx_chipid == BGE_CHIPID_BCM5906_A0 ||
1354 sc->bnx_chipid == BGE_CHIPID_BCM5906_A1 ||
1355 sc->bnx_chipid == BGE_CHIPID_BCM5906_A2)) {
1356 CSR_WRITE_4(sc, BGE_ISO_PKT_TX,
1357 (CSR_READ_4(sc, BGE_ISO_PKT_TX) & ~3) | 2);
1358 }
1359
1360 /*
1361 * The BD ring replenish thresholds control how often the
1362 * hardware fetches new BD's from the producer rings in host
1363 * memory. Setting the value too low on a busy system can
1364	 * starve the hardware and reduce the throughput.
1365	 *
1366	 * Set the BD ring replenish thresholds. The recommended
1367 * values are 1/8th the number of descriptors allocated to
1368 * each ring.
1369 */
1370 val = 8;
1371 CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, val);
1372 if (BNX_IS_JUMBO_CAPABLE(sc)) {
1373 CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH,
1374 BGE_JUMBO_RX_RING_CNT/8);
1375 }
1376	if (BNX_IS_57765_PLUS(sc)) {
1377 CSR_WRITE_4(sc, BGE_STD_REPLENISH_LWM, 32);
1378 CSR_WRITE_4(sc, BGE_JMB_REPLENISH_LWM, 16);
1379 }
1380
1381 /*
1382 * Disable all send rings by setting the 'ring disabled' bit
1383 * in the flags field of all the TX send ring control blocks,
1384 * located in NIC memory.
1385 */
1386 if (BNX_IS_5717_PLUS(sc))
1387 limit = 4;
1388 else if (BNX_IS_57765_FAMILY(sc))
1389 limit = 2;
1390 else
1391 limit = 1;
1392 vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1393 for (i = 0; i < limit; i++) {
1394 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1395 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
1396 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1397 vrcb += sizeof(struct bge_rcb);
1398 }
1399
1400 /* Configure send ring RCB 0 (we use only the first ring) */
1401 vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1402 BGE_HOSTADDR(taddr, sc->bnx_ldata.bnx_tx_ring_paddr);
1403 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1404 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1405 if (sc->bnx_asicrev == BGE_ASICREV_BCM5717 ||
1406 sc->bnx_asicrev == BGE_ASICREV_BCM5719 ||
1407 sc->bnx_asicrev == BGE_ASICREV_BCM5720) {
1408 RCB_WRITE_4(sc, vrcb, bge_nicaddr, BGE_SEND_RING_5717);
1409 } else {
1410 RCB_WRITE_4(sc, vrcb, bge_nicaddr,
1411 BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
1412 }
1413 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1414 BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
1415
1416 /*
1417 * Disable all receive return rings by setting the
1418 * 'ring disabled' bit in the flags field of all the receive
1419 * return ring control blocks, located in NIC memory.
1420 */
1421	if (BNX_IS_5717_PLUS(sc)) {
1422 /* Should be 17, use 16 until we get an SRAM map. */
1423 limit = 16;
1424	} else if (BNX_IS_57765_FAMILY(sc)) {
1425 limit = 4;
1426 } else {
1427 limit = 1;
1428 }
1429 /* Disable all receive return rings. */
1430 vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1431 for (i = 0; i < limit; i++) {
1432 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0);
1433 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0);
1434 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1435 BGE_RCB_FLAG_RING_DISABLED);
1436 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1437 bnx_writembx(sc, BGE_MBX_RX_CONS0_LO +
1438 (i * (sizeof(uint64_t))), 0);
1439 vrcb += sizeof(struct bge_rcb);
1440 }
1441
1442 /*
1443 * Set up receive return ring 0. Note that the NIC address
1444 * for RX return rings is 0x0. The return rings live entirely
1445 * within the host, so the nicaddr field in the RCB isn't used.
1446 */
1447 vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1448 BGE_HOSTADDR(taddr, sc->bnx_ldata.bnx_rx_return_ring_paddr);
1449 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1450 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1451 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1452 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1453 BGE_RCB_MAXLEN_FLAGS(sc->bnx_return_ring_cnt, 0));
1454
1455 /* Set random backoff seed for TX */
1456 CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
1457 sc->arpcom.ac_enaddr[0] + sc->arpcom.ac_enaddr[1] +
1458 sc->arpcom.ac_enaddr[2] + sc->arpcom.ac_enaddr[3] +
1459 sc->arpcom.ac_enaddr[4] + sc->arpcom.ac_enaddr[5] +
1460 BGE_TX_BACKOFF_SEED_MASK);
1461
1462 /* Set inter-packet gap */
1463 val = 0x2620;
1464 if (sc->bnx_asicrev == BGE_ASICREV_BCM5720) {
1465 val |= CSR_READ_4(sc, BGE_TX_LENGTHS) &
1466 (BGE_TXLEN_JMB_FRM_LEN_MSK | BGE_TXLEN_CNT_DN_VAL_MSK);
1467 }
1468 CSR_WRITE_4(sc, BGE_TX_LENGTHS, val);
1469
1470 /*
1471 * Specify which ring to use for packets that don't match
1472 * any RX rules.
1473 */
1474 CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
1475
1476 /*
1477 * Configure number of RX lists. One interrupt distribution
1478 * list, sixteen active lists, one bad frames class.
1479 */
1480 CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
1481
1482	/* Initialize RX list placement stats mask. */
1483 CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
1484 CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
1485
1486 /* Disable host coalescing until we get it set up */
1487 CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
1488
1489 /* Poll to make sure it's shut down. */
1490 for (i = 0; i < BNX_TIMEOUT; i++) {
1491 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
1492 break;
1493 DELAY(10);
1494 }
1495
1496 if (i == BNX_TIMEOUT) {
1497 if_printf(&sc->arpcom.ac_if,
1498 "host coalescing engine failed to idle\n");
1499 return(ENXIO);
1500 }
1501
1502 /* Set up host coalescing defaults */
1503 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bnx_rx_coal_ticks);
1504 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bnx_tx_coal_ticks);
1505 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bnx_rx_coal_bds);
1506 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bnx_tx_coal_bds);
1507 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, sc->bnx_rx_coal_bds_int);
1508 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, sc->bnx_tx_coal_bds_int);
1509
1510 /* Set up address of status block */
1511 bzero(sc->bnx_ldata.bnx_status_block, BGE_STATUS_BLK_SZ);
1512 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI,
1513 BGE_ADDR_HI(sc->bnx_ldata.bnx_status_block_paddr));
1514 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
1515 BGE_ADDR_LO(sc->bnx_ldata.bnx_status_block_paddr));
1516
1517	/* Set up status block partial update size. */
1518 val = BGE_STATBLKSZ_32BYTE;
1519#if 0
1520 /*
1521 * Does not seem to have visible effect in both
1522 * bulk data (1472B UDP datagram) and tiny data
1523 * (18B UDP datagram) TX tests.
1524 */
1525 val |= BGE_HCCMODE_CLRTICK_TX;
1526#endif
1527 /* Turn on host coalescing state machine */
1528 CSR_WRITE_4(sc, BGE_HCC_MODE, val | BGE_HCCMODE_ENABLE);
1529
1530 /* Turn on RX BD completion state machine and enable attentions */
1531 CSR_WRITE_4(sc, BGE_RBDC_MODE,
1532 BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);
1533
1534 /* Turn on RX list placement state machine */
1535 CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
1536
1537 val = BGE_MACMODE_TXDMA_ENB | BGE_MACMODE_RXDMA_ENB |
1538 BGE_MACMODE_RX_STATS_CLEAR | BGE_MACMODE_TX_STATS_CLEAR |
1539 BGE_MACMODE_RX_STATS_ENB | BGE_MACMODE_TX_STATS_ENB |
1540 BGE_MACMODE_FRMHDR_DMA_ENB;
1541
1542 if (sc->bnx_flags & BNX_FLAG_TBI)
1543 val |= BGE_PORTMODE_TBI;
1544 else if (sc->bnx_flags & BNX_FLAG_MII_SERDES)
1545 val |= BGE_PORTMODE_GMII;
1546 else
1547 val |= BGE_PORTMODE_MII;
1548
1549 /* Turn on DMA, clear stats */
1550 CSR_WRITE_4(sc, BGE_MAC_MODE, val);
1551
1552 /* Set misc. local control, enable interrupts on attentions */
1553 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);
1554
1555#ifdef notdef
1556 /* Assert GPIO pins for PHY reset */
1557 BNX_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
1558 BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
1559 BNX_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
1560 BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
1561#endif
1562
1563 /* Turn on write DMA state machine */
1564 val = BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS;
1565 /* Enable host coalescing bug fix. */
1566 val |= BGE_WDMAMODE_STATUS_TAG_FIX;
1567 if (sc->bnx_asicrev == BGE_ASICREV_BCM5785) {
1568 /* Request larger DMA burst size to get better performance. */
1569 val |= BGE_WDMAMODE_BURST_ALL_DATA;
1570 }
1571 CSR_WRITE_4(sc, BGE_WDMA_MODE, val);
1572 DELAY(40);
1573
1574	if (BNX_IS_57765_PLUS(sc)) {
1575 uint32_t dmactl;
1576
1577 dmactl = CSR_READ_4(sc, BGE_RDMA_RSRVCTRL);
1578 /*
1579 * Adjust tx margin to prevent TX data corruption and
1580 * fix internal FIFO overflow.
1581 */
1582 if (sc->bnx_asicrev == BGE_ASICREV_BCM5719 ||
1583 sc->bnx_asicrev == BGE_ASICREV_BCM5720) {
1584 dmactl &= ~(BGE_RDMA_RSRVCTRL_FIFO_LWM_MASK |
1585 BGE_RDMA_RSRVCTRL_FIFO_HWM_MASK |
1586 BGE_RDMA_RSRVCTRL_TXMRGN_MASK);
1587 dmactl |= BGE_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
1588 BGE_RDMA_RSRVCTRL_FIFO_HWM_1_5K |
1589 BGE_RDMA_RSRVCTRL_TXMRGN_320B;
1590 }
1591 /*
1592 * Enable fix for read DMA FIFO overruns.
1593 * The fix is to limit the number of RX BDs
1594		 * the hardware would fetch at a time.
1595 */
1596 CSR_WRITE_4(sc, BGE_RDMA_RSRVCTRL,
1597 dmactl | BGE_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
1598 }
1599
1600 if (sc->bnx_asicrev == BGE_ASICREV_BCM5719) {
1601 CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL,
1602 CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) |
1603 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_4K |
1604 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K);
1605 } else if (sc->bnx_asicrev == BGE_ASICREV_BCM5720) {
1606 /*
1607 * Allow 4KB burst length reads for non-LSO frames.
1608 * Enable 512B burst length reads for buffer descriptors.
1609 */
1610 CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL,
1611 CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) |
1612 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_512 |
1613 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K);
1614 }
1615
1616 /* Turn on read DMA state machine */
1617 val = BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS;
1618 if (sc->bnx_asicrev == BGE_ASICREV_BCM5717)
1619 val |= BGE_RDMAMODE_MULT_DMA_RD_DIS;
1620 if (sc->bnx_asicrev == BGE_ASICREV_BCM5784 ||
1621 sc->bnx_asicrev == BGE_ASICREV_BCM5785 ||
1622 sc->bnx_asicrev == BGE_ASICREV_BCM57780) {
1623 val |= BGE_RDMAMODE_BD_SBD_CRPT_ATTN |
1624 BGE_RDMAMODE_MBUF_RBD_CRPT_ATTN |
1625 BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN;
1626 }
1627 if (sc->bnx_asicrev == BGE_ASICREV_BCM5720) {
1628 val |= CSR_READ_4(sc, BGE_RDMA_MODE) &
1629 BGE_RDMAMODE_H2BNC_VLAN_DET;
1630 /*
1631 * Allow multiple outstanding read requests from
1632 * non-LSO read DMA engine.
1633 */
1634 val &= ~BGE_RDMAMODE_MULT_DMA_RD_DIS;
1635 }
1636 val |= BGE_RDMAMODE_FIFO_LONG_BURST;
1637 CSR_WRITE_4(sc, BGE_RDMA_MODE, val);
1638 DELAY(40);
1639
1640 /* Turn on RX data completion state machine */
1641 CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
1642
1643 /* Turn on RX BD initiator state machine */
1644 CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
1645
1646 /* Turn on RX data and RX BD initiator state machine */
1647 CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
1648
1649 /* Turn on send BD completion state machine */
1650 CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
1651
1652 /* Turn on send data completion state machine */
1653 val = BGE_SDCMODE_ENABLE;
1654 if (sc->bnx_asicrev == BGE_ASICREV_BCM5761)
1655 val |= BGE_SDCMODE_CDELAY;
1656 CSR_WRITE_4(sc, BGE_SDC_MODE, val);
1657
1658 /* Turn on send data initiator state machine */
1659 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
1660
1661 /* Turn on send BD initiator state machine */
1662 CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
1663
1664 /* Turn on send BD selector state machine */
1665 CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
1666
1667 CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
1668 CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
1669 BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);
1670
1671 /* ack/clear link change events */
1672 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
1673 BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
1674 BGE_MACSTAT_LINK_CHANGED);
1675 CSR_WRITE_4(sc, BGE_MI_STS, 0);
1676
1677 /*
1678 * Enable attention when the link has changed state for
1679 * devices that use auto polling.
1680 */
1681 if (sc->bnx_flags & BNX_FLAG_TBI) {
1682 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
1683 } else {
1684 if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) {
1685 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bnx_mi_mode);
1686 DELAY(80);
1687 }
1688 }
1689
1690 /*
1691 * Clear any pending link state attention.
1692 * Otherwise some link state change events may be lost until attention
1693 * is cleared by bnx_intr() -> bnx_softc.bnx_link_upd() sequence.
1694 * It's not necessary on newer BCM chips - perhaps enabling link
1695 * state change attentions implies clearing pending attention.
1696 */
1697 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
1698 BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
1699 BGE_MACSTAT_LINK_CHANGED);
1700
1701 /* Enable link state change attentions. */
1702 BNX_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
1703
1704 return(0);
1705}
1706
1707/*
1708 * Probe for a Broadcom chip. Check the PCI vendor and device IDs
1709 * against our list and return its name if we find a match. Note
1710 * that since the Broadcom controller contains VPD support, we
1711 * can get the device name string from the controller itself instead
1712 * of the compiled-in string. This is a little slow, but it guarantees
1713 * we'll always announce the right product name.
1714 */
1715static int
1716bnx_probe(device_t dev)
1717{
1718 const struct bnx_type *t;
1719 uint16_t product, vendor;
1720
1721 if (!pci_is_pcie(dev))
1722 return ENXIO;
1723
1724 product = pci_get_device(dev);
1725 vendor = pci_get_vendor(dev);
1726
1727 for (t = bnx_devs; t->bnx_name != NULL; t++) {
1728 if (vendor == t->bnx_vid && product == t->bnx_did)
1729 break;
1730 }
1731 if (t->bnx_name == NULL)
1732 return ENXIO;
1733
1734 device_set_desc(dev, t->bnx_name);
1735 return 0;
1736}
1737
1738static int
1739bnx_attach(device_t dev)
1740{
1741 struct ifnet *ifp;
1742 struct bnx_softc *sc;
1743 uint32_t hwcfg = 0, misccfg;
1744 int error = 0, rid, capmask;
1745 uint8_t ether_addr[ETHER_ADDR_LEN];
1746 uint16_t product, vendor;
1747 driver_intr_t *intr_func;
1748 uintptr_t mii_priv = 0;
1749 u_int intr_flags;
1750
1751 sc = device_get_softc(dev);
1752 sc->bnx_dev = dev;
50668ed5 1753 callout_init_mp(&sc->bnx_stat_timer);
1754 lwkt_serialize_init(&sc->bnx_jslot_serializer);
1755
1756 product = pci_get_device(dev);
1757 vendor = pci_get_vendor(dev);
1758
1759#ifndef BURN_BRIDGES
1760 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
1761 uint32_t irq, mem;
1762
1763 irq = pci_read_config(dev, PCIR_INTLINE, 4);
1764 mem = pci_read_config(dev, BGE_PCI_BAR0, 4);
1765
1766 device_printf(dev, "chip is in D%d power mode "
1767 "-- setting to D0\n", pci_get_powerstate(dev));
1768
1769 pci_set_powerstate(dev, PCI_POWERSTATE_D0);
1770
1771 pci_write_config(dev, PCIR_INTLINE, irq, 4);
1772 pci_write_config(dev, BGE_PCI_BAR0, mem, 4);
1773 }
1774#endif /* !BURN_BRIDGES */
1775
1776 /*
1777 * Map control/status registers.
1778 */
1779 pci_enable_busmaster(dev);
1780
1781 rid = BGE_PCI_BAR0;
1782 sc->bnx_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
1783 RF_ACTIVE);
1784
1785 if (sc->bnx_res == NULL) {
1786 device_printf(dev, "couldn't map memory\n");
1787 return ENXIO;
1788 }
1789
1790 sc->bnx_btag = rman_get_bustag(sc->bnx_res);
1791 sc->bnx_bhandle = rman_get_bushandle(sc->bnx_res);
1792
1793 /* Save various chip information */
1794 sc->bnx_chipid =
1795 pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >>
1796 BGE_PCIMISCCTL_ASICREV_SHIFT;
1797 if (BGE_ASICREV(sc->bnx_chipid) == BGE_ASICREV_USE_PRODID_REG) {
1798 /* All chips having dedicated ASICREV register have CPMU */
1799 sc->bnx_flags |= BNX_FLAG_CPMU;
1800
1801 switch (product) {
1802 case PCI_PRODUCT_BROADCOM_BCM5717:
1803 case PCI_PRODUCT_BROADCOM_BCM5718:
1804 case PCI_PRODUCT_BROADCOM_BCM5719:
1805 case PCI_PRODUCT_BROADCOM_BCM5720_ALT:
1806 sc->bnx_chipid = pci_read_config(dev,
1807 BGE_PCI_GEN2_PRODID_ASICREV, 4);
1808 break;
1809
1810 case PCI_PRODUCT_BROADCOM_BCM57761:
32ff3c80 1811 case PCI_PRODUCT_BROADCOM_BCM57762:
6c8d8ecc 1812 case PCI_PRODUCT_BROADCOM_BCM57765:
32ff3c80 1813 case PCI_PRODUCT_BROADCOM_BCM57766:
6c8d8ecc 1814 case PCI_PRODUCT_BROADCOM_BCM57781:
32ff3c80 1815 case PCI_PRODUCT_BROADCOM_BCM57782:
6c8d8ecc 1816 case PCI_PRODUCT_BROADCOM_BCM57785:
32ff3c80 1817 case PCI_PRODUCT_BROADCOM_BCM57786:
1818 case PCI_PRODUCT_BROADCOM_BCM57791:
1819 case PCI_PRODUCT_BROADCOM_BCM57795:
1820 sc->bnx_chipid = pci_read_config(dev,
1821 BGE_PCI_GEN15_PRODID_ASICREV, 4);
1822 break;
1823
1824 default:
1825 sc->bnx_chipid = pci_read_config(dev,
1826 BGE_PCI_PRODID_ASICREV, 4);
1827 break;
1828 }
1829 }
1830 sc->bnx_asicrev = BGE_ASICREV(sc->bnx_chipid);
1831 sc->bnx_chiprev = BGE_CHIPREV(sc->bnx_chipid);
1832
1833 switch (sc->bnx_asicrev) {
1834 case BGE_ASICREV_BCM5717:
1835 case BGE_ASICREV_BCM5719:
1836 case BGE_ASICREV_BCM5720:
1837 sc->bnx_flags |= BNX_FLAG_5717_PLUS | BNX_FLAG_57765_PLUS;
1838 break;
1839
6c8d8ecc 1840 case BGE_ASICREV_BCM57765:
32ff3c80 1841 case BGE_ASICREV_BCM57766:
f368d0d9 1842 sc->bnx_flags |= BNX_FLAG_57765_FAMILY | BNX_FLAG_57765_PLUS;
1843 break;
1844 }
1845 sc->bnx_flags |= BNX_FLAG_SHORTDMA;
1846
1847 misccfg = CSR_READ_4(sc, BGE_MISC_CFG) & BGE_MISCCFG_BOARD_ID_MASK;
1848
1849 sc->bnx_pciecap = pci_get_pciecap_ptr(sc->bnx_dev);
1850 if (sc->bnx_asicrev == BGE_ASICREV_BCM5719 ||
1851 sc->bnx_asicrev == BGE_ASICREV_BCM5720)
1852 pcie_set_max_readrq(dev, PCIEM_DEVCTL_MAX_READRQ_2048);
1853 else
1854 pcie_set_max_readrq(dev, PCIEM_DEVCTL_MAX_READRQ_4096);
1855 device_printf(dev, "CHIP ID 0x%08x; "
1856 "ASIC REV 0x%02x; CHIP REV 0x%02x\n",
1857 sc->bnx_chipid, sc->bnx_asicrev, sc->bnx_chiprev);
1858
1859 /*
1860 * Set various PHY quirk flags.
1861 */
1862
1863 capmask = MII_CAPMASK_DEFAULT;
1864 if (product == PCI_PRODUCT_BROADCOM_BCM57791 ||
1865 product == PCI_PRODUCT_BROADCOM_BCM57795) {
1866 /* 10/100 only */
1867 capmask &= ~BMSR_EXTSTAT;
1868 }
1869
1870 mii_priv |= BRGPHY_FLAG_WIRESPEED;
1871
1872 /*
1873 * Allocate interrupt
1874 */
1875 sc->bnx_irq_type = pci_alloc_1intr(dev, bnx_msi_enable, &sc->bnx_irq_rid,
1876 &intr_flags);
1877
1878 sc->bnx_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->bnx_irq_rid,
1879 intr_flags);
1880 if (sc->bnx_irq == NULL) {
1881 device_printf(dev, "couldn't map interrupt\n");
1882 error = ENXIO;
1883 goto fail;
1884 }
1885
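	/*
	 * All chips handled by bnx(4) support one-shot MSI, so mark it
	 * here whenever MSI has been allocated; the interrupt handler
	 * then re-arms the interrupt itself instead of relying on an
	 * explicit disable/enable pair.
	 */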
1886 if (sc->bnx_irq_type == PCI_INTR_TYPE_MSI) {
1887 sc->bnx_flags |= BNX_FLAG_ONESHOT_MSI;
1888 bnx_enable_msi(sc);
1889 }
1890
1891	/* Initialize if_name early, so if_printf can be used */
1892 ifp = &sc->arpcom.ac_if;
1893 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1894
1895 /* Try to reset the chip. */
1896 bnx_reset(sc);
1897
1898 if (bnx_chipinit(sc)) {
1899 device_printf(dev, "chip initialization failed\n");
1900 error = ENXIO;
1901 goto fail;
1902 }
1903
1904 /*
1905 * Get station address
1906 */
1907 error = bnx_get_eaddr(sc, ether_addr);
1908 if (error) {
1909 device_printf(dev, "failed to read station address\n");
1910 goto fail;
1911 }
1912
f368d0d9 1913 if (BNX_IS_57765_PLUS(sc)) {
1914 sc->bnx_return_ring_cnt = BGE_RETURN_RING_CNT;
1915 } else {
1916 /* 5705/5750 limits RX return ring to 512 entries. */
1917 sc->bnx_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
1918 }
1919
1920 error = bnx_dma_alloc(sc);
1921 if (error)
1922 goto fail;
1923
1924 /* Set default tuneable values. */
1925 sc->bnx_rx_coal_ticks = BNX_RX_COAL_TICKS_DEF;
1926 sc->bnx_tx_coal_ticks = BNX_TX_COAL_TICKS_DEF;
1927 sc->bnx_rx_coal_bds = BNX_RX_COAL_BDS_DEF;
1928 sc->bnx_tx_coal_bds = BNX_TX_COAL_BDS_DEF;
1929 sc->bnx_rx_coal_bds_int = BNX_RX_COAL_BDS_DEF;
1930 sc->bnx_tx_coal_bds_int = BNX_TX_COAL_BDS_DEF;
1931
1932 /* Set up ifnet structure */
1933 ifp->if_softc = sc;
1934 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1935 ifp->if_ioctl = bnx_ioctl;
1936 ifp->if_start = bnx_start;
1937#ifdef DEVICE_POLLING
1938 ifp->if_poll = bnx_poll;
1939#endif
1940 ifp->if_watchdog = bnx_watchdog;
1941 ifp->if_init = bnx_init;
1942 ifp->if_mtu = ETHERMTU;
1943 ifp->if_capabilities = IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
1944 ifq_set_maxlen(&ifp->if_snd, BGE_TX_RING_CNT - 1);
1945 ifq_set_ready(&ifp->if_snd);
1946
1947 ifp->if_capabilities |= IFCAP_HWCSUM;
1948 ifp->if_hwassist = BNX_CSUM_FEATURES;
1949 ifp->if_capenable = ifp->if_capabilities;
1950
1951 /*
1952 * Figure out what sort of media we have by checking the
1953 * hardware config word in the first 32k of NIC internal memory,
1954 * or fall back to examining the EEPROM if necessary.
1955 * Note: on some BCM5700 cards, this value appears to be unset.
1956 * If that's the case, we have to rely on identifying the NIC
1957 * by its PCI subsystem ID, as we do below for the SysKonnect
1958 * SK-9D41.
1959 */
1960 if (bnx_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER) {
1961 hwcfg = bnx_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
1962 } else {
1963 if (bnx_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
1964 sizeof(hwcfg))) {
1965 device_printf(dev, "failed to read EEPROM\n");
1966 error = ENXIO;
1967 goto fail;
1968 }
1969 hwcfg = ntohl(hwcfg);
1970 }
1971
1972 /* The SysKonnect SK-9D41 is a 1000baseSX card. */
1973 if (pci_get_subvendor(dev) == PCI_PRODUCT_SCHNEIDERKOCH_SK_9D41 ||
1974 (hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER)
1975 sc->bnx_flags |= BNX_FLAG_TBI;
1976
1977 /* Setup MI MODE */
1978 if (sc->bnx_flags & BNX_FLAG_CPMU)
1979 sc->bnx_mi_mode = BGE_MIMODE_500KHZ_CONST;
1980 else
1981 sc->bnx_mi_mode = BGE_MIMODE_BASE;
1982
1983 /* Setup link status update stuffs */
1984 if (sc->bnx_flags & BNX_FLAG_TBI) {
1985 sc->bnx_link_upd = bnx_tbi_link_upd;
1986 sc->bnx_link_chg = BGE_MACSTAT_LINK_CHANGED;
1987 } else if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) {
1988 sc->bnx_link_upd = bnx_autopoll_link_upd;
1989 sc->bnx_link_chg = BGE_MACSTAT_LINK_CHANGED;
1990 } else {
1991 sc->bnx_link_upd = bnx_copper_link_upd;
1992 sc->bnx_link_chg = BGE_MACSTAT_LINK_CHANGED;
1993 }
1994
1995 /* Set default PHY address */
1996 sc->bnx_phyno = 1;
1997
1998 /*
1999 * PHY address mapping for various devices.
2000 *
2001 * | F0 Cu | F0 Sr | F1 Cu | F1 Sr |
2002 * ---------+-------+-------+-------+-------+
2003 * BCM57XX | 1 | X | X | X |
2004 * BCM5704 | 1 | X | 1 | X |
2005 * BCM5717 | 1 | 8 | 2 | 9 |
2006 * BCM5719 | 1 | 8 | 2 | 9 |
2007 * BCM5720 | 1 | 8 | 2 | 9 |
2008 *
2009 * Other addresses may respond but they are not
2010 * IEEE compliant PHYs and should be ignored.
2011 */
80969639 2012 if (BNX_IS_5717_PLUS(sc)) {
2013 int f;
2014
2015 f = pci_get_function(dev);
2016 if (sc->bnx_chipid == BGE_CHIPID_BCM5717_A0) {
2017 if (CSR_READ_4(sc, BGE_SGDIG_STS) &
2018 BGE_SGDIGSTS_IS_SERDES)
2019 sc->bnx_phyno = f + 8;
2020 else
2021 sc->bnx_phyno = f + 1;
2022 } else {
2023 if (CSR_READ_4(sc, BGE_CPMU_PHY_STRAP) &
2024 BGE_CPMU_PHY_STRAP_IS_SERDES)
2025 sc->bnx_phyno = f + 8;
2026 else
2027 sc->bnx_phyno = f + 1;
2028 }
2029 }
2030
2031 if (sc->bnx_flags & BNX_FLAG_TBI) {
2032 ifmedia_init(&sc->bnx_ifmedia, IFM_IMASK,
2033 bnx_ifmedia_upd, bnx_ifmedia_sts);
2034 ifmedia_add(&sc->bnx_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
2035 ifmedia_add(&sc->bnx_ifmedia,
2036 IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL);
2037 ifmedia_add(&sc->bnx_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
2038 ifmedia_set(&sc->bnx_ifmedia, IFM_ETHER|IFM_AUTO);
2039 sc->bnx_ifmedia.ifm_media = sc->bnx_ifmedia.ifm_cur->ifm_media;
2040 } else {
2041 struct mii_probe_args mii_args;
2042
2043 mii_probe_args_init(&mii_args, bnx_ifmedia_upd, bnx_ifmedia_sts);
2044 mii_args.mii_probemask = 1 << sc->bnx_phyno;
2045 mii_args.mii_capmask = capmask;
2046 mii_args.mii_privtag = MII_PRIVTAG_BRGPHY;
2047 mii_args.mii_priv = mii_priv;
2048
2049 error = mii_probe(dev, &sc->bnx_miibus, &mii_args);
2050 if (error) {
2051 device_printf(dev, "MII without any PHY!\n");
2052 goto fail;
2053 }
2054 }
2055
2056 /*
2057 * Create sysctl nodes.
2058 */
2059 sysctl_ctx_init(&sc->bnx_sysctl_ctx);
2060 sc->bnx_sysctl_tree = SYSCTL_ADD_NODE(&sc->bnx_sysctl_ctx,
2061 SYSCTL_STATIC_CHILDREN(_hw),
2062 OID_AUTO,
2063 device_get_nameunit(dev),
2064 CTLFLAG_RD, 0, "");
2065 if (sc->bnx_sysctl_tree == NULL) {
2066 device_printf(dev, "can't add sysctl node\n");
2067 error = ENXIO;
2068 goto fail;
2069 }
2070
2071 SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
2072 SYSCTL_CHILDREN(sc->bnx_sysctl_tree),
2073 OID_AUTO, "rx_coal_ticks",
2074 CTLTYPE_INT | CTLFLAG_RW,
2075 sc, 0, bnx_sysctl_rx_coal_ticks, "I",
2076 "Receive coalescing ticks (usec).");
2077 SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
2078 SYSCTL_CHILDREN(sc->bnx_sysctl_tree),
2079 OID_AUTO, "tx_coal_ticks",
2080 CTLTYPE_INT | CTLFLAG_RW,
2081 sc, 0, bnx_sysctl_tx_coal_ticks, "I",
2082 "Transmit coalescing ticks (usec).");
2083 SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
2084 SYSCTL_CHILDREN(sc->bnx_sysctl_tree),
2085 OID_AUTO, "rx_coal_bds",
2086 CTLTYPE_INT | CTLFLAG_RW,
2087 sc, 0, bnx_sysctl_rx_coal_bds, "I",
2088 "Receive max coalesced BD count.");
2089 SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
2090 SYSCTL_CHILDREN(sc->bnx_sysctl_tree),
2091 OID_AUTO, "tx_coal_bds",
2092 CTLTYPE_INT | CTLFLAG_RW,
2093 sc, 0, bnx_sysctl_tx_coal_bds, "I",
2094 "Transmit max coalesced BD count.");
2095 /*
2096 * A common design characteristic for many Broadcom
2097 * client controllers is that they only support a
2098 * single outstanding DMA read operation on the PCIe
2099 * bus. This means that it will take twice as long to
2100 * fetch a TX frame that is split into header and
2101 * payload buffers as it does to fetch a single,
2102 * contiguous TX frame (2 reads vs. 1 read). For these
2103 * controllers, coalescing buffers to reduce the number
2104	 * of memory reads is an effective way to get maximum
2105	 * performance (about 940Mbps). Without collapsing TX
2106	 * buffers the maximum TCP bulk transfer performance
2107	 * is about 850Mbps. However, forcing mbuf coalescing
2108 * consumes a lot of CPU cycles, so leave it off by
2109 * default.
2110 */
2111 SYSCTL_ADD_INT(&sc->bnx_sysctl_ctx,
2112 SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
2113 "force_defrag", CTLFLAG_RW, &sc->bnx_force_defrag, 0,
2114 "Force defragment on TX path");
2115
2116 SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
2117 SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
2118 "rx_coal_bds_int", CTLTYPE_INT | CTLFLAG_RW,
2119 sc, 0, bnx_sysctl_rx_coal_bds_int, "I",
2120 "Receive max coalesced BD count during interrupt.");
2121 SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
2122 SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
2123 "tx_coal_bds_int", CTLTYPE_INT | CTLFLAG_RW,
2124 sc, 0, bnx_sysctl_tx_coal_bds_int, "I",
2125 "Transmit max coalesced BD count during interrupt.");
2126
2127 /*
2128 * Call MI attach routine.
2129 */
2130 ether_ifattach(ifp, ether_addr, NULL);
2131
2132 if (sc->bnx_irq_type == PCI_INTR_TYPE_MSI) {
2133 if (sc->bnx_flags & BNX_FLAG_ONESHOT_MSI) {
2134 intr_func = bnx_msi_oneshot;
2135 if (bootverbose)
2136 device_printf(dev, "oneshot MSI\n");
2137 } else {
2138 intr_func = bnx_msi;
2139 }
2140 } else {
2141 intr_func = bnx_intr_legacy;
2142 }
2143 error = bus_setup_intr(dev, sc->bnx_irq, INTR_MPSAFE, intr_func, sc,
2144 &sc->bnx_intrhand, ifp->if_serializer);
2145 if (error) {
2146 ether_ifdetach(ifp);
2147 device_printf(dev, "couldn't set up irq\n");
2148 goto fail;
2149 }
2150
2151 ifp->if_cpuid = rman_get_cpuid(sc->bnx_irq);
2152 KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus);
2153
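	/*
	 * Remember which CPU the interrupt is dispatched on; the state
	 * timer (bnx_tick) is later scheduled on this same CPU, see
	 * bnx_init().
	 */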
2154 sc->bnx_stat_cpuid = ifp->if_cpuid;
2155
2156 return(0);
2157fail:
2158 bnx_detach(dev);
2159 return(error);
2160}
2161
2162static int
2163bnx_detach(device_t dev)
2164{
2165 struct bnx_softc *sc = device_get_softc(dev);
2166
2167 if (device_is_attached(dev)) {
2168 struct ifnet *ifp = &sc->arpcom.ac_if;
2169
2170 lwkt_serialize_enter(ifp->if_serializer);
2171 bnx_stop(sc);
2172 bnx_reset(sc);
2173 bus_teardown_intr(dev, sc->bnx_irq, sc->bnx_intrhand);
2174 lwkt_serialize_exit(ifp->if_serializer);
2175
2176 ether_ifdetach(ifp);
2177 }
2178
2179 if (sc->bnx_flags & BNX_FLAG_TBI)
2180 ifmedia_removeall(&sc->bnx_ifmedia);
2181 if (sc->bnx_miibus)
2182 device_delete_child(dev, sc->bnx_miibus);
2183 bus_generic_detach(dev);
2184
2185 if (sc->bnx_irq != NULL) {
2186 bus_release_resource(dev, SYS_RES_IRQ, sc->bnx_irq_rid,
2187 sc->bnx_irq);
2188 }
2189 if (sc->bnx_irq_type == PCI_INTR_TYPE_MSI)
2190 pci_release_msi(dev);
2191
2192 if (sc->bnx_res != NULL) {
2193 bus_release_resource(dev, SYS_RES_MEMORY,
2194 BGE_PCI_BAR0, sc->bnx_res);
2195 }
2196
2197 if (sc->bnx_sysctl_tree != NULL)
2198 sysctl_ctx_free(&sc->bnx_sysctl_ctx);
2199
2200 bnx_dma_free(sc);
2201
2202 return 0;
2203}
2204
2205static void
2206bnx_reset(struct bnx_softc *sc)
2207{
2208 device_t dev;
2209 uint32_t cachesize, command, pcistate, reset;
2210 void (*write_op)(struct bnx_softc *, uint32_t, uint32_t);
2211 int i, val = 0;
2212 uint16_t devctl;
2213
2214 dev = sc->bnx_dev;
2215
2216 if (sc->bnx_asicrev != BGE_ASICREV_BCM5906)
2217 write_op = bnx_writemem_direct;
2218 else
2219 write_op = bnx_writereg_ind;
2220
2221 /* Save some important PCI state. */
2222 cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
2223 command = pci_read_config(dev, BGE_PCI_CMD, 4);
2224 pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);
2225
2226 pci_write_config(dev, BGE_PCI_MISC_CTL,
2227 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2228 BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW|
2229 BGE_PCIMISCCTL_TAGGED_STATUS, 4);
2230
2231 /* Disable fastboot on controllers that support it. */
2232 if (bootverbose)
2233 if_printf(&sc->arpcom.ac_if, "Disabling fastboot\n");
2234 CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0x0);
2235
2236 /*
2237 * Write the magic number to SRAM at offset 0xB50.
2238 * When firmware finishes its initialization it will
2239 * write ~BGE_MAGIC_NUMBER to the same location.
2240 */
2241 bnx_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
2242
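	/*
	 * (65 << 1) matches BGE_32BITTIME_66MHZ, the 32-bit timer
	 * prescaler value for a 66MHz core clock; the same value is
	 * written back after the reset below.
	 */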
2243 reset = BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1);
2244
2245 /* XXX: Broadcom Linux driver. */
2246 /* Force PCI-E 1.0a mode */
3730a14d 2247 if (!BNX_IS_57765_PLUS(sc) &&
2248 CSR_READ_4(sc, BGE_PCIE_PHY_TSTCTL) ==
2249 (BGE_PCIE_PHY_TSTCTL_PSCRAM |
2250 BGE_PCIE_PHY_TSTCTL_PCIE10)) {
2251 CSR_WRITE_4(sc, BGE_PCIE_PHY_TSTCTL,
2252 BGE_PCIE_PHY_TSTCTL_PSCRAM);
2253 }
2254 if (sc->bnx_chipid != BGE_CHIPID_BCM5750_A0) {
2255 /* Prevent PCIE link training during global reset */
2256 CSR_WRITE_4(sc, BGE_MISC_CFG, (1<<29));
2257 reset |= (1<<29);
2258 }
2259
2260 /*
2261 * Set GPHY Power Down Override to leave GPHY
2262 * powered up in D0 uninitialized.
2263 */
2264 if ((sc->bnx_flags & BNX_FLAG_CPMU) == 0)
2265 reset |= BGE_MISCCFG_GPHY_PD_OVERRIDE;
2266
2267 /* Issue global reset */
2268 write_op(sc, BGE_MISC_CFG, reset);
2269
2270 if (sc->bnx_asicrev == BGE_ASICREV_BCM5906) {
2271 uint32_t status, ctrl;
2272
2273 status = CSR_READ_4(sc, BGE_VCPU_STATUS);
2274 CSR_WRITE_4(sc, BGE_VCPU_STATUS,
2275 status | BGE_VCPU_STATUS_DRV_RESET);
2276 ctrl = CSR_READ_4(sc, BGE_VCPU_EXT_CTRL);
2277 CSR_WRITE_4(sc, BGE_VCPU_EXT_CTRL,
2278 ctrl & ~BGE_VCPU_EXT_CTRL_HALT_CPU);
2279 }
2280
2281 DELAY(1000);
2282
2283 /* XXX: Broadcom Linux driver. */
2284 if (sc->bnx_chipid == BGE_CHIPID_BCM5750_A0) {
2285 uint32_t v;
2286
2287 DELAY(500000); /* wait for link training to complete */
2288 v = pci_read_config(dev, 0xc4, 4);
2289 pci_write_config(dev, 0xc4, v | (1<<15), 4);
2290 }
2291
2292 devctl = pci_read_config(dev, sc->bnx_pciecap + PCIER_DEVCTRL, 2);
2293
2294 /* Disable no snoop and disable relaxed ordering. */
2295 devctl &= ~(PCIEM_DEVCTL_RELAX_ORDER | PCIEM_DEVCTL_NOSNOOP);
2296
2297 /* Old PCI-E chips only support 128 bytes Max PayLoad Size. */
2298 if ((sc->bnx_flags & BNX_FLAG_CPMU) == 0) {
2299 devctl &= ~PCIEM_DEVCTL_MAX_PAYLOAD_MASK;
2300 devctl |= PCIEM_DEVCTL_MAX_PAYLOAD_128;
2301 }
2302
2303 pci_write_config(dev, sc->bnx_pciecap + PCIER_DEVCTRL,
2304 devctl, 2);
2305
2306 /* Clear error status. */
2307 pci_write_config(dev, sc->bnx_pciecap + PCIER_DEVSTS,
2308 PCIEM_DEVSTS_CORR_ERR |
2309 PCIEM_DEVSTS_NFATAL_ERR |
2310 PCIEM_DEVSTS_FATAL_ERR |
2311 PCIEM_DEVSTS_UNSUPP_REQ, 2);
2312
2313 /* Reset some of the PCI state that got zapped by reset */
2314 pci_write_config(dev, BGE_PCI_MISC_CTL,
2315 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2316 BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW|
2317 BGE_PCIMISCCTL_TAGGED_STATUS, 4);
2318 pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
2319 pci_write_config(dev, BGE_PCI_CMD, command, 4);
2320 write_op(sc, BGE_MISC_CFG, (65 << 1));
2321
2322 /* Enable memory arbiter */
2323 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
2324
2325 if (sc->bnx_asicrev == BGE_ASICREV_BCM5906) {
2326 for (i = 0; i < BNX_TIMEOUT; i++) {
2327 val = CSR_READ_4(sc, BGE_VCPU_STATUS);
2328 if (val & BGE_VCPU_STATUS_INIT_DONE)
2329 break;
2330 DELAY(100);
2331 }
2332 if (i == BNX_TIMEOUT) {
2333 if_printf(&sc->arpcom.ac_if, "reset timed out\n");
2334 return;
2335 }
2336 } else {
2337 /*
2338 * Poll until we see the 1's complement of the magic number.
2339 * This indicates that the firmware initialization
2340 * is complete.
2341 */
2342 for (i = 0; i < BNX_FIRMWARE_TIMEOUT; i++) {
2343 val = bnx_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
2344 if (val == ~BGE_MAGIC_NUMBER)
2345 break;
2346 DELAY(10);
2347 }
2348 if (i == BNX_FIRMWARE_TIMEOUT) {
2349 if_printf(&sc->arpcom.ac_if, "firmware handshake "
2350 "timed out, found 0x%08x\n", val);
2351 }
2352
2353 /* BCM57765 A0 needs additional time before accessing. */
2354 if (sc->bnx_chipid == BGE_CHIPID_BCM57765_A0)
2355 DELAY(10 * 1000);
2356 }
2357
2358 /*
2359 * XXX Wait for the value of the PCISTATE register to
2360 * return to its original pre-reset state. This is a
2361 * fairly good indicator of reset completion. If we don't
2362 * wait for the reset to fully complete, trying to read
2363 * from the device's non-PCI registers may yield garbage
2364 * results.
2365 */
2366 for (i = 0; i < BNX_TIMEOUT; i++) {
2367 if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate)
2368 break;
2369 DELAY(10);
2370 }
2371
2372 /* Fix up byte swapping */
2373 CSR_WRITE_4(sc, BGE_MODE_CTL, bnx_dma_swap_options(sc));
2374
2375 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
2376
2377 /*
2378 * The 5704 in TBI mode apparently needs some special
2379	 * adjustment to ensure the SERDES drive level is set
2380 * to 1.2V.
2381 */
2382 if (sc->bnx_asicrev == BGE_ASICREV_BCM5704 &&
2383 (sc->bnx_flags & BNX_FLAG_TBI)) {
2384 uint32_t serdescfg;
2385
2386 serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG);
2387 serdescfg = (serdescfg & ~0xFFF) | 0x880;
2388 CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg);
2389 }
2390
2391 /* XXX: Broadcom Linux driver. */
3730a14d 2392 if (!BNX_IS_57765_PLUS(sc)) {
2393 uint32_t v;
2394
2395 /* Enable Data FIFO protection. */
2396 v = CSR_READ_4(sc, BGE_PCIE_TLDLPL_PORT);
2397 CSR_WRITE_4(sc, BGE_PCIE_TLDLPL_PORT, v | (1 << 25));
2398 }
2399
2400 DELAY(10000);
2401
2402 if (sc->bnx_asicrev == BGE_ASICREV_BCM5720) {
2403 BNX_CLRBIT(sc, BGE_CPMU_CLCK_ORIDE,
2404 CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
2405 }
2406}
2407
2408/*
2409 * Frame reception handling. This is called if there's a frame
2410 * on the receive return list.
2411 *
2412 * Note: we have to be able to handle two possibilities here:
2413 * 1) the frame is from the jumbo receive ring
2414 * 2) the frame is from the standard receive ring
2415 */
2416
2417static void
2418bnx_rxeof(struct bnx_softc *sc, uint16_t rx_prod)
2419{
2420 struct ifnet *ifp;
2421 int stdcnt = 0, jumbocnt = 0;
2422
2423 ifp = &sc->arpcom.ac_if;
2424
2425 while (sc->bnx_rx_saved_considx != rx_prod) {
2426 struct bge_rx_bd *cur_rx;
2427 uint32_t rxidx;
2428 struct mbuf *m = NULL;
2429 uint16_t vlan_tag = 0;
2430 int have_tag = 0;
2431
2432 cur_rx =
2433 &sc->bnx_ldata.bnx_rx_return_ring[sc->bnx_rx_saved_considx];
2434
2435 rxidx = cur_rx->bge_idx;
2436 BNX_INC(sc->bnx_rx_saved_considx, sc->bnx_return_ring_cnt);
2437
2438 if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
2439 have_tag = 1;
2440 vlan_tag = cur_rx->bge_vlan_tag;
2441 }
2442
2443 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
2444 BNX_INC(sc->bnx_jumbo, BGE_JUMBO_RX_RING_CNT);
2445 jumbocnt++;
2446
2447 if (rxidx != sc->bnx_jumbo) {
2448 ifp->if_ierrors++;
2449 if_printf(ifp, "sw jumbo index(%d) "
2450 "and hw jumbo index(%d) mismatch, drop!\n",
2451 sc->bnx_jumbo, rxidx);
2452 bnx_setup_rxdesc_jumbo(sc, rxidx);
2453 continue;
2454 }
2455
2456 m = sc->bnx_cdata.bnx_rx_jumbo_chain[rxidx].bnx_mbuf;
2457 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2458 ifp->if_ierrors++;
2459 bnx_setup_rxdesc_jumbo(sc, sc->bnx_jumbo);
2460 continue;
2461 }
2462 if (bnx_newbuf_jumbo(sc, sc->bnx_jumbo, 0)) {
2463 ifp->if_ierrors++;
2464 bnx_setup_rxdesc_jumbo(sc, sc->bnx_jumbo);
2465 continue;
2466 }
2467 } else {
2468 BNX_INC(sc->bnx_std, BGE_STD_RX_RING_CNT);
2469 stdcnt++;
2470
2471 if (rxidx != sc->bnx_std) {
2472 ifp->if_ierrors++;
2473 if_printf(ifp, "sw std index(%d) "
2474 "and hw std index(%d) mismatch, drop!\n",
2475 sc->bnx_std, rxidx);
2476 bnx_setup_rxdesc_std(sc, rxidx);
2477 continue;
2478 }
2479
2480 m = sc->bnx_cdata.bnx_rx_std_chain[rxidx].bnx_mbuf;
2481 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2482 ifp->if_ierrors++;
2483 bnx_setup_rxdesc_std(sc, sc->bnx_std);
2484 continue;
2485 }
2486 if (bnx_newbuf_std(sc, sc->bnx_std, 0)) {
2487 ifp->if_ierrors++;
2488 bnx_setup_rxdesc_std(sc, sc->bnx_std);
2489 continue;
2490 }
2491 }
2492
2493 ifp->if_ipackets++;
2494 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
2495 m->m_pkthdr.rcvif = ifp;
2496
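		/*
		 * Propagate hardware RX checksum results into the mbuf.
		 * The IP checksum bits are only meaningful for IPv4
		 * frames, hence the IPV6 flag check.
		 */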
2497 if ((ifp->if_capenable & IFCAP_RXCSUM) &&
2498 (cur_rx->bge_flags & BGE_RXBDFLAG_IPV6) == 0) {
2499 if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
2500 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2501 if ((cur_rx->bge_error_flag &
2502 BGE_RXERRFLAG_IP_CSUM_NOK) == 0)
2503 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2504 }
2505 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) {
2506 m->m_pkthdr.csum_data =
2507 cur_rx->bge_tcp_udp_csum;
2508 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
2509 CSUM_PSEUDO_HDR;
2510 }
2511 }
2512
2513 /*
2514 * If we received a packet with a vlan tag, pass it
2515 * to vlan_input() instead of ether_input().
2516 */
2517 if (have_tag) {
2518 m->m_flags |= M_VLANTAG;
2519 m->m_pkthdr.ether_vlantag = vlan_tag;
2520 have_tag = vlan_tag = 0;
2521 }
2522 ifp->if_input(ifp, m);
2523 }
2524
2525 bnx_writembx(sc, BGE_MBX_RX_CONS0_LO, sc->bnx_rx_saved_considx);
2526 if (stdcnt)
2527 bnx_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bnx_std);
2528 if (jumbocnt)
2529 bnx_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bnx_jumbo);
2530}
2531
2532static void
2533bnx_txeof(struct bnx_softc *sc, uint16_t tx_cons)
2534{
2535 struct bge_tx_bd *cur_tx = NULL;
2536 struct ifnet *ifp;
2537
2538 ifp = &sc->arpcom.ac_if;
2539
2540 /*
2541 * Go through our tx ring and free mbufs for those
2542 * frames that have been sent.
2543 */
2544 while (sc->bnx_tx_saved_considx != tx_cons) {
2545 uint32_t idx = 0;
2546
2547 idx = sc->bnx_tx_saved_considx;
2548 cur_tx = &sc->bnx_ldata.bnx_tx_ring[idx];
2549 if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
2550 ifp->if_opackets++;
2551 if (sc->bnx_cdata.bnx_tx_chain[idx] != NULL) {
2552 bus_dmamap_unload(sc->bnx_cdata.bnx_tx_mtag,
2553 sc->bnx_cdata.bnx_tx_dmamap[idx]);
2554 m_freem(sc->bnx_cdata.bnx_tx_chain[idx]);
2555 sc->bnx_cdata.bnx_tx_chain[idx] = NULL;
2556 }
2557 sc->bnx_txcnt--;
2558 BNX_INC(sc->bnx_tx_saved_considx, BGE_TX_RING_CNT);
2559 }
2560
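	/*
	 * Clear OACTIVE only once enough descriptors are free to hold
	 * a maximally fragmented frame plus the reserved descriptors.
	 */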
2561 if (cur_tx != NULL &&
2562 (BGE_TX_RING_CNT - sc->bnx_txcnt) >=
2563 (BNX_NSEG_RSVD + BNX_NSEG_SPARE))
2564 ifp->if_flags &= ~IFF_OACTIVE;
2565
2566 if (sc->bnx_txcnt == 0)
2567 ifp->if_timer = 0;
2568
2569 if (!ifq_is_empty(&ifp->if_snd))
2570 if_devstart(ifp);
2571}
2572
2573#ifdef DEVICE_POLLING
2574
2575static void
2576bnx_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
2577{
2578 struct bnx_softc *sc = ifp->if_softc;
2579 struct bge_status_block *sblk = sc->bnx_ldata.bnx_status_block;
2580 uint16_t rx_prod, tx_cons;
2581
2582 switch(cmd) {
2583 case POLL_REGISTER:
2584 bnx_disable_intr(sc);
2585 break;
2586 case POLL_DEREGISTER:
2587 bnx_enable_intr(sc);
2588 break;
2589 case POLL_AND_CHECK_STATUS:
2590 /*
2591 * Process link state changes.
2592 */
2593 bnx_link_poll(sc);
2594 /* Fall through */
2595 case POLL_ONLY:
2596 sc->bnx_status_tag = sblk->bge_status_tag;
2597 /*
2598 * Use a load fence to ensure that status_tag
2599 * is saved before rx_prod and tx_cons.
2600 */
2601 cpu_lfence();
2602
2603 rx_prod = sblk->bge_idx[0].bge_rx_prod_idx;
2604 tx_cons = sblk->bge_idx[0].bge_tx_cons_idx;
2605 if (ifp->if_flags & IFF_RUNNING) {
2606 rx_prod = sblk->bge_idx[0].bge_rx_prod_idx;
2607 if (sc->bnx_rx_saved_considx != rx_prod)
2608 bnx_rxeof(sc, rx_prod);
2609
2610 tx_cons = sblk->bge_idx[0].bge_tx_cons_idx;
2611 if (sc->bnx_tx_saved_considx != tx_cons)
2612 bnx_txeof(sc, tx_cons);
2613 }
2614 break;
2615 }
2616}
2617
2618#endif
2619
2620static void
2621bnx_intr_legacy(void *xsc)
2622{
2623 struct bnx_softc *sc = xsc;
2624 struct bge_status_block *sblk = sc->bnx_ldata.bnx_status_block;
2625
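	/*
	 * With tagged status, an unchanged status tag means the chip
	 * has not posted a new status block since the last ack.  If
	 * the chip is not asserting the interrupt line either, this
	 * is a shared/spurious interrupt and can be ignored.
	 */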
2626 if (sc->bnx_status_tag == sblk->bge_status_tag) {
2627 uint32_t val;
2628
2629 val = pci_read_config(sc->bnx_dev, BGE_PCI_PCISTATE, 4);
2630 if (val & BGE_PCISTAT_INTR_NOTACT)
2631 return;
2632 }
2633
2634 /*
2635 * NOTE:
2636 * Interrupt will have to be disabled if tagged status
2637 * is used, else interrupt will always be asserted on
2638 * certain chips (at least on BCM5750 AX/BX).
2639 */
2640 bnx_writembx(sc, BGE_MBX_IRQ0_LO, 1);
2641
2642 bnx_intr(sc);
2643}
2644
2645static void
2646bnx_msi(void *xsc)
2647{
2648 struct bnx_softc *sc = xsc;
2649
2650 /* Disable interrupt first */
2651 bnx_writembx(sc, BGE_MBX_IRQ0_LO, 1);
2652 bnx_intr(sc);
2653}
2654
2655static void
2656bnx_msi_oneshot(void *xsc)
2657{
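	/*
	 * One-shot MSI is masked by the chip itself after it fires,
	 * so no explicit disable is needed here; bnx_intr() re-arms
	 * it by writing the status tag to the IRQ0 mailbox.
	 */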
2658 bnx_intr(xsc);
2659}
2660
2661static void
2662bnx_intr(struct bnx_softc *sc)
2663{
2664 struct ifnet *ifp = &sc->arpcom.ac_if;
2665 struct bge_status_block *sblk = sc->bnx_ldata.bnx_status_block;
2666 uint16_t rx_prod, tx_cons;
2667 uint32_t status;
2668
2669 sc->bnx_status_tag = sblk->bge_status_tag;
2670 /*
2671 * Use a load fence to ensure that status_tag is saved
2672 * before rx_prod, tx_cons and status.
2673 */
2674 cpu_lfence();
2675
2676 rx_prod = sblk->bge_idx[0].bge_rx_prod_idx;
2677 tx_cons = sblk->bge_idx[0].bge_tx_cons_idx;
2678 status = sblk->bge_status;
2679
2680 if ((status & BGE_STATFLAG_LINKSTATE_CHANGED) || sc->bnx_link_evt)
2681 bnx_link_poll(sc);
2682
2683 if (ifp->if_flags & IFF_RUNNING) {
2684 if (sc->bnx_rx_saved_considx != rx_prod)
2685 bnx_rxeof(sc, rx_prod);
2686
2687 if (sc->bnx_tx_saved_considx != tx_cons)
2688 bnx_txeof(sc, tx_cons);
2689 }
2690
2691 bnx_writembx(sc, BGE_MBX_IRQ0_LO, sc->bnx_status_tag << 24);
2692
2693 if (sc->bnx_coal_chg)
2694 bnx_coal_change(sc);
2695}
2696
2697static void
2698bnx_tick(void *xsc)
2699{
2700 struct bnx_softc *sc = xsc;
2701 struct ifnet *ifp = &sc->arpcom.ac_if;
2702
2703 lwkt_serialize_enter(ifp->if_serializer);
2704
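	/*
	 * This callout is always scheduled on bnx_stat_cpuid, the CPU
	 * that also services the MSI/legacy interrupt.
	 */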
2705 KKASSERT(mycpuid == sc->bnx_stat_cpuid);
2706
2707 bnx_stats_update_regs(sc);
2708
2709 if (sc->bnx_flags & BNX_FLAG_TBI) {
2710 /*
2711		 * Since auto-polling can't be used in TBI mode, we poll the
2712		 * link status manually. Here we register a pending link event
2713		 * and trigger an interrupt.
2714 */
2715 sc->bnx_link_evt++;
2716 BNX_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
2717 } else if (!sc->bnx_link) {
2718 mii_tick(device_get_softc(sc->bnx_miibus));
2719 }
2720
2721 callout_reset(&sc->bnx_stat_timer, hz, bnx_tick, sc);
2722
2723 lwkt_serialize_exit(ifp->if_serializer);
2724}
2725
2726static void
2727bnx_stats_update_regs(struct bnx_softc *sc)
2728{
2729 struct ifnet *ifp = &sc->arpcom.ac_if;
2730 struct bge_mac_stats_regs stats;
2731 uint32_t *s;
2732 int i;
2733
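	/*
	 * Snapshot the MAC statistics registers one 32-bit word at a
	 * time.  The collision counters are cumulative, so the
	 * subtraction below effectively replaces if_collisions with
	 * the current hardware total.
	 */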
2734 s = (uint32_t *)&stats;
2735 for (i = 0; i < sizeof(struct bge_mac_stats_regs); i += 4) {
2736 *s = CSR_READ_4(sc, BGE_RX_STATS + i);
2737 s++;
2738 }
2739
2740 ifp->if_collisions +=
2741 (stats.dot3StatsSingleCollisionFrames +
2742 stats.dot3StatsMultipleCollisionFrames +
2743 stats.dot3StatsExcessiveCollisions +
2744 stats.dot3StatsLateCollisions) -
2745 ifp->if_collisions;
2746}
2747
2748/*
2749 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
2750 * pointers to descriptors.
2751 */
2752static int
2753bnx_encap(struct bnx_softc *sc, struct mbuf **m_head0, uint32_t *txidx)
2754{
2755 struct bge_tx_bd *d = NULL;
2756 uint16_t csum_flags = 0;
2757 bus_dma_segment_t segs[BNX_NSEG_NEW];
2758 bus_dmamap_t map;
2759 int error, maxsegs, nsegs, idx, i;
2760 struct mbuf *m_head = *m_head0, *m_new;
2761
2762 if (m_head->m_pkthdr.csum_flags) {
2763 if (m_head->m_pkthdr.csum_flags & CSUM_IP)
2764 csum_flags |= BGE_TXBDFLAG_IP_CSUM;
2765 if (m_head->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
2766 csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
2767 if (m_head->m_flags & M_LASTFRAG)
2768 csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
2769 else if (m_head->m_flags & M_FRAG)
2770 csum_flags |= BGE_TXBDFLAG_IP_FRAG;
2771 }
2772
2773 idx = *txidx;
2774 map = sc->bnx_cdata.bnx_tx_dmamap[idx];
2775
2776 maxsegs = (BGE_TX_RING_CNT - sc->bnx_txcnt) - BNX_NSEG_RSVD;
2777 KASSERT(maxsegs >= BNX_NSEG_SPARE,
2778 ("not enough segments %d", maxsegs));
2779
2780 if (maxsegs > BNX_NSEG_NEW)
2781 maxsegs = BNX_NSEG_NEW;
2782
2783 /*
2784	 * Pad outbound frame to BNX_MIN_FRAMELEN for an unusual reason.
2785 * The bge hardware will pad out Tx runts to BGE_MIN_FRAMELEN,
2786 * but when such padded frames employ the bge IP/TCP checksum
2787 * offload, the hardware checksum assist gives incorrect results
2788 * (possibly from incorporating its own padding into the UDP/TCP
2789 * checksum; who knows). If we pad such runts with zeros, the
2790 * onboard checksum comes out correct.
2791 */
2792 if ((csum_flags & BGE_TXBDFLAG_TCP_UDP_CSUM) &&
2793 m_head->m_pkthdr.len < BNX_MIN_FRAMELEN) {
2794 error = m_devpad(m_head, BNX_MIN_FRAMELEN);
2795 if (error)
2796 goto back;
2797 }
2798
2799 if ((sc->bnx_flags & BNX_FLAG_SHORTDMA) && m_head->m_next != NULL) {
2800 m_new = bnx_defrag_shortdma(m_head);
2801 if (m_new == NULL) {
2802 error = ENOBUFS;
2803 goto back;
2804 }
2805 *m_head0 = m_head = m_new;
2806 }
2807 if (sc->bnx_force_defrag && m_head->m_next != NULL) {
2808 /*
2809		 * Forcefully defragment the mbuf chain to overcome the hardware
2810		 * limitation of a single outstanding DMA read operation.
2811		 * If it fails, keep moving on using
2812 * the original mbuf chain.
2813 */
2814 m_new = m_defrag(m_head, MB_DONTWAIT);
2815 if (m_new != NULL)
2816 *m_head0 = m_head = m_new;
2817 }
2818
2819 error = bus_dmamap_load_mbuf_defrag(sc->bnx_cdata.bnx_tx_mtag, map,
2820 m_head0, segs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
2821 if (error)
2822 goto back;
2823
2824 m_head = *m_head0;
2825 bus_dmamap_sync(sc->bnx_cdata.bnx_tx_mtag, map, BUS_DMASYNC_PREWRITE);
2826
2827 for (i = 0; ; i++) {
2828 d = &sc->bnx_ldata.bnx_tx_ring[idx];
2829
2830 d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr);
2831 d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr);
2832 d->bge_len = segs[i].ds_len;
2833 d->bge_flags = csum_flags;
2834
2835 if (i == nsegs - 1)
2836 break;
2837 BNX_INC(idx, BGE_TX_RING_CNT);
2838 }
2839 /* Mark the last segment as end of packet... */
2840 d->bge_flags |= BGE_TXBDFLAG_END;
2841
2842 /* Set vlan tag to the first segment of the packet. */
2843 d = &sc->bnx_ldata.bnx_tx_ring[*txidx];
2844 if (m_head->m_flags & M_VLANTAG) {
2845 d->bge_flags |= BGE_TXBDFLAG_VLAN_TAG;
2846 d->bge_vlan_tag = m_head->m_pkthdr.ether_vlantag;
2847 } else {
2848 d->bge_vlan_tag = 0;
2849 }
2850
2851 /*
2852	 * Ensure that the map for this transmission is placed at
2853 * the array index of the last descriptor in this chain.
2854 */
2855 sc->bnx_cdata.bnx_tx_dmamap[*txidx] = sc->bnx_cdata.bnx_tx_dmamap[idx];
2856 sc->bnx_cdata.bnx_tx_dmamap[idx] = map;
2857 sc->bnx_cdata.bnx_tx_chain[idx] = m_head;
2858 sc->bnx_txcnt += nsegs;
2859
2860 BNX_INC(idx, BGE_TX_RING_CNT);
2861 *txidx = idx;
2862back:
2863 if (error) {
2864 m_freem(*m_head0);
2865 *m_head0 = NULL;
2866 }
2867 return error;
2868}
2869
2870/*
2871 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
2872 * to the mbuf data regions directly in the transmit descriptors.
2873 */
2874static void
2875bnx_start(struct ifnet *ifp)
2876{
2877 struct bnx_softc *sc = ifp->if_softc;
2878 struct mbuf *m_head = NULL;
2879 uint32_t prodidx;
2880 int need_trans;
2881
2882 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
2883 return;
2884
2885 prodidx = sc->bnx_tx_prodidx;
2886
2887 need_trans = 0;
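	/*
	 * Keep queueing frames as long as the producer slot is free;
	 * a non-NULL bnx_tx_chain[prodidx] means that slot still
	 * belongs to an in-flight frame, i.e. the ring is full.
	 */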
2888 while (sc->bnx_cdata.bnx_tx_chain[prodidx] == NULL) {
2889 m_head = ifq_dequeue(&ifp->if_snd, NULL);
2890 if (m_head == NULL)
2891 break;
2892
2893 /*
2894 * XXX
2895 * The code inside the if() block is never reached since we
2896 * must mark CSUM_IP_FRAGS in our if_hwassist to start getting
2897 * requests to checksum TCP/UDP in a fragmented packet.
2898 *
2899 * XXX
2900 * safety overkill. If this is a fragmented packet chain
2901 * with delayed TCP/UDP checksums, then only encapsulate
2902 * it if we have enough descriptors to handle the entire
2903 * chain at once.
2904 * (paranoia -- may not actually be needed)
2905 */
2906 if ((m_head->m_flags & M_FIRSTFRAG) &&
2907 (m_head->m_pkthdr.csum_flags & CSUM_DELAY_DATA)) {
2908 if ((BGE_TX_RING_CNT - sc->bnx_txcnt) <
2909 m_head->m_pkthdr.csum_data + BNX_NSEG_RSVD) {
2910 ifp->if_flags |= IFF_OACTIVE;
2911 ifq_prepend(&ifp->if_snd, m_head);
2912 break;
2913 }
2914 }
2915
2916 /*
2917		 * Sanity check: avoid coming within BNX_NSEG_RSVD
2918		 * descriptors of the end of the ring. Also make
2919		 * sure there are BNX_NSEG_SPARE descriptors for
2920 * jumbo buffers' defragmentation.
2921 */
2922 if ((BGE_TX_RING_CNT - sc->bnx_txcnt) <
2923 (BNX_NSEG_RSVD + BNX_NSEG_SPARE)) {
2924 ifp->if_flags |= IFF_OACTIVE;
2925 ifq_prepend(&ifp->if_snd, m_head);
2926 break;
2927 }
2928
2929 /*
2930 * Pack the data into the transmit ring. If we
2931 * don't have room, set the OACTIVE flag and wait
2932 * for the NIC to drain the ring.
2933 */
2934 if (bnx_encap(sc, &m_head, &prodidx)) {
2935 ifp->if_flags |= IFF_OACTIVE;
2936 ifp->if_oerrors++;
2937 break;
2938 }
2939 need_trans = 1;
2940
2941 ETHER_BPF_MTAP(ifp, m_head);
2942 }
2943
2944 if (!need_trans)
2945 return;
2946
2947 /* Transmit */
2948 bnx_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
2949
2950 sc->bnx_tx_prodidx = prodidx;
2951
2952 /*
2953 * Set a timeout in case the chip goes out to lunch.
2954 */
2955 ifp->if_timer = 5;
2956}
2957
2958static void
2959bnx_init(void *xsc)
2960{
2961 struct bnx_softc *sc = xsc;
2962 struct ifnet *ifp = &sc->arpcom.ac_if;
2963 uint16_t *m;
2964 uint32_t mode;
2965
2966 ASSERT_SERIALIZED(ifp->if_serializer);
2967
2968 /* Cancel pending I/O and flush buffers. */
2969 bnx_stop(sc);
2970 bnx_reset(sc);
2971 bnx_chipinit(sc);
2972
2973 /*
2974 * Init the various state machines, ring
2975 * control blocks and firmware.
2976 */
2977 if (bnx_blockinit(sc)) {
2978 if_printf(ifp, "initialization failure\n");
2979 bnx_stop(sc);
2980 return;
2981 }
2982
2983 /* Specify MTU. */
2984 CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
2985 ETHER_HDR_LEN + ETHER_CRC_LEN + EVL_ENCAPLEN);
2986
2987 /* Load our MAC address. */
2988 m = (uint16_t *)&sc->arpcom.ac_enaddr[0];
2989 CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
2990 CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
2991
2992 /* Enable or disable promiscuous mode as needed. */
2993 bnx_setpromisc(sc);
2994
2995 /* Program multicast filter. */
2996 bnx_setmulti(sc);
2997
2998 /* Init RX ring. */
2999 if (bnx_init_rx_ring_std(sc)) {
3000 if_printf(ifp, "RX ring initialization failed\n");
3001 bnx_stop(sc);
3002 return;
3003 }
3004
3005 /* Init jumbo RX ring. */
3006 if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN)) {
3007 if (bnx_init_rx_ring_jumbo(sc)) {
3008 if_printf(ifp, "Jumbo RX ring initialization failed\n");
3009 bnx_stop(sc);
3010 return;
3011 }
3012 }
3013
3014 /* Init our RX return ring index */
3015 sc->bnx_rx_saved_considx = 0;
3016
3017 /* Init TX ring. */
3018 bnx_init_tx_ring(sc);
3019
3020 /* Enable TX MAC state machine lockup fix. */
3021 mode = CSR_READ_4(sc, BGE_TX_MODE);
3022 mode |= BGE_TXMODE_MBUF_LOCKUP_FIX;
3023 if (sc->bnx_asicrev == BGE_ASICREV_BCM5720) {
3024 mode &= ~(BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE);
3025 mode |= CSR_READ_4(sc, BGE_TX_MODE) &
3026 (BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE);
3027 }
3028 /* Turn on transmitter */
3029 CSR_WRITE_4(sc, BGE_TX_MODE, mode | BGE_TXMODE_ENABLE);
3030
3031 /* Turn on receiver */
3032 BNX_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
3033
3034 /*
3035 * Set the number of good frames to receive after RX MBUF
3036 * Low Watermark has been reached. After the RX MAC receives
3037 * this number of frames, it will drop subsequent incoming
3038 * frames until the MBUF High Watermark is reached.
3039 */
bcb29629 3040 if (BNX_IS_57765_FAMILY(sc))
3041 CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 1);
3042 else
3043 CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 2);
3044
3045 if (sc->bnx_irq_type == PCI_INTR_TYPE_MSI) {
3046 if (bootverbose) {
3047 if_printf(ifp, "MSI_MODE: %#x\n",
3048 CSR_READ_4(sc, BGE_MSI_MODE));
3049 }
3050 }
3051
3052 /* Tell firmware we're alive. */
3053 BNX_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3054
3055 /* Enable host interrupts if polling(4) is not enabled. */
3056 PCI_SETBIT(sc->bnx_dev, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA, 4);
3057#ifdef DEVICE_POLLING
3058 if (ifp->if_flags & IFF_POLLING)
3059 bnx_disable_intr(sc);
3060 else
3061#endif
3062 bnx_enable_intr(sc);
3063
3064 bnx_ifmedia_upd(ifp);
3065
3066 ifp->if_flags |= IFF_RUNNING;
3067 ifp->if_flags &= ~IFF_OACTIVE;
3068
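	/*
	 * Dispatch the state/statistics timer on the same CPU that
	 * services the MSI/legacy interrupt.
	 */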
3069 callout_reset_bycpu(&sc->bnx_stat_timer, hz, bnx_tick, sc,
3070 sc->bnx_stat_cpuid);
3071}
3072
3073/*
3074 * Set media options.
3075 */
3076static int
3077bnx_ifmedia_upd(struct ifnet *ifp)
3078{
3079 struct bnx_softc *sc = ifp->if_softc;
3080
3081 /* If this is a 1000baseX NIC, enable the TBI port. */
3082 if (sc->bnx_flags & BNX_FLAG_TBI) {
3083 struct ifmedia *ifm = &sc->bnx_ifmedia;
3084
3085 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
3086 return(EINVAL);
3087
3088 switch(IFM_SUBTYPE(ifm->ifm_media)) {
3089 case IFM_AUTO:
3090 break;
3091
3092 case IFM_1000_SX:
3093 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
3094 BNX_CLRBIT(sc, BGE_MAC_MODE,
3095 BGE_MACMODE_HALF_DUPLEX);
3096 } else {
3097 BNX_SETBIT(sc, BGE_MAC_MODE,
3098 BGE_MACMODE_HALF_DUPLEX);
3099 }
3100 break;
3101 default:
3102 return(EINVAL);
3103 }
3104 } else {
3105 struct mii_data *mii = device_get_softc(sc->bnx_miibus);
3106
3107 sc->bnx_link_evt++;
3108 sc->bnx_link = 0;
3109 if (mii->mii_instance) {
3110 struct mii_softc *miisc;
3111
3112 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
3113 mii_phy_reset(miisc);
3114 }
3115 mii_mediachg(mii);
3116
3117 /*
3118 * Force an interrupt so that we will call bnx_link_upd
3119 * if needed and clear any pending link state attention.
3120		 * Without this we would not get any further interrupts
3121		 * for link state changes, would never bring the link up,
3122		 * and would not be able to send in bnx_start; the only
3123		 * way to get things working again would be to receive a
3124		 * packet and take an RX interrupt.
3125		 *
3126		 * bnx_tick should help for fiber cards, and we might not
3127		 * need to do this here if BNX_FLAG_TBI is set, but as
3128		 * we poll for fiber anyway it does no harm.
3129 */
3130 BNX_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
3131 }
3132 return(0);
3133}
3134
3135/*
3136 * Report current media status.
3137 */
3138static void
3139bnx_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
3140{
3141 struct bnx_softc *sc = ifp->if_softc;
3142
3143 if (sc->bnx_flags & BNX_FLAG_TBI) {
3144 ifmr->ifm_status = IFM_AVALID;
3145 ifmr->ifm_active = IFM_ETHER;
3146 if (CSR_READ_4(sc, BGE_MAC_STS) &
3147 BGE_MACSTAT_TBI_PCS_SYNCHED) {
3148 ifmr->ifm_status |= IFM_ACTIVE;
3149 } else {
3150 ifmr->ifm_active |= IFM_NONE;
3151 return;
3152 }
3153
3154 ifmr->ifm_active |= IFM_1000_SX;
3155 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
3156 ifmr->ifm_active |= IFM_HDX;
3157 else
3158 ifmr->ifm_active |= IFM_FDX;
3159 } else {
3160 struct mii_data *mii = device_get_softc(sc->bnx_miibus);
3161
3162 mii_pollstat(mii);
3163 ifmr->ifm_active = mii->mii_media_active;
3164 ifmr->ifm_status = mii->mii_media_status;
3165 }
3166}
3167
3168static int
3169bnx_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
3170{
3171 struct bnx_softc *sc = ifp->if_softc;
3172 struct ifreq *ifr = (struct ifreq *)data;
3173 int mask, error = 0;
3174
3175 ASSERT_SERIALIZED(ifp->if_serializer);
3176
3177 switch (command) {
3178 case SIOCSIFMTU:
3179 if ((!BNX_IS_JUMBO_CAPABLE(sc) && ifr->ifr_mtu > ETHERMTU) ||
3180 (BNX_IS_JUMBO_CAPABLE(sc) &&
3181 ifr->ifr_mtu > BNX_JUMBO_MTU)) {
3182 error = EINVAL;
3183 } else if (ifp->if_mtu != ifr->ifr_mtu) {
3184 ifp->if_mtu = ifr->ifr_mtu;
3185 if (ifp->if_flags & IFF_RUNNING)
3186 bnx_init(sc);
3187 }
3188 break;
3189 case SIOCSIFFLAGS:
3190 if (ifp->if_flags & IFF_UP) {
3191 if (ifp->if_flags & IFF_RUNNING) {
3192 mask = ifp->if_flags ^ sc->bnx_if_flags;
3193
3194 /*
3195 * If only the state of the PROMISC flag
3196 * changed, then just use the 'set promisc
3197 * mode' command instead of reinitializing
3198 * the entire NIC. Doing a full re-init
3199 * means reloading the firmware and waiting
3200 * for it to start up, which may take a
3201 * second or two. Similarly for ALLMULTI.
3202 */
3203 if (mask & IFF_PROMISC)
3204 bnx_setpromisc(sc);
3205 if (mask & IFF_ALLMULTI)
3206 bnx_setmulti(sc);
3207 } else {
3208 bnx_init(sc);
3209 }
3210 } else if (ifp->if_flags & IFF_RUNNING) {
3211 bnx_stop(sc);
3212 }
3213 sc->bnx_if_flags = ifp->if_flags;
3214 break;
3215 case SIOCADDMULTI:
3216 case SIOCDELMULTI:
3217 if (ifp->if_flags & IFF_RUNNING)
3218 bnx_setmulti(sc);
3219 break;
3220 case SIOCSIFMEDIA:
3221 case SIOCGIFMEDIA:
3222 if (sc->bnx_flags & BNX_FLAG_TBI) {
3223 error = ifmedia_ioctl(ifp, ifr,
3224 &sc->bnx_ifmedia, command);
3225 } else {
3226 struct mii_data *mii;
3227
3228 mii = device_get_softc(sc->bnx_miibus);
3229 error = ifmedia_ioctl(ifp, ifr,
3230 &mii->mii_media, command);
3231 }
3232 break;
3233 case SIOCSIFCAP:
3234 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
3235 if (mask & IFCAP_HWCSUM) {
3236 ifp->if_capenable ^= (mask & IFCAP_HWCSUM);
3237 if (IFCAP_HWCSUM & ifp->if_capenable)
3238 ifp->if_hwassist = BNX_CSUM_FEATURES;
3239 else
3240 ifp->if_hwassist = 0;
3241 }
3242 break;
3243 default:
3244 error = ether_ioctl(ifp, command, data);
3245 break;
3246 }
3247 return error;
3248}
3249
3250static void
3251bnx_watchdog(struct ifnet *ifp)
3252{
3253 struct bnx_softc *sc = ifp->if_softc;
3254
3255 if_printf(ifp, "watchdog timeout -- resetting\n");
3256
3257 bnx_init(sc);
3258
3259 ifp->if_oerrors++;
3260
3261 if (!ifq_is_empty(&ifp->if_snd))
3262 if_devstart(ifp);
3263}
3264
3265/*
3266 * Stop the adapter and free any mbufs allocated to the
3267 * RX and TX lists.
3268 */
3269static void
3270bnx_stop(struct bnx_softc *sc)
3271{
3272 struct ifnet *ifp = &sc->arpcom.ac_if;
3273
3274 ASSERT_SERIALIZED(ifp->if_serializer);
3275
3276 callout_stop(&sc->bnx_stat_timer);
3277
3278 /*
3279 * Disable all of the receiver blocks
3280 */
3281 bnx_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
3282 bnx_stop_block(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
3283 bnx_stop_block(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
3284 bnx_stop_block(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
3285 bnx_stop_block(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
3286 bnx_stop_block(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
3287
3288 /*
3289 * Disable all of the transmit blocks
3290 */
3291 bnx_stop_block(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
3292 bnx_stop_block(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
3293 bnx_stop_block(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
3294 bnx_stop_block(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
3295 bnx_stop_block(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
3296 bnx_stop_block(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
3297
3298 /*
3299 * Shut down all of the memory managers and related
3300 * state machines.
3301 */
3302 bnx_stop_block(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
3303 bnx_stop_block(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
3304 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
3305 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
3306
3307 /* Disable host interrupts. */
3308 bnx_disable_intr(sc);
3309
3310 /*
3311 * Tell firmware we're shutting down.
3312 */
3313 BNX_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3314
3315 /* Free the RX lists. */
3316 bnx_free_rx_ring_std(sc);
3317
3318 /* Free jumbo RX list. */
3319 if (BNX_IS_JUMBO_CAPABLE(sc))
3320 bnx_free_rx_ring_jumbo(sc);
3321
3322 /* Free TX buffers. */
3323 bnx_free_tx_ring(sc);
3324
3325 sc->bnx_status_tag = 0;
3326 sc->bnx_link = 0;
3327 sc->bnx_coal_chg = 0;
3328
3329 sc->bnx_tx_saved_considx = BNX_TXCONS_UNSET;
3330
3331 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
3332 ifp->if_timer = 0;
3333}
3334
3335/*
3336 * Stop all chip I/O so that the kernel's probe routines don't
3337 * get confused by errant DMAs when rebooting.
3338 */
3339static void
3340bnx_shutdown(device_t dev)
3341{
3342 struct bnx_softc *sc = device_get_softc(dev);
3343 struct ifnet *ifp = &sc->arpcom.ac_if;
3344
3345 lwkt_serialize_enter(ifp->if_serializer);
3346 bnx_stop(sc);
3347 bnx_reset(sc);
3348 lwkt_serialize_exit(ifp->if_serializer);
3349}
3350
3351static int
3352bnx_suspend(device_t dev)
3353{
3354 struct bnx_softc *sc = device_get_softc(dev);
3355 struct ifnet *ifp = &sc->arpcom.ac_if;
3356
3357 lwkt_serialize_enter(ifp->if_serializer);
3358 bnx_stop(sc);
3359 lwkt_serialize_exit(ifp->if_serializer);
3360
3361 return 0;
3362}
3363
3364static int
3365bnx_resume(device_t dev)
3366{
3367 struct bnx_softc *sc = device_get_softc(dev);
3368 struct ifnet *ifp = &sc->arpcom.ac_if;
3369
3370 lwkt_serialize_enter(ifp->if_serializer);
3371
3372 if (ifp->if_flags & IFF_UP) {
3373 bnx_init(sc);
3374
3375 if (!ifq_is_empty(&ifp->if_snd))
3376 if_devstart(ifp);
3377 }
3378
3379 lwkt_serialize_exit(ifp->if_serializer);
3380
3381 return 0;
3382}
3383
3384static void
3385bnx_setpromisc(struct bnx_softc *sc)
3386{
3387 struct ifnet *ifp = &sc->arpcom.ac_if;
3388
3389 if (ifp->if_flags & IFF_PROMISC)
3390 BNX_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
3391 else
3392 BNX_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
3393}
3394
3395static void
3396bnx_dma_free(struct bnx_softc *sc)
3397{
3398 int i;
3399
3400 /* Destroy RX mbuf DMA stuffs. */
3401 if (sc->bnx_cdata.bnx_rx_mtag != NULL) {
3402 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
3403 bus_dmamap_destroy(sc->bnx_cdata.bnx_rx_mtag,
3404 sc->bnx_cdata.bnx_rx_std_dmamap[i]);
3405 }
3406 bus_dmamap_destroy(sc->bnx_cdata.bnx_rx_mtag,
3407 sc->bnx_cdata.bnx_rx_tmpmap);
3408 bus_dma_tag_destroy(sc->bnx_cdata.bnx_rx_mtag);
3409 }
3410
3411 /* Destroy TX mbuf DMA stuffs. */
3412 if (sc->bnx_cdata.bnx_tx_mtag != NULL) {
3413 for (i = 0; i < BGE_TX_RING_CNT; i++) {
3414 bus_dmamap_destroy(sc->bnx_cdata.bnx_tx_mtag,
3415 sc->bnx_cdata.bnx_tx_dmamap[i]);
3416 }
3417 bus_dma_tag_destroy(sc->bnx_cdata.bnx_tx_mtag);
3418 }
3419
3420 /* Destroy standard RX ring */
3421 bnx_dma_block_free(sc->bnx_cdata.bnx_rx_std_ring_tag,
3422 sc->bnx_cdata.bnx_rx_std_ring_map,
3423 sc->bnx_ldata.bnx_rx_std_ring);
3424
3425 if (BNX_IS_JUMBO_CAPABLE(sc))
3426 bnx_free_jumbo_mem(sc);
3427
3428 /* Destroy RX return ring */
3429 bnx_dma_block_free(sc->bnx_cdata.bnx_rx_return_ring_tag,
3430 sc->bnx_cdata.bnx_rx_return_ring_map,
3431 sc->bnx_ldata.bnx_rx_return_ring);
3432
3433 /* Destroy TX ring */
3434 bnx_dma_block_free(sc->bnx_cdata.bnx_tx_ring_tag,
3435 sc->bnx_cdata.bnx_tx_ring_map,
3436 sc->bnx_ldata.bnx_tx_ring);
3437
3438 /* Destroy status block */
3439 bnx_dma_block_free(sc->bnx_cdata.bnx_status_tag,
3440 sc->bnx_cdata.bnx_status_map,
3441 sc->bnx_ldata.bnx_status_block);
3442
3443 /* Destroy the parent tag */
3444 if (sc->bnx_cdata.bnx_parent_tag != NULL)
3445 bus_dma_tag_destroy(sc->bnx_cdata.bnx_parent_tag);
3446}
3447
3448static int
3449bnx_dma_alloc(struct bnx_softc *sc)
3450{
3451 struct ifnet *ifp = &sc->arpcom.ac_if;
3452 int i, error;
3453
3454 /*
3455 * Allocate the parent bus DMA tag appropriate for PCI.
3456 *
3457	 * All of the NetExtreme/NetLink controllers have the 4GB
3458	 * boundary DMA bug:
3459	 * whenever an address crosses a multiple of the 4GB boundary
3460	 * (including 4GB, 8GB, 12GB, etc.) and makes the transition
3461	 * from 0xX_FFFF_FFFF to 0x(X+1)_0000_0000, an internal DMA
3462	 * state machine will lock up and cause the device to hang.
3463 */
3464 error = bus_dma_tag_create(NULL, 1, BGE_DMA_BOUNDARY_4G,
3465 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3466 NULL, NULL,
3467 BUS_SPACE_MAXSIZE_32BIT, 0,
3468 BUS_SPACE_MAXSIZE_32BIT,
3469 0, &sc->bnx_cdata.bnx_parent_tag);
3470 if (error) {
3471 if_printf(ifp, "could not allocate parent dma tag\n");
3472 return error;
3473 }
3474
3475 /*
3476 * Create DMA tag and maps for RX mbufs.
3477 */
3478 error = bus_dma_tag_create(sc->bnx_cdata.bnx_parent_tag, 1, 0,
3479 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3480 NULL, NULL, MCLBYTES, 1, MCLBYTES,
3481 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK,
3482 &sc->bnx_cdata.bnx_rx_mtag);
3483 if (error) {
3484 if_printf(ifp, "could not allocate RX mbuf dma tag\n");
3485 return error;
3486 }
3487
3488 error = bus_dmamap_create(sc->bnx_cdata.bnx_rx_mtag,
3489 BUS_DMA_WAITOK, &sc->bnx_cdata.bnx_rx_tmpmap);
3490 if (error) {
3491 bus_dma_tag_destroy(sc->bnx_cdata.bnx_rx_mtag);
3492 sc->bnx_cdata.bnx_rx_mtag = NULL;
3493 return error;
3494 }
3495
3496 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
3497 error = bus_dmamap_create(sc->bnx_cdata.bnx_rx_mtag,
3498 BUS_DMA_WAITOK,
3499 &sc->bnx_cdata.bnx_rx_std_dmamap[i]);
3500 if (error) {
3501 int j;
3502
3503 for (j = 0; j < i; ++j) {
3504 bus_dmamap_destroy(sc->bnx_cdata.bnx_rx_mtag,
3505 sc->bnx_cdata.bnx_rx_std_dmamap[j]);
3506 }
3507 bus_dma_tag_destroy(sc->bnx_cdata.bnx_rx_mtag);
3508 sc->bnx_cdata.bnx_rx_mtag = NULL;
3509
3510 if_printf(ifp, "could not create DMA map for RX\n");
3511 return error;
3512 }
3513 }
3514
3515 /*
3516 * Create DMA tag and maps for TX mbufs.
3517 */
3518 error = bus_dma_tag_create(sc->bnx_cdata.bnx_parent_tag, 1, 0,
3519 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3520 NULL, NULL,
3521 BNX_JUMBO_FRAMELEN, BNX_NSEG_NEW, MCLBYTES,
3522 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK |
3523 BUS_DMA_ONEBPAGE,
3524 &sc->bnx_cdata.bnx_tx_mtag);
3525 if (error) {
3526 if_printf(ifp, "could not allocate TX mbuf dma tag\n");
3527 return error;
3528 }
3529
3530 for (i = 0; i < BGE_TX_RING_CNT; i++) {
3531 error = bus_dmamap_create(sc->bnx_cdata.bnx_tx_mtag,
3532 BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
3533 &sc->bnx_cdata.bnx_tx_dmamap[i]);
3534 if (error) {
3535 int j;
3536
3537 for (j = 0; j < i; ++j) {
3538 bus_dmamap_destroy(sc->bnx_cdata.bnx_tx_mtag,
3539 sc->bnx_cdata.bnx_tx_dmamap[j]);
3540 }
3541 bus_dma_tag_destroy(sc->bnx_cdata.bnx_tx_mtag);
3542 sc->bnx_cdata.bnx_tx_mtag = NULL;
3543
3544 if_printf(ifp, "could not create DMA map for TX\n");
3545 return error;
3546 }
3547 }
3548
3549 /*
3550 * Create DMA stuffs for standard RX ring.
3551 */
3552 error = bnx_dma_block_alloc(sc, BGE_STD_RX_RING_SZ,
3553 &sc->bnx_cdata.bnx_rx_std_ring_tag,
3554 &sc->bnx_cdata.bnx_rx_std_ring_map,
3555 (void *)&sc->bnx_ldata.bnx_rx_std_ring,
3556 &sc->bnx_ldata.bnx_rx_std_ring_paddr);
3557 if (error) {
3558 if_printf(ifp, "could not create std RX ring\n");
3559 return error;
3560 }
3561
3562 /*
3563 * Create jumbo buffer pool.
3564 */
3565 if (BNX_IS_JUMBO_CAPABLE(sc)) {
3566 error = bnx_alloc_jumbo_mem(sc);
3567 if (error) {
3568 if_printf(ifp, "could not create jumbo buffer pool\n");
3569 return error;
3570 }
3571 }
3572
3573 /*
3574 * Create DMA stuffs for RX return ring.
3575 */
3576 error = bnx_dma_block_alloc(sc,
3577 BGE_RX_RTN_RING_SZ(sc->bnx_return_ring_cnt),
3578 &sc->bnx_cdata.bnx_rx_return_ring_tag,
3579 &sc->bnx_cdata.bnx_rx_return_ring_map,
3580 (void *)&sc->bnx_ldata.bnx_rx_return_ring,
3581 &sc->bnx_ldata.bnx_rx_return_ring_paddr);
3582 if (error) {
3583 if_printf(ifp, "could not create RX ret ring\n");
3584 return error;
3585 }
3586
3587 /*
3588 * Create DMA stuffs for TX ring.
3589 */
3590 error = bnx_dma_block_alloc(sc, BGE_TX_RING_SZ,
3591 &sc->bnx_cdata.bnx_tx_ring_tag,
3592 &sc->bnx_cdata.bnx_tx_ring_map,
3593 (void *)&sc->bnx_ldata.bnx_tx_ring,
3594 &sc->bnx_ldata.bnx_tx_ring_paddr);
3595 if (error) {
3596 if_printf(ifp, "could not create TX ring\n");
3597 return error;
3598 }
3599
3600 /*
3601 * Create DMA stuffs for status block.
3602 */
3603 error = bnx_dma_block_alloc(sc, BGE_STATUS_BLK_SZ,
3604 &sc->bnx_cdata.bnx_status_tag,
3605 &sc->bnx_cdata.bnx_status_map,
3606 (void *)&sc->bnx_ldata.bnx_status_block,
3607 &sc->bnx_ldata.bnx_status_block_paddr);
3608 if (error) {
3609 if_printf(ifp, "could not create status block\n");
3610 return error;
3611 }
3612
3613 return 0;
3614}
3615
3616static int
3617bnx_dma_block_alloc(struct bnx_softc *sc, bus_size_t size, bus_dma_tag_t *tag,
3618 bus_dmamap_t *map, void **addr, bus_addr_t *paddr)
3619{
3620 bus_dmamem_t dmem;
3621 int error;
3622
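	/*
	 * bus_dmamem_coherent() creates the DMA tag, map and zeroed
	 * coherent memory in a single call; unpack the results for
	 * the caller.
	 */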
3623 error = bus_dmamem_coherent(sc->bnx_cdata.bnx_parent_tag, PAGE_SIZE, 0,
3624 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3625 size, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
3626 if (error)
3627 return error;
3628
3629 *tag = dmem.dmem_tag;
3630 *map = dmem.dmem_map;
3631 *addr = dmem.dmem_addr;
3632 *paddr = dmem.dmem_busaddr;
3633
3634 return 0;
3635}
3636
3637static void
3638bnx_dma_block_free(bus_dma_tag_t tag, bus_dmamap_t map, void *addr)
3639{
3640 if (tag != NULL) {
3641 bus_dmamap_unload(tag, map);
3642 bus_dmamem_free(tag, addr, map);
3643 bus_dma_tag_destroy(tag);
3644 }
3645}
3646
3647static void
3648bnx_tbi_link_upd(struct bnx_softc *sc, uint32_t status)
3649{
3650 struct ifnet *ifp = &sc->arpcom.ac_if;
3651
3652#define PCS_ENCODE_ERR (BGE_MACSTAT_PORT_DECODE_ERROR|BGE_MACSTAT_MI_COMPLETE)
3653
3654 /*
3655 * Sometimes PCS encoding errors are detected in
3656 * TBI mode (on fiber NICs), and for some reason
3657 * the chip will signal them as link changes.
3658 * If we get a link change event, but the 'PCS
3659 * encoding error' bit in the MAC status register
3660 * is set, don't bother doing a link check.
3661 * This avoids spurious "gigabit link up" messages
3662 * that sometimes appear on fiber NICs during
3663 * periods of heavy traffic.
3664 */
3665 if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
3666 if (!sc->bnx_link) {
3667 sc->bnx_link++;
3668 if (sc->bnx_asicrev == BGE_ASICREV_BCM5704) {
3669 BNX_CLRBIT(sc, BGE_MAC_MODE,
3670 BGE_MACMODE_TBI_SEND_CFGS);
3671 }
3672 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
3673
3674 if (bootverbose)
3675 if_printf(ifp, "link UP\n");
3676
3677 ifp->if_link_state = LINK_STATE_UP;
3678 if_link_state_change(ifp);
3679 }
3680 } else if ((status & PCS_ENCODE_ERR) != PCS_ENCODE_ERR) {
3681 if (sc->bnx_link) {
3682 sc->bnx_link = 0;
3683
3684 if (bootverbose)
3685 if_printf(ifp, "link DOWN\n");
3686
3687 ifp->if_link_state = LINK_STATE_DOWN;
3688 if_link_state_change(ifp);
3689 }
3690 }
3691
3692#undef PCS_ENCODE_ERR
3693
3694 /* Clear the attention. */
3695 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
3696 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
3697 BGE_MACSTAT_LINK_CHANGED);
3698}
3699
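/*
 * Link state update for copper PHYs: poll the PHY through the MII
 * layer and let bnx_miibus_statchg() reprogram the MAC for the
 * negotiated media.
 */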
3700static void
3701bnx_copper_link_upd(struct bnx_softc *sc, uint32_t status __unused)
3702{
3703 struct ifnet *ifp = &sc->arpcom.ac_if;
3704 struct mii_data *mii = device_get_softc(sc->bnx_miibus);
3705
3706 mii_pollstat(mii);
3707 bnx_miibus_statchg(sc->bnx_dev);
3708
3709 if (bootverbose) {
3710 if (sc->bnx_link)
3711 if_printf(ifp, "link UP\n");
3712 else
3713 if_printf(ifp, "link DOWN\n");
3714 }
3715
3716 /* Clear the attention. */
3717 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
3718 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
3719 BGE_MACSTAT_LINK_CHANGED);
3720}
3721
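/*
 * Link state update when the chip's MII autopolling is in use: the MAC
 * status word is not consulted; the MII media status alone decides
 * whether the link is considered up or down.
 */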
3722static void
3723bnx_autopoll_link_upd(struct bnx_softc *sc, uint32_t status __unused)
3724{
3725 struct ifnet *ifp = &sc->arpcom.ac_if;
3726 struct mii_data *mii = device_get_softc(sc->bnx_miibus);
3727
3728 mii_pollstat(mii);
3729
3730 if (!sc->bnx_link &&
3731 (mii->mii_media_status & IFM_ACTIVE) &&
3732 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
3733 sc->bnx_link++;
3734 if (bootverbose)
3735 if_printf(ifp, "link UP\n");
3736 } else if (sc->bnx_link &&
3737 (!(mii->mii_media_status & IFM_ACTIVE) ||
3738 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
3739 sc->bnx_link = 0;
3740 if (bootverbose)
3741 if_printf(ifp, "link DOWN\n");
3742 }
3743
3744 /* Clear the attention. */
3745 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
3746 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
3747 BGE_MACSTAT_LINK_CHANGED);
3748}
3749
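/*
 * The sysctl handlers below are thin wrappers which all funnel into
 * bnx_sysctl_coal_chg() with the per-parameter storage, valid range
 * and "changed" flag for that coalescing parameter.
 */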
3750static int
3751bnx_sysctl_rx_coal_ticks(SYSCTL_HANDLER_ARGS)
3752{
3753 struct bnx_softc *sc = arg1;
3754
3755 return bnx_sysctl_coal_chg(oidp, arg1, arg2, req,
3756 &sc->bnx_rx_coal_ticks,
3757 BNX_RX_COAL_TICKS_MIN, BNX_RX_COAL_TICKS_MAX,
3758 BNX_RX_COAL_TICKS_CHG);
3759}
3760
3761static int
3762bnx_sysctl_tx_coal_ticks(SYSCTL_HANDLER_ARGS)
3763{
3764 struct bnx_softc *sc = arg1;
3765
3766 return bnx_sysctl_coal_chg(oidp, arg1, arg2, req,
3767 &sc->bnx_tx_coal_ticks,
3768 BNX_TX_COAL_TICKS_MIN, BNX_TX_COAL_TICKS_MAX,
3769 BNX_TX_COAL_TICKS_CHG);
3770}
3771
3772static int
3773bnx_sysctl_rx_coal_bds(SYSCTL_HANDLER_ARGS)
3774{
3775 struct bnx_softc *sc = arg1;
3776
3777 return bnx_sysctl_coal_chg(oidp, arg1, arg2, req,
3778 &sc->bnx_rx_coal_bds,
3779 BNX_RX_COAL_BDS_MIN, BNX_RX_COAL_BDS_MAX,
3780 BNX_RX_COAL_BDS_CHG);
3781}
3782
3783static int
3784bnx_sysctl_tx_coal_bds(SYSCTL_HANDLER_ARGS)
3785{
3786 struct bnx_softc *sc = arg1;
3787
3788 return bnx_sysctl_coal_chg(oidp, arg1, arg2, req,
3789 &sc->bnx_tx_coal_bds,
3790 BNX_TX_COAL_BDS_MIN, BNX_TX_COAL_BDS_MAX,
3791 BNX_TX_COAL_BDS_CHG);
3792}
3793
3794static int
3795bnx_sysctl_rx_coal_bds_int(SYSCTL_HANDLER_ARGS)
3796{
3797 struct bnx_softc *sc = arg1;
3798
3799 return bnx_sysctl_coal_chg(oidp, arg1, arg2, req,
3800 &sc->bnx_rx_coal_bds_int,
3801 BNX_RX_COAL_BDS_MIN, BNX_RX_COAL_BDS_MAX,
3802 BNX_RX_COAL_BDS_INT_CHG);
3803}
3804
3805static int
3806bnx_sysctl_tx_coal_bds_int(SYSCTL_HANDLER_ARGS)
3807{
3808 struct bnx_softc *sc = arg1;
3809
3810 return bnx_sysctl_coal_chg(oidp, arg1, arg2, req,
3811 &sc->bnx_tx_coal_bds_int,
3812 BNX_TX_COAL_BDS_MIN, BNX_TX_COAL_BDS_MAX,
3813 BNX_TX_COAL_BDS_INT_CHG);
3814}
3815
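/*
 * Common handler: validate the new value against [coal_min, coal_max]
 * under the interface serializer and record which coalescing parameter
 * changed, so that bnx_coal_change() can apply it later.
 */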
3816static int
3817bnx_sysctl_coal_chg(SYSCTL_HANDLER_ARGS, uint32_t *coal,
3818 int coal_min, int coal_max, uint32_t coal_chg_mask)
3819{
3820 struct bnx_softc *sc = arg1;
3821 struct ifnet *ifp = &sc->arpcom.ac_if;
3822 int error = 0, v;
3823
3824 lwkt_serialize_enter(ifp->if_serializer);
3825
3826 v = *coal;
3827 error = sysctl_handle_int(oidp, &v, 0, req);
3828 if (!error && req->newptr != NULL) {
3829 if (v < coal_min || v > coal_max) {
3830 error = EINVAL;
3831 } else {
3832 *coal = v;
3833 sc->bnx_coal_chg |= coal_chg_mask;
3834 }
3835 }
3836
3837 lwkt_serialize_exit(ifp->if_serializer);
3838 return error;
3839}
3840
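/*
 * Push any pending coalescing parameter changes to the host coalescing
 * engine registers. Each write is followed by a short delay and a
 * read-back, presumably to make sure the new value has taken effect.
 */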
3841static void
3842bnx_coal_change(struct bnx_softc *sc)
3843{
3844 struct ifnet *ifp = &sc->arpcom.ac_if;
3845 uint32_t val;
3846
3847 ASSERT_SERIALIZED(ifp->if_serializer);
3848
3849 if (sc->bnx_coal_chg & BNX_RX_COAL_TICKS_CHG) {
3850 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS,
3851 sc->bnx_rx_coal_ticks);
3852 DELAY(10);
3853 val = CSR_READ_4(sc, BGE_HCC_RX_COAL_TICKS);
3854
3855 if (bootverbose) {
3856 if_printf(ifp, "rx_coal_ticks -> %u\n",
3857 sc->bnx_rx_coal_ticks);
3858 }
3859 }
3860
3861 if (sc->bnx_coal_chg & BNX_TX_COAL_TICKS_CHG) {
3862 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS,
3863 sc->bnx_tx_coal_ticks);
3864 DELAY(10);
3865 val = CSR_READ_4(sc, BGE_HCC_TX_COAL_TICKS);
3866
3867 if (bootverbose) {
3868 if_printf(ifp, "tx_coal_ticks -> %u\n",
3869 sc->bnx_tx_coal_ticks);
3870 }
3871 }
3872
3873 if (sc->bnx_coal_chg & BNX_RX_COAL_BDS_CHG) {
3874 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS,
3875 sc->bnx_rx_coal_bds);
3876 DELAY(10);
3877 val = CSR_READ_4(sc, BGE_HCC_RX_MAX_COAL_BDS);
3878
3879 if (bootverbose) {
3880 if_printf(ifp, "rx_coal_bds -> %u\n",
3881 sc->bnx_rx_coal_bds);
3882 }
3883 }
3884
3885 if (sc->bnx_coal_chg & BNX_TX_COAL_BDS_CHG) {
3886 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS,
3887 sc->bnx_tx_coal_bds);
3888 DELAY(10);
3889 val = CSR_READ_4(sc, BGE_HCC_TX_MAX_COAL_BDS);
3890
3891 if (bootverbose) {
3892 if_printf(ifp, "tx_max_coal_bds -> %u\n",
3893 sc->bnx_tx_coal_bds);
3894 }
3895 }
3896
3897 if (sc->bnx_coal_chg & BNX_RX_COAL_BDS_INT_CHG) {
3898 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT,
3899 sc->bnx_rx_coal_bds_int);
3900 DELAY(10);
3901 val = CSR_READ_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT);
3902
3903 if (bootverbose) {
3904 if_printf(ifp, "rx_coal_bds_int -> %u\n",
3905 sc->bnx_rx_coal_bds_int);
3906 }
3907 }
3908
3909 if (sc->bnx_coal_chg & BNX_TX_COAL_BDS_INT_CHG) {
3910 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT,
3911 sc->bnx_tx_coal_bds_int);
3912 DELAY(10);
3913 val = CSR_READ_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT);
3914
3915 if (bootverbose) {
3916 if_printf(ifp, "tx_coal_bds_int -> %u\n",
3917 sc->bnx_tx_coal_bds_int);
3918 }
3919 }
3920
3921 sc->bnx_coal_chg = 0;
3922}
3923
3924static void
3925bnx_enable_intr(struct bnx_softc *sc)
3926{
3927 struct ifnet *ifp = &sc->arpcom.ac_if;
3928
3929 lwkt_serialize_handler_enable(ifp->if_serializer);
3930
3931 /*
3932 * Enable interrupt.
3933 */
3934 bnx_writembx(sc, BGE_MBX_IRQ0_LO, sc->bnx_status_tag << 24);
3935 if (sc->bnx_flags & BNX_FLAG_ONESHOT_MSI) {
3936 /* XXX Linux driver */
3937 bnx_writembx(sc, BGE_MBX_IRQ0_LO, sc->bnx_status_tag << 24);
3938 }
3939
3940 /*
3941 * Unmask the interrupt when we stop polling.
3942 */
3943 PCI_CLRBIT(sc->bnx_dev, BGE_PCI_MISC_CTL,
3944 BGE_PCIMISCCTL_MASK_PCI_INTR, 4);
3945
3946 /*
3947 	 * Trigger another interrupt, since the above write
3948 	 * to interrupt mailbox 0 may have acknowledged a
3949 	 * pending interrupt.
3950 */
3951 BNX_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
3952}
3953
3954static void
3955bnx_disable_intr(struct bnx_softc *sc)
3956{
3957 struct ifnet *ifp = &sc->arpcom.ac_if;
3958
3959 /*
3960 * Mask the interrupt when we start polling.
3961 */
3962 PCI_SETBIT(sc->bnx_dev, BGE_PCI_MISC_CTL,
3963 BGE_PCIMISCCTL_MASK_PCI_INTR, 4);
3964
3965 /*
3966 	 * Acknowledge a possibly asserted interrupt.
3967 */
3968 bnx_writembx(sc, BGE_MBX_IRQ0_LO, 1);
3969
3970 lwkt_serialize_handler_disable(ifp->if_serializer);
3971}
3972
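/*
 * Try to read the MAC address from the chip's shared memory at
 * 0x0c14/0x0c18; the upper 16 bits of the first word (0x484b, ASCII
 * "HK") appear to serve as a validity signature. Returns 0 on success.
 */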
3973static int
3974bnx_get_eaddr_mem(struct bnx_softc *sc, uint8_t ether_addr[])
3975{
3976 uint32_t mac_addr;
3977 int ret = 1;
3978
3979 mac_addr = bnx_readmem_ind(sc, 0x0c14);
3980 if ((mac_addr >> 16) == 0x484b) {
3981 ether_addr[0] = (uint8_t)(mac_addr >> 8);
3982 ether_addr[1] = (uint8_t)mac_addr;
3983 mac_addr = bnx_readmem_ind(sc, 0x0c18);
3984 ether_addr[2] = (uint8_t)(mac_addr >> 24);
3985 ether_addr[3] = (uint8_t)(mac_addr >> 16);
3986 ether_addr[4] = (uint8_t)(mac_addr >> 8);
3987 ether_addr[5] = (uint8_t)mac_addr;
3988 ret = 0;
3989 }
3990 return ret;
3991}
3992
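/*
 * Read the MAC address from NVRAM. 5717 and later parts keep one
 * address per PCI function, so the offset is adjusted by the function
 * number; the 5906 uses its own fixed offset.
 */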
3993static int
3994bnx_get_eaddr_nvram(struct bnx_softc *sc, uint8_t ether_addr[])
3995{
3996 int mac_offset = BGE_EE_MAC_OFFSET;
3997
3998 if (BNX_IS_5717_PLUS(sc)) {
3999 int f;
4000
4001 f = pci_get_function(sc->bnx_dev);
4002 if (f & 1)
4003 mac_offset = BGE_EE_MAC_OFFSET_5717;
4004 if (f > 1)
4005 mac_offset += BGE_EE_MAC_OFFSET_5717_OFF;
4006 } else if (sc->bnx_asicrev == BGE_ASICREV_BCM5906) {
 4007 		mac_offset = BGE_EE_MAC_OFFSET_5906;
 4008 	}
4009
4010 return bnx_read_nvram(sc, ether_addr, mac_offset + 2, ETHER_ADDR_LEN);
4011}
4012
4013static int
4014bnx_get_eaddr_eeprom(struct bnx_softc *sc, uint8_t ether_addr[])
4015{
4016 if (sc->bnx_flags & BNX_FLAG_NO_EEPROM)
4017 return 1;
4018
4019 return bnx_read_eeprom(sc, ether_addr, BGE_EE_MAC_OFFSET + 2,
4020 ETHER_ADDR_LEN);
4021}
4022
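/*
 * Try each MAC address source in turn (shared memory, NVRAM, then
 * EEPROM) and fail with ENXIO only if none of them yields an address.
 */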
4023static int
4024bnx_get_eaddr(struct bnx_softc *sc, uint8_t eaddr[])
4025{
4026 static const bnx_eaddr_fcn_t bnx_eaddr_funcs[] = {
4027 /* NOTE: Order is critical */
4028 bnx_get_eaddr_mem,
4029 bnx_get_eaddr_nvram,
4030 bnx_get_eaddr_eeprom,
4031 NULL
4032 };
4033 const bnx_eaddr_fcn_t *func;
4034
4035 for (func = bnx_eaddr_funcs; *func != NULL; ++func) {
4036 if ((*func)(sc, eaddr) == 0)
4037 break;
4038 }
4039 return (*func == NULL ? ENXIO : 0);
4040}
4041
4042/*
4043 * NOTE: 'm' is not freed upon failure
4044 */
4045struct mbuf *
4046bnx_defrag_shortdma(struct mbuf *m)
4047{
4048 struct mbuf *n;
4049 int found;
4050
4051 /*
 4052 	 * If the device receives two back-to-back send BDs with less
 4053 	 * than or equal to 8 total bytes, then the device may hang. The
 4054 	 * two back-to-back send BDs must be in the same frame for this
 4055 	 * failure to occur. Scan the mbuf chain and see whether two such
 4056 	 * back-to-back send BDs are present. If so, allocate a new mbuf
 4057 	 * and copy the frame into it to work around the silicon bug.
4058 */
4059 for (n = m, found = 0; n != NULL; n = n->m_next) {
4060 if (n->m_len < 8) {
4061 found++;
4062 if (found > 1)
4063 break;
4064 continue;
4065 }
4066 found = 0;
4067 }
4068
4069 if (found > 1)
4070 n = m_defrag(m, MB_DONTWAIT);
4071 else
4072 n = m;
4073 return n;
4074}
4075
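/*
 * Clear the enable bit of a chip block and poll, in 100us steps for up
 * to BNX_TIMEOUT iterations, until the block reports itself stopped;
 * the wait is abandoned silently if the block never stops.
 */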
4076static void
4077bnx_stop_block(struct bnx_softc *sc, bus_size_t reg, uint32_t bit)
4078{
4079 int i;
4080
4081 BNX_CLRBIT(sc, reg, bit);
4082 for (i = 0; i < BNX_TIMEOUT; i++) {
4083 if ((CSR_READ_4(sc, reg) & bit) == 0)
4084 return;
4085 DELAY(100);
4086 }
4087}
4088
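/*
 * Check the MAC status register for a link attention (or a pending
 * link event) and dispatch to the media-specific link update routine.
 */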
4089static void
4090bnx_link_poll(struct bnx_softc *sc)
4091{
4092 uint32_t status;
4093
4094 status = CSR_READ_4(sc, BGE_MAC_STS);
4095 if ((status & sc->bnx_link_chg) || sc->bnx_link_evt) {
4096 sc->bnx_link_evt = 0;
4097 sc->bnx_link_upd(sc, status);
4098 }
4099}
4100
4101static void
4102bnx_enable_msi(struct bnx_softc *sc)
4103{
4104 uint32_t msi_mode;
4105
4106 msi_mode = CSR_READ_4(sc, BGE_MSI_MODE);
4107 msi_mode |= BGE_MSIMODE_ENABLE;
4108 if (sc->bnx_flags & BNX_FLAG_ONESHOT_MSI) {
4109 /*
4110 * NOTE:
4111 * 5718-PG105-R says that "one shot" mode
4112 * does not work if MSI is used, however,
4113 * it obviously works.
4114 */
4115 msi_mode &= ~BGE_MSIMODE_ONESHOT_DISABLE;
4116 }
4117 CSR_WRITE_4(sc, BGE_MSI_MODE, msi_mode);
4118}
4119
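/*
 * Build the DMA byte/word swap options (BGE_MODECTL_* bits). On
 * big-endian hosts non-frame data is byte swapped as well, and the
 * BCM5720 needs additional buffer-to-host RX swap and enable bits.
 */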
4120static uint32_t
4121bnx_dma_swap_options(struct bnx_softc *sc)
4122{
4123 uint32_t dma_options;
4124
4125 dma_options = BGE_MODECTL_WORDSWAP_NONFRAME |
4126 BGE_MODECTL_BYTESWAP_DATA | BGE_MODECTL_WORDSWAP_DATA;
4127#if BYTE_ORDER == BIG_ENDIAN
4128 dma_options |= BGE_MODECTL_BYTESWAP_NONFRAME;
4129#endif
4130 if (sc->bnx_asicrev == BGE_ASICREV_BCM5720) {
4131 dma_options |= BGE_MODECTL_BYTESWAP_B2HRX_DATA |
4132 BGE_MODECTL_WORDSWAP_B2HRX_DATA | BGE_MODECTL_B2HRX_ENABLE |
4133 BGE_MODECTL_HTX2B_ENABLE;
4134 }
4135 return dma_options;