/*
 * Copyright (c) 2001 Wind River Systems
 * Copyright (c) 1997, 1998, 1999, 2001
 *      Bill Paul <wpaul@windriver.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/bge/if_bge.c,v 1.3.2.39 2005/07/03 03:41:18 silby Exp $
 */

#include "opt_bnx.h"
#include "opt_ifpoll.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/interrupt.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/rman.h>
#include <sys/serialize.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <netinet/ip.h>
#include <netinet/tcp.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_poll.h>
#include <net/if_types.h>
#include <net/ifq_var.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>

#include <dev/netif/mii_layer/mii.h>
#include <dev/netif/mii_layer/miivar.h>
#include <dev/netif/mii_layer/brgphyreg.h>

#include <bus/pci/pcidevs.h>
#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>

#include <dev/netif/bge/if_bgereg.h>
#include <dev/netif/bnx/if_bnxvar.h>

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

#define BNX_CSUM_FEATURES       (CSUM_IP | CSUM_TCP | CSUM_UDP)

#define BNX_INTR_CKINTVL        ((10 * hz) / 1000)      /* 10ms */

static const struct bnx_type {
        uint16_t                bnx_vid;
        uint16_t                bnx_did;
        char                    *bnx_name;
} bnx_devs[] = {
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5717,
                "Broadcom BCM5717 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5718,
                "Broadcom BCM5718 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5719,
                "Broadcom BCM5719 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5720_ALT,
                "Broadcom BCM5720 Gigabit Ethernet" },

        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57761,
                "Broadcom BCM57761 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57762,
                "Broadcom BCM57762 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57765,
                "Broadcom BCM57765 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57766,
                "Broadcom BCM57766 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57781,
                "Broadcom BCM57781 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57782,
                "Broadcom BCM57782 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57785,
                "Broadcom BCM57785 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57786,
                "Broadcom BCM57786 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57791,
                "Broadcom BCM57791 Fast Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57795,
                "Broadcom BCM57795 Fast Ethernet" },

        { 0, 0, NULL }
};

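/*
 * Illustrative sketch (an assumption, not the body of bnx_probe(),
 * which is defined later in this file): PCI device matching against
 * the table above typically looks like this:
 *
 *      const struct bnx_type *t;
 *
 *      for (t = bnx_devs; t->bnx_name != NULL; t++) {
 *              if (pci_get_vendor(dev) == t->bnx_vid &&
 *                  pci_get_device(dev) == t->bnx_did) {
 *                      device_set_desc(dev, t->bnx_name);
 *                      return 0;
 *              }
 *      }
 *      return ENXIO;
 */
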
#define BNX_IS_JUMBO_CAPABLE(sc)        ((sc)->bnx_flags & BNX_FLAG_JUMBO)
#define BNX_IS_5717_PLUS(sc)            ((sc)->bnx_flags & BNX_FLAG_5717_PLUS)
#define BNX_IS_57765_PLUS(sc)           ((sc)->bnx_flags & BNX_FLAG_57765_PLUS)
#define BNX_IS_57765_FAMILY(sc)  \
        ((sc)->bnx_flags & BNX_FLAG_57765_FAMILY)

typedef int     (*bnx_eaddr_fcn_t)(struct bnx_softc *, uint8_t[]);

static int      bnx_probe(device_t);
static int      bnx_attach(device_t);
static int      bnx_detach(device_t);
static void     bnx_shutdown(device_t);
static int      bnx_suspend(device_t);
static int      bnx_resume(device_t);
static int      bnx_miibus_readreg(device_t, int, int);
static int      bnx_miibus_writereg(device_t, int, int, int);
static void     bnx_miibus_statchg(device_t);

#ifdef IFPOLL_ENABLE
static void     bnx_npoll(struct ifnet *, struct ifpoll_info *);
static void     bnx_npoll_compat(struct ifnet *, void *, int);
#endif
static void     bnx_intr_legacy(void *);
static void     bnx_msi(void *);
static void     bnx_msi_oneshot(void *);
static void     bnx_intr(struct bnx_softc *);
static void     bnx_enable_intr(struct bnx_softc *);
static void     bnx_disable_intr(struct bnx_softc *);
static void     bnx_txeof(struct bnx_softc *, uint16_t);
static void     bnx_rxeof(struct bnx_softc *, uint16_t, int);

static void     bnx_start(struct ifnet *, struct ifaltq_subque *);
static int      bnx_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void     bnx_init(void *);
static void     bnx_stop(struct bnx_softc *);
static void     bnx_watchdog(struct ifnet *);
static int      bnx_ifmedia_upd(struct ifnet *);
static void     bnx_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static void     bnx_tick(void *);

static int      bnx_alloc_jumbo_mem(struct bnx_softc *);
static void     bnx_free_jumbo_mem(struct bnx_softc *);
static struct bnx_jslot
                *bnx_jalloc(struct bnx_softc *);
static void     bnx_jfree(void *);
static void     bnx_jref(void *);
static int      bnx_newbuf_std(struct bnx_softc *, int, int);
static int      bnx_newbuf_jumbo(struct bnx_softc *, int, int);
static void     bnx_setup_rxdesc_std(struct bnx_softc *, int);
static void     bnx_setup_rxdesc_jumbo(struct bnx_softc *, int);
static int      bnx_init_rx_ring_std(struct bnx_softc *);
static void     bnx_free_rx_ring_std(struct bnx_softc *);
static int      bnx_init_rx_ring_jumbo(struct bnx_softc *);
static void     bnx_free_rx_ring_jumbo(struct bnx_softc *);
static void     bnx_free_tx_ring(struct bnx_softc *);
static int      bnx_init_tx_ring(struct bnx_softc *);
static int      bnx_dma_alloc(struct bnx_softc *);
static void     bnx_dma_free(struct bnx_softc *);
static int      bnx_dma_block_alloc(struct bnx_softc *, bus_size_t,
                    bus_dma_tag_t *, bus_dmamap_t *, void **, bus_addr_t *);
static void     bnx_dma_block_free(bus_dma_tag_t, bus_dmamap_t, void *);
static struct mbuf *
                bnx_defrag_shortdma(struct mbuf *);
static int      bnx_encap(struct bnx_softc *, struct mbuf **,
                        uint32_t *, int *);
static int      bnx_setup_tso(struct bnx_softc *, struct mbuf **,
                    uint16_t *, uint16_t *);

static void     bnx_reset(struct bnx_softc *);
static int      bnx_chipinit(struct bnx_softc *);
static int      bnx_blockinit(struct bnx_softc *);
static void     bnx_stop_block(struct bnx_softc *, bus_size_t, uint32_t);
static void     bnx_enable_msi(struct bnx_softc *sc);
static void     bnx_setmulti(struct bnx_softc *);
static void     bnx_setpromisc(struct bnx_softc *);
static void     bnx_stats_update_regs(struct bnx_softc *);
static uint32_t bnx_dma_swap_options(struct bnx_softc *);

static uint32_t bnx_readmem_ind(struct bnx_softc *, uint32_t);
static void     bnx_writemem_ind(struct bnx_softc *, uint32_t, uint32_t);
#ifdef notdef
static uint32_t bnx_readreg_ind(struct bnx_softc *, uint32_t);
#endif
static void     bnx_writereg_ind(struct bnx_softc *, uint32_t, uint32_t);
static void     bnx_writemem_direct(struct bnx_softc *, uint32_t, uint32_t);
static void     bnx_writembx(struct bnx_softc *, int, int);
static uint8_t  bnx_nvram_getbyte(struct bnx_softc *, int, uint8_t *);
static int      bnx_read_nvram(struct bnx_softc *, caddr_t, int, int);
static uint8_t  bnx_eeprom_getbyte(struct bnx_softc *, uint32_t, uint8_t *);
static int      bnx_read_eeprom(struct bnx_softc *, caddr_t, uint32_t, size_t);

static void     bnx_tbi_link_upd(struct bnx_softc *, uint32_t);
static void     bnx_copper_link_upd(struct bnx_softc *, uint32_t);
static void     bnx_autopoll_link_upd(struct bnx_softc *, uint32_t);
static void     bnx_link_poll(struct bnx_softc *);

static int      bnx_get_eaddr_mem(struct bnx_softc *, uint8_t[]);
static int      bnx_get_eaddr_nvram(struct bnx_softc *, uint8_t[]);
static int      bnx_get_eaddr_eeprom(struct bnx_softc *, uint8_t[]);
static int      bnx_get_eaddr(struct bnx_softc *, uint8_t[]);

static void     bnx_coal_change(struct bnx_softc *);
static int      bnx_sysctl_rx_coal_ticks(SYSCTL_HANDLER_ARGS);
static int      bnx_sysctl_tx_coal_ticks(SYSCTL_HANDLER_ARGS);
static int      bnx_sysctl_rx_coal_bds(SYSCTL_HANDLER_ARGS);
static int      bnx_sysctl_tx_coal_bds(SYSCTL_HANDLER_ARGS);
static int      bnx_sysctl_rx_coal_bds_int(SYSCTL_HANDLER_ARGS);
static int      bnx_sysctl_tx_coal_bds_int(SYSCTL_HANDLER_ARGS);
static int      bnx_sysctl_coal_chg(SYSCTL_HANDLER_ARGS, uint32_t *,
                    int, int, uint32_t);

static int      bnx_msi_enable = 1;
TUNABLE_INT("hw.bnx.msi.enable", &bnx_msi_enable);

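/*
 * Usage note (example, not driver code): the tunable registered above
 * can be set from /boot/loader.conf before the driver attaches, e.g.
 * to fall back to legacy INTx interrupts:
 *
 *      hw.bnx.msi.enable="0"
 */
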
static device_method_t bnx_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe,         bnx_probe),
        DEVMETHOD(device_attach,        bnx_attach),
        DEVMETHOD(device_detach,        bnx_detach),
        DEVMETHOD(device_shutdown,      bnx_shutdown),
        DEVMETHOD(device_suspend,       bnx_suspend),
        DEVMETHOD(device_resume,        bnx_resume),

        /* bus interface */
        DEVMETHOD(bus_print_child,      bus_generic_print_child),
        DEVMETHOD(bus_driver_added,     bus_generic_driver_added),

        /* MII interface */
        DEVMETHOD(miibus_readreg,       bnx_miibus_readreg),
        DEVMETHOD(miibus_writereg,      bnx_miibus_writereg),
        DEVMETHOD(miibus_statchg,       bnx_miibus_statchg),

        { 0, 0 }
};

static DEFINE_CLASS_0(bnx, bnx_driver, bnx_methods, sizeof(struct bnx_softc));
static devclass_t bnx_devclass;

DECLARE_DUMMY_MODULE(if_bnx);
DRIVER_MODULE(if_bnx, pci, bnx_driver, bnx_devclass, NULL, NULL);
DRIVER_MODULE(miibus, bnx, miibus_driver, miibus_devclass, NULL, NULL);

static uint32_t
bnx_readmem_ind(struct bnx_softc *sc, uint32_t off)
{
        device_t dev = sc->bnx_dev;
        uint32_t val;

        if (sc->bnx_asicrev == BGE_ASICREV_BCM5906 &&
            off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4)
                return 0;

        pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
        val = pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4);
        pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
        return (val);
}

static void
bnx_writemem_ind(struct bnx_softc *sc, uint32_t off, uint32_t val)
{
        device_t dev = sc->bnx_dev;

        if (sc->bnx_asicrev == BGE_ASICREV_BCM5906 &&
            off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4)
                return;

        pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
        pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
        pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
}

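/*
 * Illustrative sketch (an assumption; the offset is only an example):
 * the two helpers above form a read-modify-write pair through the PCI
 * memory window, e.g. to set the low bit of a word in NIC-internal RAM:
 *
 *      uint32_t v;
 *
 *      v = bnx_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
 *      bnx_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, v | 1);
 */
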
#ifdef notdef
static uint32_t
bnx_readreg_ind(struct bnx_softc *sc, uint32_t off)
{
        device_t dev = sc->bnx_dev;

        pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
        return(pci_read_config(dev, BGE_PCI_REG_DATA, 4));
}
#endif

static void
bnx_writereg_ind(struct bnx_softc *sc, uint32_t off, uint32_t val)
{
        device_t dev = sc->bnx_dev;

        pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
        pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);
}

static void
bnx_writemem_direct(struct bnx_softc *sc, uint32_t off, uint32_t val)
{
        CSR_WRITE_4(sc, off, val);
}

static void
bnx_writembx(struct bnx_softc *sc, int off, int val)
{
        if (sc->bnx_asicrev == BGE_ASICREV_BCM5906)
                off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI;

        CSR_WRITE_4(sc, off, val);
}

static uint8_t
bnx_nvram_getbyte(struct bnx_softc *sc, int addr, uint8_t *dest)
{
        uint32_t access, byte = 0;
        int i;

        /* Lock. */
        CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
        for (i = 0; i < 8000; i++) {
                if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1)
                        break;
                DELAY(20);
        }
        if (i == 8000)
                return (1);

        /* Enable access. */
        access = CSR_READ_4(sc, BGE_NVRAM_ACCESS);
        CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access | BGE_NVRAMACC_ENABLE);

        CSR_WRITE_4(sc, BGE_NVRAM_ADDR, addr & 0xfffffffc);
        CSR_WRITE_4(sc, BGE_NVRAM_CMD, BGE_NVRAM_READCMD);
        for (i = 0; i < BNX_TIMEOUT * 10; i++) {
                DELAY(10);
                if (CSR_READ_4(sc, BGE_NVRAM_CMD) & BGE_NVRAMCMD_DONE) {
                        DELAY(10);
                        break;
                }
        }

        if (i == BNX_TIMEOUT * 10) {
                if_printf(&sc->arpcom.ac_if, "nvram read timed out\n");
                return (1);
        }

        /* Get result. */
        byte = CSR_READ_4(sc, BGE_NVRAM_RDDATA);

        *dest = (bswap32(byte) >> ((addr % 4) * 8)) & 0xFF;

        /* Disable access. */
        CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access);

        /* Unlock. */
        CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_CLR1);
        CSR_READ_4(sc, BGE_NVRAM_SWARB);

        return (0);
}

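/*
 * Worked example of the byte extraction in bnx_nvram_getbyte(): for
 * addr = 5, the command above fetches the 32-bit word at NVRAM offset
 * 4 (addr & 0xfffffffc); after bswap32() the requested byte sits in
 * lane (5 % 4) = 1, so it is recovered by shifting right 8 bits and
 * masking with 0xFF.
 */
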
/*
 * Read a sequence of bytes from NVRAM.
 */
static int
bnx_read_nvram(struct bnx_softc *sc, caddr_t dest, int off, int cnt)
{
        int err = 0, i;
        uint8_t byte = 0;

        if (sc->bnx_asicrev != BGE_ASICREV_BCM5906)
                return (1);

        for (i = 0; i < cnt; i++) {
                err = bnx_nvram_getbyte(sc, off + i, &byte);
                if (err)
                        break;
                *(dest + i) = byte;
        }

        return (err ? 1 : 0);
}

/*
 * Read a byte of data stored in the EEPROM at address 'addr.' The
 * BCM570x supports both the traditional bitbang interface and an
 * auto access interface for reading the EEPROM. We use the auto
 * access method.
 */
static uint8_t
bnx_eeprom_getbyte(struct bnx_softc *sc, uint32_t addr, uint8_t *dest)
{
        int i;
        uint32_t byte = 0;

        /*
         * Enable use of auto EEPROM access so we can avoid
         * having to use the bitbang method.
         */
        BNX_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);

        /* Reset the EEPROM, load the clock period. */
        CSR_WRITE_4(sc, BGE_EE_ADDR,
            BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
        DELAY(20);

        /* Issue the read EEPROM command. */
        CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);

        /* Wait for completion */
        for (i = 0; i < BNX_TIMEOUT * 10; i++) {
                DELAY(10);
                if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
                        break;
        }

        if (i == BNX_TIMEOUT * 10) {
                if_printf(&sc->arpcom.ac_if, "eeprom read timed out\n");
                return(1);
        }

        /* Get result. */
        byte = CSR_READ_4(sc, BGE_EE_DATA);

        *dest = (byte >> ((addr % 4) * 8)) & 0xFF;

        return(0);
}

/*
 * Read a sequence of bytes from the EEPROM.
 */
static int
bnx_read_eeprom(struct bnx_softc *sc, caddr_t dest, uint32_t off, size_t len)
{
        size_t i;
        int err;
        uint8_t byte;

        for (byte = 0, err = 0, i = 0; i < len; i++) {
                err = bnx_eeprom_getbyte(sc, off + i, &byte);
                if (err)
                        break;
                *(dest + i) = byte;
        }

        return(err ? 1 : 0);
}

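/*
 * Illustrative sketch (an assumption; bnx_get_eaddr_eeprom() later in
 * this file is the real consumer): fetching the 6-byte station address
 * from the EEPROM might look like:
 *
 *      uint8_t eaddr[ETHER_ADDR_LEN];
 *
 *      if (bnx_read_eeprom(sc, (caddr_t)eaddr, BGE_EE_MAC_OFFSET + 2,
 *          ETHER_ADDR_LEN))
 *              return ENXIO;
 */
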
static int
bnx_miibus_readreg(device_t dev, int phy, int reg)
{
        struct bnx_softc *sc = device_get_softc(dev);
        uint32_t val;
        int i;

        KASSERT(phy == sc->bnx_phyno,
            ("invalid phyno %d, should be %d", phy, sc->bnx_phyno));

        /* Clear the autopoll bit if set, otherwise may trigger PCI errors. */
        if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) {
                CSR_WRITE_4(sc, BGE_MI_MODE,
                    sc->bnx_mi_mode & ~BGE_MIMODE_AUTOPOLL);
                DELAY(80);
        }

        CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ | BGE_MICOMM_BUSY |
            BGE_MIPHY(phy) | BGE_MIREG(reg));

        /* Poll for the PHY register access to complete. */
        for (i = 0; i < BNX_TIMEOUT; i++) {
                DELAY(10);
                val = CSR_READ_4(sc, BGE_MI_COMM);
                if ((val & BGE_MICOMM_BUSY) == 0) {
                        DELAY(5);
                        val = CSR_READ_4(sc, BGE_MI_COMM);
                        break;
                }
        }
        if (i == BNX_TIMEOUT) {
                if_printf(&sc->arpcom.ac_if, "PHY read timed out "
                    "(phy %d, reg %d, val 0x%08x)\n", phy, reg, val);
                val = 0;
        }

        /* Restore the autopoll bit if necessary. */
        if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) {
                CSR_WRITE_4(sc, BGE_MI_MODE, sc->bnx_mi_mode);
                DELAY(80);
        }

        if (val & BGE_MICOMM_READFAIL)
                return 0;

        return (val & 0xFFFF);
}

static int
bnx_miibus_writereg(device_t dev, int phy, int reg, int val)
{
        struct bnx_softc *sc = device_get_softc(dev);
        int i;

        KASSERT(phy == sc->bnx_phyno,
            ("invalid phyno %d, should be %d", phy, sc->bnx_phyno));

        if (sc->bnx_asicrev == BGE_ASICREV_BCM5906 &&
            (reg == BRGPHY_MII_1000CTL || reg == BRGPHY_MII_AUXCTL))
                return 0;

        /* Clear the autopoll bit if set, otherwise may trigger PCI errors. */
        if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) {
                CSR_WRITE_4(sc, BGE_MI_MODE,
                    sc->bnx_mi_mode & ~BGE_MIMODE_AUTOPOLL);
                DELAY(80);
        }

        CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE | BGE_MICOMM_BUSY |
            BGE_MIPHY(phy) | BGE_MIREG(reg) | val);

        for (i = 0; i < BNX_TIMEOUT; i++) {
                DELAY(10);
                if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) {
                        DELAY(5);
                        CSR_READ_4(sc, BGE_MI_COMM); /* dummy read */
                        break;
                }
        }
        if (i == BNX_TIMEOUT) {
                if_printf(&sc->arpcom.ac_if, "PHY write timed out "
                    "(phy %d, reg %d, val %d)\n", phy, reg, val);
        }

        /* Restore the autopoll bit if necessary. */
        if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) {
                CSR_WRITE_4(sc, BGE_MI_MODE, sc->bnx_mi_mode);
                DELAY(80);
        }

        return 0;
}

static void
bnx_miibus_statchg(device_t dev)
{
        struct bnx_softc *sc;
        struct mii_data *mii;

        sc = device_get_softc(dev);
        mii = device_get_softc(sc->bnx_miibus);

        if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
            (IFM_ACTIVE | IFM_AVALID)) {
                switch (IFM_SUBTYPE(mii->mii_media_active)) {
                case IFM_10_T:
                case IFM_100_TX:
                        sc->bnx_link = 1;
                        break;
                case IFM_1000_T:
                case IFM_1000_SX:
                case IFM_2500_SX:
                        if (sc->bnx_asicrev != BGE_ASICREV_BCM5906)
                                sc->bnx_link = 1;
                        else
                                sc->bnx_link = 0;
                        break;
                default:
                        sc->bnx_link = 0;
                        break;
                }
        } else {
                sc->bnx_link = 0;
        }
        if (sc->bnx_link == 0)
                return;

        BNX_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
        if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
            IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) {
                BNX_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
        } else {
                BNX_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
        }

        if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
                BNX_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
        } else {
                BNX_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
        }
}

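/*
 * Illustrative sketch (an assumption, not driver code): the MII
 * accessors above can also be used directly to poll basic link
 * status, e.g.:
 *
 *      int bmsr;
 *
 *      bmsr = bnx_miibus_readreg(sc->bnx_dev, sc->bnx_phyno, MII_BMSR);
 *      if (bmsr & BMSR_LINK)
 *              ...the link is up...
 */
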
/*
 * Memory management for jumbo frames.
 */
static int
bnx_alloc_jumbo_mem(struct bnx_softc *sc)
{
        struct ifnet *ifp = &sc->arpcom.ac_if;
        struct bnx_jslot *entry;
        uint8_t *ptr;
        bus_addr_t paddr;
        int i, error;

        /*
         * Create tag for jumbo mbufs.
         * This is really a bit of a kludge. We allocate a special
         * jumbo buffer pool which (thanks to the way our DMA
         * memory allocation works) will consist of contiguous
         * pages. This means that even though a jumbo buffer might
         * be larger than a page size, we don't really need to
         * map it into more than one DMA segment. However, the
         * default mbuf tag will result in multi-segment mappings,
         * so we have to create a special jumbo mbuf tag that
         * lets us get away with mapping the jumbo buffers as
         * a single segment. I think eventually the driver should
         * be changed so that it uses ordinary mbufs and cluster
         * buffers, i.e. jumbo frames can span multiple DMA
         * descriptors. But that's a project for another day.
         */

        /*
         * Create DMA resources for the jumbo RX ring.
         */
        error = bnx_dma_block_alloc(sc, BGE_JUMBO_RX_RING_SZ,
                                    &sc->bnx_cdata.bnx_rx_jumbo_ring_tag,
                                    &sc->bnx_cdata.bnx_rx_jumbo_ring_map,
                                    (void *)&sc->bnx_ldata.bnx_rx_jumbo_ring,
                                    &sc->bnx_ldata.bnx_rx_jumbo_ring_paddr);
        if (error) {
                if_printf(ifp, "could not create jumbo RX ring\n");
                return error;
        }

        /*
         * Create DMA resources for the jumbo buffer block.
         */
        error = bnx_dma_block_alloc(sc, BNX_JMEM,
                                    &sc->bnx_cdata.bnx_jumbo_tag,
                                    &sc->bnx_cdata.bnx_jumbo_map,
                                    (void **)&sc->bnx_ldata.bnx_jumbo_buf,
                                    &paddr);
        if (error) {
                if_printf(ifp, "could not create jumbo buffer\n");
                return error;
        }

        SLIST_INIT(&sc->bnx_jfree_listhead);

        /*
         * Now divide the buffer up into 9K pieces and save the
         * addresses in an array.  Each jslot also records the address
         * of the softc structure for this interface, since bnx_jfree()
         * needs it but is called by the mbuf management code, which
         * will not pass it to us explicitly.
         */
        for (i = 0, ptr = sc->bnx_ldata.bnx_jumbo_buf; i < BNX_JSLOTS; i++) {
                entry = &sc->bnx_cdata.bnx_jslots[i];
                entry->bnx_sc = sc;
                entry->bnx_buf = ptr;
                entry->bnx_paddr = paddr;
                entry->bnx_inuse = 0;
                entry->bnx_slot = i;
                SLIST_INSERT_HEAD(&sc->bnx_jfree_listhead, entry, jslot_link);

                ptr += BNX_JLEN;
                paddr += BNX_JLEN;
        }
        return 0;
}

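/*
 * Layout sketch of the pool carved up above: slot i begins at
 * bnx_jumbo_buf + i * BNX_JLEN, with the matching bus address
 * paddr + i * BNX_JLEN.  The physically contiguous allocation is
 * what allows each 9K buffer to be mapped as a single DMA segment.
 */
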
static void
bnx_free_jumbo_mem(struct bnx_softc *sc)
{
        /* Destroy jumbo RX ring. */
        bnx_dma_block_free(sc->bnx_cdata.bnx_rx_jumbo_ring_tag,
                           sc->bnx_cdata.bnx_rx_jumbo_ring_map,
                           sc->bnx_ldata.bnx_rx_jumbo_ring);

        /* Destroy jumbo buffer block. */
        bnx_dma_block_free(sc->bnx_cdata.bnx_jumbo_tag,
                           sc->bnx_cdata.bnx_jumbo_map,
                           sc->bnx_ldata.bnx_jumbo_buf);
}

/*
 * Allocate a jumbo buffer.
 */
static struct bnx_jslot *
bnx_jalloc(struct bnx_softc *sc)
{
        struct bnx_jslot *entry;

        lwkt_serialize_enter(&sc->bnx_jslot_serializer);
        entry = SLIST_FIRST(&sc->bnx_jfree_listhead);
        if (entry) {
                SLIST_REMOVE_HEAD(&sc->bnx_jfree_listhead, jslot_link);
                entry->bnx_inuse = 1;
        } else {
                if_printf(&sc->arpcom.ac_if, "no free jumbo buffers\n");
        }
        lwkt_serialize_exit(&sc->bnx_jslot_serializer);
        return(entry);
}

/*
 * Adjust usage count on a jumbo buffer.
 */
static void
bnx_jref(void *arg)
{
        struct bnx_jslot *entry = (struct bnx_jslot *)arg;
        struct bnx_softc *sc = entry->bnx_sc;

        if (sc == NULL)
                panic("bnx_jref: can't find softc pointer!");

        if (&sc->bnx_cdata.bnx_jslots[entry->bnx_slot] != entry) {
                panic("bnx_jref: asked to reference buffer "
                    "that we don't manage!");
        } else if (entry->bnx_inuse == 0) {
                panic("bnx_jref: buffer already free!");
        } else {
                atomic_add_int(&entry->bnx_inuse, 1);
        }
}

/*
 * Release a jumbo buffer.
 */
static void
bnx_jfree(void *arg)
{
        struct bnx_jslot *entry = (struct bnx_jslot *)arg;
        struct bnx_softc *sc = entry->bnx_sc;

        if (sc == NULL)
                panic("bnx_jfree: can't find softc pointer!");

        if (&sc->bnx_cdata.bnx_jslots[entry->bnx_slot] != entry) {
                panic("bnx_jfree: asked to free buffer that we don't manage!");
        } else if (entry->bnx_inuse == 0) {
                panic("bnx_jfree: buffer already free!");
        } else {
                /*
                 * Possible MP race to 0, use the serializer.  The atomic insn
                 * is still needed for races against bnx_jref().
                 */
                lwkt_serialize_enter(&sc->bnx_jslot_serializer);
                atomic_subtract_int(&entry->bnx_inuse, 1);
                if (entry->bnx_inuse == 0) {
                        SLIST_INSERT_HEAD(&sc->bnx_jfree_listhead,
                                          entry, jslot_link);
                }
                lwkt_serialize_exit(&sc->bnx_jslot_serializer);
        }
}

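/*
 * Illustrative scenario for the locking above: bnx_jref() increments
 * bnx_inuse atomically without the serializer, so if it races with
 * bnx_jfree() the atomic ops keep the count consistent, while the
 * serializer guarantees that only one thread observes the 1 -> 0
 * transition and re-inserts the slot onto bnx_jfree_listhead.
 */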

/*
 * Initialize a standard receive ring descriptor.
 */
static int
bnx_newbuf_std(struct bnx_softc *sc, int i, int init)
{
        struct mbuf *m_new = NULL;
        bus_dma_segment_t seg;
        bus_dmamap_t map;
        int error, nsegs;

        m_new = m_getcl(init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
        if (m_new == NULL)
                return ENOBUFS;
        m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
        m_adj(m_new, ETHER_ALIGN);

        error = bus_dmamap_load_mbuf_segment(sc->bnx_cdata.bnx_rx_mtag,
                        sc->bnx_cdata.bnx_rx_tmpmap, m_new,
                        &seg, 1, &nsegs, BUS_DMA_NOWAIT);
        if (error) {
                m_freem(m_new);
                return error;
        }

        if (!init) {
                bus_dmamap_sync(sc->bnx_cdata.bnx_rx_mtag,
                                sc->bnx_cdata.bnx_rx_std_dmamap[i],
                                BUS_DMASYNC_POSTREAD);
                bus_dmamap_unload(sc->bnx_cdata.bnx_rx_mtag,
                        sc->bnx_cdata.bnx_rx_std_dmamap[i]);
        }

        map = sc->bnx_cdata.bnx_rx_tmpmap;
        sc->bnx_cdata.bnx_rx_tmpmap = sc->bnx_cdata.bnx_rx_std_dmamap[i];
        sc->bnx_cdata.bnx_rx_std_dmamap[i] = map;

        sc->bnx_cdata.bnx_rx_std_chain[i].bnx_mbuf = m_new;
        sc->bnx_cdata.bnx_rx_std_chain[i].bnx_paddr = seg.ds_addr;

        bnx_setup_rxdesc_std(sc, i);
        return 0;
}

static void
bnx_setup_rxdesc_std(struct bnx_softc *sc, int i)
{
        struct bnx_rxchain *rc;
        struct bge_rx_bd *r;

        rc = &sc->bnx_cdata.bnx_rx_std_chain[i];
        r = &sc->bnx_ldata.bnx_rx_std_ring[i];

        r->bge_addr.bge_addr_lo = BGE_ADDR_LO(rc->bnx_paddr);
        r->bge_addr.bge_addr_hi = BGE_ADDR_HI(rc->bnx_paddr);
        r->bge_len = rc->bnx_mbuf->m_len;
        r->bge_idx = i;
        r->bge_flags = BGE_RXBDFLAG_END;
}

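/*
 * Worked example of the address split above (assuming the usual
 * definitions of BGE_ADDR_LO()/BGE_ADDR_HI() as the low and high
 * 32 bits of a 64-bit bus address): for rc->bnx_paddr = 0x123456780,
 * bge_addr_hi becomes 0x00000001 and bge_addr_lo becomes 0x23456780.
 */
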
/*
 * Initialize a jumbo receive ring descriptor. This allocates
 * a jumbo buffer from the pool managed internally by the driver.
 */
static int
bnx_newbuf_jumbo(struct bnx_softc *sc, int i, int init)
{
        struct mbuf *m_new = NULL;
        struct bnx_jslot *buf;
        bus_addr_t paddr;

        /* Allocate the mbuf. */
        MGETHDR(m_new, init ? MB_WAIT : MB_DONTWAIT, MT_DATA);
        if (m_new == NULL)
                return ENOBUFS;

        /* Allocate the jumbo buffer */
        buf = bnx_jalloc(sc);
        if (buf == NULL) {
                m_freem(m_new);
                return ENOBUFS;
        }

        /* Attach the buffer to the mbuf. */
        m_new->m_ext.ext_arg = buf;
        m_new->m_ext.ext_buf = buf->bnx_buf;
        m_new->m_ext.ext_free = bnx_jfree;
        m_new->m_ext.ext_ref = bnx_jref;
        m_new->m_ext.ext_size = BNX_JUMBO_FRAMELEN;

        m_new->m_flags |= M_EXT;

        m_new->m_data = m_new->m_ext.ext_buf;
        m_new->m_len = m_new->m_pkthdr.len = m_new->m_ext.ext_size;

        paddr = buf->bnx_paddr;
        m_adj(m_new, ETHER_ALIGN);
        paddr += ETHER_ALIGN;

        /* Save necessary information */
        sc->bnx_cdata.bnx_rx_jumbo_chain[i].bnx_mbuf = m_new;
        sc->bnx_cdata.bnx_rx_jumbo_chain[i].bnx_paddr = paddr;

        /* Set up the descriptor. */
        bnx_setup_rxdesc_jumbo(sc, i);
        return 0;
}

static void
bnx_setup_rxdesc_jumbo(struct bnx_softc *sc, int i)
{
        struct bge_rx_bd *r;
        struct bnx_rxchain *rc;

        r = &sc->bnx_ldata.bnx_rx_jumbo_ring[i];
        rc = &sc->bnx_cdata.bnx_rx_jumbo_chain[i];

        r->bge_addr.bge_addr_lo = BGE_ADDR_LO(rc->bnx_paddr);
        r->bge_addr.bge_addr_hi = BGE_ADDR_HI(rc->bnx_paddr);
        r->bge_len = rc->bnx_mbuf->m_len;
        r->bge_idx = i;
        r->bge_flags = BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING;
}

static int
bnx_init_rx_ring_std(struct bnx_softc *sc)
{
        int i, error;

        for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
                error = bnx_newbuf_std(sc, i, 1);
                if (error)
                        return error;
        }

        sc->bnx_std = BGE_STD_RX_RING_CNT - 1;
        bnx_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bnx_std);

        return(0);
}

static void
bnx_free_rx_ring_std(struct bnx_softc *sc)
{
        int i;

        for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
                struct bnx_rxchain *rc = &sc->bnx_cdata.bnx_rx_std_chain[i];

                if (rc->bnx_mbuf != NULL) {
                        bus_dmamap_unload(sc->bnx_cdata.bnx_rx_mtag,
                                          sc->bnx_cdata.bnx_rx_std_dmamap[i]);
                        m_freem(rc->bnx_mbuf);
                        rc->bnx_mbuf = NULL;
                }
                bzero(&sc->bnx_ldata.bnx_rx_std_ring[i],
                    sizeof(struct bge_rx_bd));
        }
}

static int
bnx_init_rx_ring_jumbo(struct bnx_softc *sc)
{
        struct bge_rcb *rcb;
        int i, error;

        for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
                error = bnx_newbuf_jumbo(sc, i, 1);
                if (error)
                        return error;
        }

        sc->bnx_jumbo = BGE_JUMBO_RX_RING_CNT - 1;

        rcb = &sc->bnx_ldata.bnx_info.bnx_jumbo_rx_rcb;
        rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 0);
        CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);

        bnx_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bnx_jumbo);

        return(0);
}

static void
bnx_free_rx_ring_jumbo(struct bnx_softc *sc)
{
        int i;

        for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
                struct bnx_rxchain *rc = &sc->bnx_cdata.bnx_rx_jumbo_chain[i];

                if (rc->bnx_mbuf != NULL) {
                        m_freem(rc->bnx_mbuf);
                        rc->bnx_mbuf = NULL;
                }
                bzero(&sc->bnx_ldata.bnx_rx_jumbo_ring[i],
                    sizeof(struct bge_rx_bd));
        }
}

static void
bnx_free_tx_ring(struct bnx_softc *sc)
{
        int i;

        for (i = 0; i < BGE_TX_RING_CNT; i++) {
                if (sc->bnx_cdata.bnx_tx_chain[i] != NULL) {
                        bus_dmamap_unload(sc->bnx_cdata.bnx_tx_mtag,
                                          sc->bnx_cdata.bnx_tx_dmamap[i]);
                        m_freem(sc->bnx_cdata.bnx_tx_chain[i]);
                        sc->bnx_cdata.bnx_tx_chain[i] = NULL;
                }
                bzero(&sc->bnx_ldata.bnx_tx_ring[i],
                    sizeof(struct bge_tx_bd));
        }
}

static int
bnx_init_tx_ring(struct bnx_softc *sc)
{
        sc->bnx_txcnt = 0;
        sc->bnx_tx_saved_considx = 0;
        sc->bnx_tx_prodidx = 0;

        /* Initialize transmit producer index for host-memory send ring. */
        bnx_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bnx_tx_prodidx);
        bnx_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);

        return(0);
}

static void
bnx_setmulti(struct bnx_softc *sc)
{
        struct ifnet *ifp;
        struct ifmultiaddr *ifma;
        uint32_t hashes[4] = { 0, 0, 0, 0 };
        int h, i;

        ifp = &sc->arpcom.ac_if;

        if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
                for (i = 0; i < 4; i++)
                        CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
                return;
        }

        /* First, zot all the existing filters. */
        for (i = 0; i < 4; i++)
                CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);

        /* Now program new ones. */
        TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
                if (ifma->ifma_addr->sa_family != AF_LINK)
                        continue;
                h = ether_crc32_le(
                    LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
                    ETHER_ADDR_LEN) & 0x7f;
                hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
        }

        for (i = 0; i < 4; i++)
                CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
}

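/*
 * Worked example of the hash mapping above: a 7-bit hash of, say,
 * h = 0x4b selects register (0x4b & 0x60) >> 5 = 2 and bit
 * (0x4b & 0x1f) = 11, i.e. hashes[2] |= 1 << 11, which lands in
 * multicast address register BGE_MAR0 + 8.
 */
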
/*
 * Do endian, PCI and DMA initialization. Also check the on-board ROM
 * self-test results.
 */
static int
bnx_chipinit(struct bnx_softc *sc)
{
        uint32_t dma_rw_ctl, mode_ctl;
        int i;

        /* Set endian type before we access any non-PCI registers. */
        pci_write_config(sc->bnx_dev, BGE_PCI_MISC_CTL,
            BGE_INIT | BGE_PCIMISCCTL_TAGGED_STATUS, 4);

        /* Clear the MAC control register */
        CSR_WRITE_4(sc, BGE_MAC_MODE, 0);

        /*
         * Clear the MAC statistics block in the NIC's
         * internal memory.
         */
        for (i = BGE_STATS_BLOCK;
            i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t))
                BNX_MEMWIN_WRITE(sc, i, 0);

        for (i = BGE_STATUS_BLOCK;
            i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t))
                BNX_MEMWIN_WRITE(sc, i, 0);

        if (BNX_IS_57765_FAMILY(sc)) {
                uint32_t val;

                if (sc->bnx_chipid == BGE_CHIPID_BCM57765_A0) {
                        mode_ctl = CSR_READ_4(sc, BGE_MODE_CTL);
                        val = mode_ctl & ~BGE_MODECTL_PCIE_PORTS;

                        /* Access the lower 1K of PL PCI-E block registers. */
                        CSR_WRITE_4(sc, BGE_MODE_CTL,
                            val | BGE_MODECTL_PCIE_PL_SEL);

                        val = CSR_READ_4(sc, BGE_PCIE_PL_LO_PHYCTL5);
                        val |= BGE_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ;
                        CSR_WRITE_4(sc, BGE_PCIE_PL_LO_PHYCTL5, val);

                        CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl);
                }
                if (sc->bnx_chiprev != BGE_CHIPREV_57765_AX) {
                        mode_ctl = CSR_READ_4(sc, BGE_MODE_CTL);
                        val = mode_ctl & ~BGE_MODECTL_PCIE_PORTS;

                        /* Access the lower 1K of DL PCI-E block registers. */
                        CSR_WRITE_4(sc, BGE_MODE_CTL,
                            val | BGE_MODECTL_PCIE_DL_SEL);

                        val = CSR_READ_4(sc, BGE_PCIE_DL_LO_FTSMAX);
                        val &= ~BGE_PCIE_DL_LO_FTSMAX_MASK;
                        val |= BGE_PCIE_DL_LO_FTSMAX_VAL;
                        CSR_WRITE_4(sc, BGE_PCIE_DL_LO_FTSMAX, val);

                        CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl);
                }

                val = CSR_READ_4(sc, BGE_CPMU_LSPD_10MB_CLK);
                val &= ~BGE_CPMU_LSPD_10MB_MACCLK_MASK;
                val |= BGE_CPMU_LSPD_10MB_MACCLK_6_25;
                CSR_WRITE_4(sc, BGE_CPMU_LSPD_10MB_CLK, val);
        }

        /*
         * Set up the PCI DMA control register.
         */
        dma_rw_ctl = pci_read_config(sc->bnx_dev, BGE_PCI_DMA_RW_CTL, 4);
        /*
         * Disable 32-byte cache alignment for DMA write to host memory
         *
         * NOTE:
         * 64-byte cache alignment for DMA write to host memory is still
         * enabled.
         */
        dma_rw_ctl |= BGE_PCIDMARWCTL_DIS_CACHE_ALIGNMENT;
        if (sc->bnx_chipid == BGE_CHIPID_BCM57765_A0)
                dma_rw_ctl &= ~BGE_PCIDMARWCTL_CRDRDR_RDMA_MRRS_MSK;
        /*
         * Enable HW workaround for controllers that misinterpret
         * a status tag update and leave interrupts permanently
         * disabled.
         */
        if (sc->bnx_asicrev != BGE_ASICREV_BCM5717 &&
            !BNX_IS_57765_FAMILY(sc))
                dma_rw_ctl |= BGE_PCIDMARWCTL_TAGGED_STATUS_WA;
        if (bootverbose) {
                if_printf(&sc->arpcom.ac_if, "DMA read/write %#x\n",
                    dma_rw_ctl);
        }
        pci_write_config(sc->bnx_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);

        /*
         * Set up general mode register.
         */
        mode_ctl = bnx_dma_swap_options(sc) | BGE_MODECTL_MAC_ATTN_INTR |
            BGE_MODECTL_HOST_SEND_BDS | BGE_MODECTL_TX_NO_PHDR_CSUM;
        CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl);

        /*
         * Disable memory write invalidate.  Apparently it is not supported
         * properly by these devices.  Also ensure that INTx isn't disabled,
         * as these chips need it even when using MSI.
         */
        PCI_CLRBIT(sc->bnx_dev, BGE_PCI_CMD,
            (PCIM_CMD_MWRICEN | PCIM_CMD_INTxDIS), 4);

        /* Set the timer prescaler (always 66MHz) */
        CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/);

        if (sc->bnx_asicrev == BGE_ASICREV_BCM5906) {
                DELAY(40);      /* XXX */

                /* Put PHY into ready state */
                BNX_CLRBIT(sc, BGE_MISC_CFG, BGE_MISCCFG_EPHY_IDDQ);
                CSR_READ_4(sc, BGE_MISC_CFG); /* Flush */
                DELAY(40);
        }

        return(0);
}

static int
bnx_blockinit(struct bnx_softc *sc)
{
        struct bge_rcb *rcb;
        bus_size_t vrcb;
        bge_hostaddr taddr;
        uint32_t val;
        int i, limit;

        /*
         * Initialize the memory window pointer register so that
         * we can access the first 32K of internal NIC RAM. This will
         * allow us to set up the TX send ring RCBs and the RX return
         * ring RCBs, plus other things which live in NIC memory.
         */
        CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);

        /* Configure mbuf pool watermarks */
        if (BNX_IS_57765_PLUS(sc)) {
                CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
                if (sc->arpcom.ac_if.if_mtu > ETHERMTU) {
                        CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x7e);
                        CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xea);
                } else {
                        CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x2a);
                        CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xa0);
                }
        } else if (sc->bnx_asicrev == BGE_ASICREV_BCM5906) {
                CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
                CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x04);
                CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x10);
        } else {
                CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
                CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
                CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
        }

        /* Configure DMA resource watermarks */
        CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
        CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);

        /* Enable buffer manager */
        val = BGE_BMANMODE_ENABLE | BGE_BMANMODE_LOMBUF_ATTN;
        /*
         * Change the arbitration algorithm of TXMBUF read request to
         * round-robin instead of priority based for BCM5719.  When
         * TXFIFO is almost empty, RDMA will hold its request until
         * TXFIFO is not almost empty.
         */
        if (sc->bnx_asicrev == BGE_ASICREV_BCM5719)
                val |= BGE_BMANMODE_NO_TX_UNDERRUN;
        if (sc->bnx_asicrev == BGE_ASICREV_BCM5717 ||
            sc->bnx_chipid == BGE_CHIPID_BCM5719_A0 ||
            sc->bnx_chipid == BGE_CHIPID_BCM5720_A0)
                val |= BGE_BMANMODE_LOMBUF_ATTN;
        CSR_WRITE_4(sc, BGE_BMAN_MODE, val);

        /* Poll for buffer manager start indication */
        for (i = 0; i < BNX_TIMEOUT; i++) {
                if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
                        break;
                DELAY(10);
        }

        if (i == BNX_TIMEOUT) {
                if_printf(&sc->arpcom.ac_if,
                          "buffer manager failed to start\n");
                return(ENXIO);
        }

        /* Enable flow-through queues */
        CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
        CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);

        /* Wait until queue initialization is complete */
        for (i = 0; i < BNX_TIMEOUT; i++) {
                if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
                        break;
                DELAY(10);
        }

        if (i == BNX_TIMEOUT) {
                if_printf(&sc->arpcom.ac_if,
                          "flow-through queue init failed\n");
                return(ENXIO);
        }

        /*
         * Summary of rings supported by the controller:
         *
         * Standard Receive Producer Ring
         * - This ring is used to feed receive buffers for "standard"
         *   sized frames (typically 1536 bytes) to the controller.
         *
         * Jumbo Receive Producer Ring
         * - This ring is used to feed receive buffers for jumbo sized
         *   frames (i.e. anything bigger than the "standard" frames)
         *   to the controller.
         *
         * Mini Receive Producer Ring
         * - This ring is used to feed receive buffers for "mini"
         *   sized frames to the controller.
         * - This feature required external memory for the controller
         *   but was never used in a production system.  Should always
         *   be disabled.
         *
         * Receive Return Ring
         * - After the controller has placed an incoming frame into a
         *   receive buffer, that buffer is moved into a receive return
         *   ring.  The driver is then responsible for passing the
         *   buffer up to the stack.  Many versions of the controller
         *   support multiple RR rings.
         *
         * Send Ring
         * - This ring is used for outgoing frames.  Many versions of
         *   the controller support multiple send rings.
         */

        /* Initialize the standard receive producer ring control block. */
        rcb = &sc->bnx_ldata.bnx_info.bnx_std_rx_rcb;
        rcb->bge_hostaddr.bge_addr_lo =
            BGE_ADDR_LO(sc->bnx_ldata.bnx_rx_std_ring_paddr);
        rcb->bge_hostaddr.bge_addr_hi =
            BGE_ADDR_HI(sc->bnx_ldata.bnx_rx_std_ring_paddr);
        if (BNX_IS_57765_PLUS(sc)) {
                /*
                 * Bits 31-16: Programmable ring size (2048, 1024, 512, .., 32)
                 * Bits 15-2 : Maximum RX frame size
                 * Bit 1     : 1 = Ring Disabled, 0 = Ring Enabled
                 * Bit 0     : Reserved
                 */
                rcb->bge_maxlen_flags =
                    BGE_RCB_MAXLEN_FLAGS(512, BNX_MAX_FRAMELEN << 2);
        } else {
                /*
                 * Bits 31-16: Programmable ring size (512, 256, 128, 64, 32)
                 * Bits 15-2 : Reserved (should be 0)
                 * Bit 1     : 1 = Ring Disabled, 0 = Ring Enabled
                 * Bit 0     : Reserved
                 */
                rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
        }
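        /*
         * Worked example of the packing above (assuming the conventional
         * definition BGE_RCB_MAXLEN_FLAGS(len, flags) == (len) << 16 |
         * (flags)): the 57765+ case places the ring size in bits 31-16
         * and the maximum frame size in bits 15-2, i.e. the value
         * written is (512 << 16) | (BNX_MAX_FRAMELEN << 2).
         */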
        if (BNX_IS_5717_PLUS(sc))
                rcb->bge_nicaddr = BGE_STD_RX_RINGS_5717;
        else
                rcb->bge_nicaddr = BGE_STD_RX_RINGS;
        /* Write the standard receive producer ring control block. */
        CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
        CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
        CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
        CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
        /* Reset the standard receive producer ring producer index. */
        bnx_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0);

        /*
         * Initialize the jumbo RX producer ring control
         * block.  We set the 'ring disabled' bit in the
         * flags field until we're actually ready to start
         * using this ring (i.e. once we set the MTU
         * high enough to require it).
         */
        if (BNX_IS_JUMBO_CAPABLE(sc)) {
                rcb = &sc->bnx_ldata.bnx_info.bnx_jumbo_rx_rcb;
                /* Get the jumbo receive producer ring RCB parameters. */
                rcb->bge_hostaddr.bge_addr_lo =
                    BGE_ADDR_LO(sc->bnx_ldata.bnx_rx_jumbo_ring_paddr);
                rcb->bge_hostaddr.bge_addr_hi =
                    BGE_ADDR_HI(sc->bnx_ldata.bnx_rx_jumbo_ring_paddr);
                rcb->bge_maxlen_flags =
                    BGE_RCB_MAXLEN_FLAGS(BNX_MAX_FRAMELEN,
                    BGE_RCB_FLAG_RING_DISABLED);
                if (BNX_IS_5717_PLUS(sc))
                        rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS_5717;
                else
                        rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
                CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
                    rcb->bge_hostaddr.bge_addr_hi);
                CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
                    rcb->bge_hostaddr.bge_addr_lo);
                /* Program the jumbo receive producer ring RCB parameters. */
                CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
                    rcb->bge_maxlen_flags);
                CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
                /* Reset the jumbo receive producer ring producer index. */
                bnx_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
        }

        /* Choose de-pipeline mode for BCM5906 A0, A1 and A2. */
        if (sc->bnx_asicrev == BGE_ASICREV_BCM5906 &&
            (sc->bnx_chipid == BGE_CHIPID_BCM5906_A0 ||
             sc->bnx_chipid == BGE_CHIPID_BCM5906_A1 ||
             sc->bnx_chipid == BGE_CHIPID_BCM5906_A2)) {
                CSR_WRITE_4(sc, BGE_ISO_PKT_TX,
                    (CSR_READ_4(sc, BGE_ISO_PKT_TX) & ~3) | 2);
        }

        /*
         * The BD ring replenish thresholds control how often the
         * hardware fetches new BD's from the producer rings in host
         * memory.  Setting the value too low on a busy system can
         * starve the hardware and reduce the throughput.
         *
         * Set the BD ring replenish thresholds.  The recommended
         * values are 1/8th the number of descriptors allocated to
         * each ring.
         */
1376         val = 8;
1377         CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, val);
1378         if (BNX_IS_JUMBO_CAPABLE(sc)) {
1379                 CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH,
1380                     BGE_JUMBO_RX_RING_CNT/8);
1381         }
1382         if (BNX_IS_57765_PLUS(sc)) {
1383                 CSR_WRITE_4(sc, BGE_STD_REPLENISH_LWM, 32);
1384                 CSR_WRITE_4(sc, BGE_JMB_REPLENISH_LWM, 16);
1385         }
1386
1387         /*
1388          * Disable all send rings by setting the 'ring disabled' bit
1389          * in the flags field of all the TX send ring control blocks,
1390          * located in NIC memory.
1391          */
1392         if (BNX_IS_5717_PLUS(sc))
1393                 limit = 4;
1394         else if (BNX_IS_57765_FAMILY(sc))
1395                 limit = 2;
1396         else
1397                 limit = 1;
1398         vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1399         for (i = 0; i < limit; i++) {
1400                 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1401                     BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
1402                 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1403                 vrcb += sizeof(struct bge_rcb);
1404         }
1405
1406         /* Configure send ring RCB 0 (we use only the first ring) */
1407         vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1408         BGE_HOSTADDR(taddr, sc->bnx_ldata.bnx_tx_ring_paddr);
1409         RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1410         RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1411         if (BNX_IS_5717_PLUS(sc)) {
1412                 RCB_WRITE_4(sc, vrcb, bge_nicaddr, BGE_SEND_RING_5717);
1413         } else {
1414                 RCB_WRITE_4(sc, vrcb, bge_nicaddr,
1415                     BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
1416         }
1417         RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1418             BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
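        /*
         * Sketch of what RCB_WRITE_4() most likely expands to (an
         * assumption, not the verbatim macro from the header): an
         * indirect write into the NIC memory window at the offset of
         * the named RCB field, e.g.
         *
         *	bnx_writemem_ind(sc,
         *	    vrcb + offsetof(struct bge_rcb, bge_maxlen_flags),
         *	    BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
         */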
1419
1420         /*
1421          * Disable all receive return rings by setting the
1422          * 'ring disabled' bit in the flags field of all the receive
1423          * return ring control blocks, located in NIC memory.
1424          */
1425         if (BNX_IS_5717_PLUS(sc)) {
1426                 /* Should be 17, use 16 until we get an SRAM map. */
1427                 limit = 16;
1428         } else if (BNX_IS_57765_FAMILY(sc)) {
1429                 limit = 4;
1430         } else {
1431                 limit = 1;
1432         }
1433         /* Disable all receive return rings. */
1434         vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1435         for (i = 0; i < limit; i++) {
1436                 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0);
1437                 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0);
1438                 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1439                     BGE_RCB_FLAG_RING_DISABLED);
1440                 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1441                 bnx_writembx(sc, BGE_MBX_RX_CONS0_LO +
1442                     (i * (sizeof(uint64_t))), 0);
1443                 vrcb += sizeof(struct bge_rcb);
1444         }
1445
1446         /*
1447          * Set up receive return ring 0.  Note that the NIC address
1448          * for RX return rings is 0x0.  The return rings live entirely
1449          * within the host, so the nicaddr field in the RCB isn't used.
1450          */
1451         vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1452         BGE_HOSTADDR(taddr, sc->bnx_ldata.bnx_rx_return_ring_paddr);
1453         RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1454         RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1455         RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1456         RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1457             BGE_RCB_MAXLEN_FLAGS(sc->bnx_return_ring_cnt, 0));
1458
1459         /* Set random backoff seed for TX */
1460         CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
1461             sc->arpcom.ac_enaddr[0] + sc->arpcom.ac_enaddr[1] +
1462             sc->arpcom.ac_enaddr[2] + sc->arpcom.ac_enaddr[3] +
1463             sc->arpcom.ac_enaddr[4] + sc->arpcom.ac_enaddr[5] +
1464             BGE_TX_BACKOFF_SEED_MASK);
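        /*
         * Worked example with a hypothetical MAC 00:10:18:01:02:03:
         * the byte sum is 0x00 + 0x10 + 0x18 + 0x01 + 0x02 + 0x03 =
         * 0x2e, so the value written above is
         * 0x2e + BGE_TX_BACKOFF_SEED_MASK.
         */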
1465
1466         /* Set inter-packet gap */
1467         val = 0x2620;
1468         if (sc->bnx_asicrev == BGE_ASICREV_BCM5720) {
1469                 val |= CSR_READ_4(sc, BGE_TX_LENGTHS) &
1470                     (BGE_TXLEN_JMB_FRM_LEN_MSK | BGE_TXLEN_CNT_DN_VAL_MSK);
1471         }
1472         CSR_WRITE_4(sc, BGE_TX_LENGTHS, val);
1473
1474         /*
1475          * Specify which ring to use for packets that don't match
1476          * any RX rules.
1477          */
1478         CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
1479
1480         /*
1481          * Configure number of RX lists. One interrupt distribution
1482          * list, sixteen active lists, one bad frames class.
1483          */
1484         CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
1485
1486         /* Initialize RX list placement stats mask. */
1487         CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
1488         CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
1489
1490         /* Disable host coalescing until we get it set up */
1491         CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
1492
1493         /* Poll to make sure it's shut down. */
1494         for (i = 0; i < BNX_TIMEOUT; i++) {
1495                 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
1496                         break;
1497                 DELAY(10);
1498         }
1499
1500         if (i == BNX_TIMEOUT) {
1501                 if_printf(&sc->arpcom.ac_if,
1502                           "host coalescing engine failed to idle\n");
1503                 return(ENXIO);
1504         }
1505
1506         /* Set up host coalescing defaults */
1507         CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bnx_rx_coal_ticks);
1508         CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bnx_tx_coal_ticks);
1509         CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bnx_rx_coal_bds);
1510         CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bnx_tx_coal_bds);
1511         CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, sc->bnx_rx_coal_bds_int);
1512         CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, sc->bnx_tx_coal_bds_int);
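        /*
         * A rough sketch of the coalescing semantics, inferred from
         * the sysctl descriptions in bnx_attach(): the controller
         * posts an interrupt once either the tick budget (in
         * microseconds) expires or the max BD count is reached,
         * whichever trips first; the *_int variants appear to apply
         * while an interrupt is already being serviced.
         */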
1513
1514         /* Set up address of status block */
1515         bzero(sc->bnx_ldata.bnx_status_block, BGE_STATUS_BLK_SZ);
1516         CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI,
1517             BGE_ADDR_HI(sc->bnx_ldata.bnx_status_block_paddr));
1518         CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
1519             BGE_ADDR_LO(sc->bnx_ldata.bnx_status_block_paddr));
1520
1521         /* Set up status block partial update size. */
1522         val = BGE_STATBLKSZ_32BYTE;
1523 #if 0
1524         /*
1525          * Does not seem to have visible effect in both
1526          * bulk data (1472B UDP datagram) and tiny data
1527          * (18B UDP datagram) TX tests.
1528          */
1529         val |= BGE_HCCMODE_CLRTICK_TX;
1530 #endif
1531         /* Turn on host coalescing state machine */
1532         CSR_WRITE_4(sc, BGE_HCC_MODE, val | BGE_HCCMODE_ENABLE);
1533
1534         /* Turn on RX BD completion state machine and enable attentions */
1535         CSR_WRITE_4(sc, BGE_RBDC_MODE,
1536             BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);
1537
1538         /* Turn on RX list placement state machine */
1539         CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
1540
1541         val = BGE_MACMODE_TXDMA_ENB | BGE_MACMODE_RXDMA_ENB |
1542             BGE_MACMODE_RX_STATS_CLEAR | BGE_MACMODE_TX_STATS_CLEAR |
1543             BGE_MACMODE_RX_STATS_ENB | BGE_MACMODE_TX_STATS_ENB |
1544             BGE_MACMODE_FRMHDR_DMA_ENB;
1545
1546         if (sc->bnx_flags & BNX_FLAG_TBI)
1547                 val |= BGE_PORTMODE_TBI;
1548         else if (sc->bnx_flags & BNX_FLAG_MII_SERDES)
1549                 val |= BGE_PORTMODE_GMII;
1550         else
1551                 val |= BGE_PORTMODE_MII;
1552
1553         /* Turn on DMA, clear stats */
1554         CSR_WRITE_4(sc, BGE_MAC_MODE, val);
1555
1556         /* Set misc. local control, enable interrupts on attentions */
1557         CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);
1558
1559 #ifdef notdef
1560         /* Assert GPIO pins for PHY reset */
1561         BNX_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
1562             BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
1563         BNX_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
1564             BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
1565 #endif
1566
1567         /* Turn on write DMA state machine */
1568         val = BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS;
1569         /* Enable host coalescing bug fix. */
1570         val |= BGE_WDMAMODE_STATUS_TAG_FIX;
1571         if (sc->bnx_asicrev == BGE_ASICREV_BCM5785) {
1572                 /* Request larger DMA burst size to get better performance. */
1573                 val |= BGE_WDMAMODE_BURST_ALL_DATA;
1574         }
1575         CSR_WRITE_4(sc, BGE_WDMA_MODE, val);
1576         DELAY(40);
1577
1578         if (BNX_IS_57765_PLUS(sc)) {
1579                 uint32_t dmactl;
1580
1581                 dmactl = CSR_READ_4(sc, BGE_RDMA_RSRVCTRL);
1582                 /*
1583                  * Adjust tx margin to prevent TX data corruption and
1584                  * fix internal FIFO overflow.
1585                  */
1586                 if (sc->bnx_asicrev == BGE_ASICREV_BCM5719 ||
1587                     sc->bnx_asicrev == BGE_ASICREV_BCM5720) {
1588                         dmactl &= ~(BGE_RDMA_RSRVCTRL_FIFO_LWM_MASK |
1589                             BGE_RDMA_RSRVCTRL_FIFO_HWM_MASK |
1590                             BGE_RDMA_RSRVCTRL_TXMRGN_MASK);
1591                         dmactl |= BGE_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
1592                             BGE_RDMA_RSRVCTRL_FIFO_HWM_1_5K |
1593                             BGE_RDMA_RSRVCTRL_TXMRGN_320B;
1594                 }
1595                 /*
1596                  * Enable fix for read DMA FIFO overruns.
1597                  * The fix is to limit the number of RX BDs
1598                  * the hardware would fetch at a time.
1599                  */
1600                 CSR_WRITE_4(sc, BGE_RDMA_RSRVCTRL,
1601                     dmactl | BGE_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
1602         }
1603
1604         if (sc->bnx_asicrev == BGE_ASICREV_BCM5719) {
1605                 CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL,
1606                     CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) |
1607                     BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_4K |
1608                     BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K);
1609         } else if (sc->bnx_asicrev == BGE_ASICREV_BCM5720) {
1610                 /*
1611                  * Allow 4KB burst length reads for non-LSO frames.
1612                  * Enable 512B burst length reads for buffer descriptors.
1613                  */
1614                 CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL,
1615                     CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) |
1616                     BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_512 |
1617                     BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K);
1618         }
1619
1620         /* Turn on read DMA state machine */
1621         val = BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS;
1622         if (sc->bnx_asicrev == BGE_ASICREV_BCM5717)
1623                 val |= BGE_RDMAMODE_MULT_DMA_RD_DIS;
1624         if (sc->bnx_asicrev == BGE_ASICREV_BCM5784 ||
1625             sc->bnx_asicrev == BGE_ASICREV_BCM5785 ||
1626             sc->bnx_asicrev == BGE_ASICREV_BCM57780) {
1627                 val |= BGE_RDMAMODE_BD_SBD_CRPT_ATTN |
1628                     BGE_RDMAMODE_MBUF_RBD_CRPT_ATTN |
1629                     BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN;
1630         }
1631         if (sc->bnx_asicrev == BGE_ASICREV_BCM5720) {
1632                 val |= CSR_READ_4(sc, BGE_RDMA_MODE) &
1633                     BGE_RDMAMODE_H2BNC_VLAN_DET;
1634                 /*
1635                  * Allow multiple outstanding read requests from
1636                  * non-LSO read DMA engine.
1637                  */
1638                 val &= ~BGE_RDMAMODE_MULT_DMA_RD_DIS;
1639         }
1640         if (sc->bnx_flags & BNX_FLAG_TSO)
1641                 val |= BGE_RDMAMODE_TSO4_ENABLE;
1642         val |= BGE_RDMAMODE_FIFO_LONG_BURST;
1643         CSR_WRITE_4(sc, BGE_RDMA_MODE, val);
1644         DELAY(40);
1645
1646         /* Turn on RX data completion state machine */
1647         CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
1648
1649         /* Turn on RX BD initiator state machine */
1650         CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
1651
1652         /* Turn on RX data and RX BD initiator state machine */
1653         CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
1654
1655         /* Turn on send BD completion state machine */
1656         CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
1657
1658         /* Turn on send data completion state machine */
1659         val = BGE_SDCMODE_ENABLE;
1660         if (sc->bnx_asicrev == BGE_ASICREV_BCM5761)
1661                 val |= BGE_SDCMODE_CDELAY; 
1662         CSR_WRITE_4(sc, BGE_SDC_MODE, val);
1663
1664         /* Turn on send data initiator state machine */
1665         if (sc->bnx_flags & BNX_FLAG_TSO) {
1666                 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE |
1667                     BGE_SDIMODE_HW_LSO_PRE_DMA);
1668         } else {
1669                 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
1670         }
1671
1672         /* Turn on send BD initiator state machine */
1673         CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
1674
1675         /* Turn on send BD selector state machine */
1676         CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
1677
1678         CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
1679         CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
1680             BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);
1681
1682         /* ack/clear link change events */
1683         CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
1684             BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
1685             BGE_MACSTAT_LINK_CHANGED);
1686         CSR_WRITE_4(sc, BGE_MI_STS, 0);
1687
1688         /*
1689          * Enable attention when the link has changed state for
1690          * devices that use auto polling.
1691          */
1692         if (sc->bnx_flags & BNX_FLAG_TBI) {
1693                 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
1694         } else {
1695                 if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) {
1696                         CSR_WRITE_4(sc, BGE_MI_MODE, sc->bnx_mi_mode);
1697                         DELAY(80);
1698                 }
1699         }
1700
1701         /*
1702          * Clear any pending link state attention.
1703          * Otherwise some link state change events may be lost until attention
1704          * is cleared by bnx_intr() -> bnx_softc.bnx_link_upd() sequence.
1705          * It's not necessary on newer BCM chips - perhaps enabling link
1706          * state change attentions implies clearing pending attention.
1707          */
1708         CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
1709             BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
1710             BGE_MACSTAT_LINK_CHANGED);
1711
1712         /* Enable link state change attentions. */
1713         BNX_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
1714
1715         return(0);
1716 }
1717
1718 /*
1719  * Probe for a Broadcom chip. Check the PCI vendor and device IDs
1720  * against our list and return its name if we find a match. Note
1721  * against our list and return its name if we find a match. Note
1722  * that since the Broadcom controller contains VPD support, the
1723  * device name string could be read from the controller itself
1724  * instead of the compiled-in string; reading VPD is slow, however,
1725  * so this probe announces the compiled-in name from the table.
1726 static int
1727 bnx_probe(device_t dev)
1728 {
1729         const struct bnx_type *t;
1730         uint16_t product, vendor;
1731
1732         if (!pci_is_pcie(dev))
1733                 return ENXIO;
1734
1735         product = pci_get_device(dev);
1736         vendor = pci_get_vendor(dev);
1737
1738         for (t = bnx_devs; t->bnx_name != NULL; t++) {
1739                 if (vendor == t->bnx_vid && product == t->bnx_did)
1740                         break;
1741         }
1742         if (t->bnx_name == NULL)
1743                 return ENXIO;
1744
1745         device_set_desc(dev, t->bnx_name);
1746         return 0;
1747 }
1748
1749 static int
1750 bnx_attach(device_t dev)
1751 {
1752         struct ifnet *ifp;
1753         struct bnx_softc *sc;
1754         uint32_t hwcfg = 0, misccfg;
1755         int error = 0, rid, capmask;
1756         uint8_t ether_addr[ETHER_ADDR_LEN];
1757         uint16_t product;
1758         driver_intr_t *intr_func;
1759         uintptr_t mii_priv = 0;
1760         u_int intr_flags;
1761 #ifdef BNX_TSO_DEBUG
1762         char desc[32];
1763         int i;
1764 #endif
1765
1766         sc = device_get_softc(dev);
1767         sc->bnx_dev = dev;
1768         callout_init_mp(&sc->bnx_stat_timer);
1769         callout_init_mp(&sc->bnx_intr_timer);
1770         lwkt_serialize_init(&sc->bnx_jslot_serializer);
1771
1772         product = pci_get_device(dev);
1773
1774 #ifndef BURN_BRIDGES
1775         if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
1776                 uint32_t irq, mem;
1777
1778                 irq = pci_read_config(dev, PCIR_INTLINE, 4);
1779                 mem = pci_read_config(dev, BGE_PCI_BAR0, 4);
1780
1781                 device_printf(dev, "chip is in D%d power mode "
1782                     "-- setting to D0\n", pci_get_powerstate(dev));
1783
1784                 pci_set_powerstate(dev, PCI_POWERSTATE_D0);
1785
1786                 pci_write_config(dev, PCIR_INTLINE, irq, 4);
1787                 pci_write_config(dev, BGE_PCI_BAR0, mem, 4);
1788         }
1789 #endif  /* !BURN_BRIDGES */
1790
1791         /*
1792          * Map control/status registers.
1793          */
1794         pci_enable_busmaster(dev);
1795
1796         rid = BGE_PCI_BAR0;
1797         sc->bnx_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
1798             RF_ACTIVE);
1799
1800         if (sc->bnx_res == NULL) {
1801                 device_printf(dev, "couldn't map memory\n");
1802                 return ENXIO;
1803         }
1804
1805         sc->bnx_btag = rman_get_bustag(sc->bnx_res);
1806         sc->bnx_bhandle = rman_get_bushandle(sc->bnx_res);
1807
1808         /* Save various chip information */
1809         sc->bnx_chipid =
1810             pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >>
1811             BGE_PCIMISCCTL_ASICREV_SHIFT;
1812         if (BGE_ASICREV(sc->bnx_chipid) == BGE_ASICREV_USE_PRODID_REG) {
1813                 /* All chips having dedicated ASICREV register have CPMU */
1814                 sc->bnx_flags |= BNX_FLAG_CPMU;
1815
1816                 switch (product) {
1817                 case PCI_PRODUCT_BROADCOM_BCM5717:
1818                 case PCI_PRODUCT_BROADCOM_BCM5718:
1819                 case PCI_PRODUCT_BROADCOM_BCM5719:
1820                 case PCI_PRODUCT_BROADCOM_BCM5720_ALT:
1821                         sc->bnx_chipid = pci_read_config(dev,
1822                             BGE_PCI_GEN2_PRODID_ASICREV, 4);
1823                         break;
1824
1825                 case PCI_PRODUCT_BROADCOM_BCM57761:
1826                 case PCI_PRODUCT_BROADCOM_BCM57762:
1827                 case PCI_PRODUCT_BROADCOM_BCM57765:
1828                 case PCI_PRODUCT_BROADCOM_BCM57766:
1829                 case PCI_PRODUCT_BROADCOM_BCM57781:
1830                 case PCI_PRODUCT_BROADCOM_BCM57782:
1831                 case PCI_PRODUCT_BROADCOM_BCM57785:
1832                 case PCI_PRODUCT_BROADCOM_BCM57786:
1833                 case PCI_PRODUCT_BROADCOM_BCM57791:
1834                 case PCI_PRODUCT_BROADCOM_BCM57795:
1835                         sc->bnx_chipid = pci_read_config(dev,
1836                             BGE_PCI_GEN15_PRODID_ASICREV, 4);
1837                         break;
1838
1839                 default:
1840                         sc->bnx_chipid = pci_read_config(dev,
1841                             BGE_PCI_PRODID_ASICREV, 4);
1842                         break;
1843                 }
1844         }
1845         sc->bnx_asicrev = BGE_ASICREV(sc->bnx_chipid);
1846         sc->bnx_chiprev = BGE_CHIPREV(sc->bnx_chipid);
1847
1848         switch (sc->bnx_asicrev) {
1849         case BGE_ASICREV_BCM5717:
1850         case BGE_ASICREV_BCM5719:
1851         case BGE_ASICREV_BCM5720:
1852                 sc->bnx_flags |= BNX_FLAG_5717_PLUS | BNX_FLAG_57765_PLUS;
1853                 break;
1854
1855         case BGE_ASICREV_BCM57765:
1856         case BGE_ASICREV_BCM57766:
1857                 sc->bnx_flags |= BNX_FLAG_57765_FAMILY | BNX_FLAG_57765_PLUS;
1858                 break;
1859         }
1860         sc->bnx_flags |= BNX_FLAG_SHORTDMA;
1861
1862         sc->bnx_flags |= BNX_FLAG_TSO;
1863         if (sc->bnx_asicrev == BGE_ASICREV_BCM5719 &&
1864             sc->bnx_chipid == BGE_CHIPID_BCM5719_A0)
1865                 sc->bnx_flags &= ~BNX_FLAG_TSO;
1866
1867         if (sc->bnx_asicrev == BGE_ASICREV_BCM5717 ||
1868             BNX_IS_57765_FAMILY(sc)) {
1869                 /*
1870                  * All chips in the BCM57785 and BCM5718 families have
1871                  * a bug where, under certain conditions, the interrupt
1872                  * is not enabled even if a status tag is written to the
1873                  * BGE_MBX_IRQ0_LO mailbox.
1874                  *
1875                  * BCM5719 and BCM5720 have a hardware workaround that fixes it.
1876                  * See the comment near BGE_PCIDMARWCTL_TAGGED_STATUS_WA in
1877                  * bnx_chipinit().
1878                  *
1879                  * For the rest of the chips in these two families, we will
1880                  * have to poll the status block at a high rate (10ms currently)
1881                  * to check whether the interrupt is hosed or not.
1882                  * See bnx_intr_check() for details.
1883                  */
1884                 sc->bnx_flags |= BNX_FLAG_STATUSTAG_BUG;
1885         }
1886
1887         misccfg = CSR_READ_4(sc, BGE_MISC_CFG) & BGE_MISCCFG_BOARD_ID_MASK;
1888
1889         sc->bnx_pciecap = pci_get_pciecap_ptr(sc->bnx_dev);
1890         if (sc->bnx_asicrev == BGE_ASICREV_BCM5719 ||
1891             sc->bnx_asicrev == BGE_ASICREV_BCM5720)
1892                 pcie_set_max_readrq(dev, PCIEM_DEVCTL_MAX_READRQ_2048);
1893         else
1894                 pcie_set_max_readrq(dev, PCIEM_DEVCTL_MAX_READRQ_4096);
1895         device_printf(dev, "CHIP ID 0x%08x; "
1896                       "ASIC REV 0x%02x; CHIP REV 0x%02x\n",
1897                       sc->bnx_chipid, sc->bnx_asicrev, sc->bnx_chiprev);
1898
1899         /*
1900          * Set various PHY quirk flags.
1901          */
1902
1903         capmask = MII_CAPMASK_DEFAULT;
1904         if (product == PCI_PRODUCT_BROADCOM_BCM57791 ||
1905             product == PCI_PRODUCT_BROADCOM_BCM57795) {
1906                 /* 10/100 only */
1907                 capmask &= ~BMSR_EXTSTAT;
1908         }
1909
1910         mii_priv |= BRGPHY_FLAG_WIRESPEED;
1911
1912         /*
1913          * Allocate interrupt
1914          */
1915         sc->bnx_irq_type = pci_alloc_1intr(dev, bnx_msi_enable, &sc->bnx_irq_rid,
1916             &intr_flags);
1917
1918         sc->bnx_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->bnx_irq_rid,
1919             intr_flags);
1920         if (sc->bnx_irq == NULL) {
1921                 device_printf(dev, "couldn't map interrupt\n");
1922                 error = ENXIO;
1923                 goto fail;
1924         }
1925
1926         if (sc->bnx_irq_type == PCI_INTR_TYPE_MSI) {
1927                 sc->bnx_flags |= BNX_FLAG_ONESHOT_MSI;
1928                 bnx_enable_msi(sc);
1929         }
1930
1931         /* Initialize if_name earlier, so if_printf can be used */
1932         ifp = &sc->arpcom.ac_if;
1933         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1934
1935         /* Try to reset the chip. */
1936         bnx_reset(sc);
1937
1938         if (bnx_chipinit(sc)) {
1939                 device_printf(dev, "chip initialization failed\n");
1940                 error = ENXIO;
1941                 goto fail;
1942         }
1943
1944         /*
1945          * Get station address
1946          */
1947         error = bnx_get_eaddr(sc, ether_addr);
1948         if (error) {
1949                 device_printf(dev, "failed to read station address\n");
1950                 goto fail;
1951         }
1952
1953         if (BNX_IS_57765_PLUS(sc)) {
1954                 sc->bnx_return_ring_cnt = BGE_RETURN_RING_CNT;
1955         } else {
1956                 /* 5705/5750 limits RX return ring to 512 entries. */
1957                 sc->bnx_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
1958         }
1959
1960         error = bnx_dma_alloc(sc);
1961         if (error)
1962                 goto fail;
1963
1964         /* Set default tuneable values. */
1965         sc->bnx_rx_coal_ticks = BNX_RX_COAL_TICKS_DEF;
1966         sc->bnx_tx_coal_ticks = BNX_TX_COAL_TICKS_DEF;
1967         sc->bnx_rx_coal_bds = BNX_RX_COAL_BDS_DEF;
1968         sc->bnx_tx_coal_bds = BNX_TX_COAL_BDS_DEF;
1969         sc->bnx_rx_coal_bds_int = BNX_RX_COAL_BDS_INT_DEF;
1970         sc->bnx_tx_coal_bds_int = BNX_TX_COAL_BDS_INT_DEF;
1971         sc->bnx_tx_wreg = 8;
1972
1973         /* Set up ifnet structure */
1974         ifp->if_softc = sc;
1975         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1976         ifp->if_ioctl = bnx_ioctl;
1977         ifp->if_start = bnx_start;
1978 #ifdef IFPOLL_ENABLE
1979         ifp->if_npoll = bnx_npoll;
1980 #endif
1981         ifp->if_watchdog = bnx_watchdog;
1982         ifp->if_init = bnx_init;
1983         ifp->if_mtu = ETHERMTU;
1984         ifp->if_capabilities = IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
1985         ifq_set_maxlen(&ifp->if_snd, BGE_TX_RING_CNT - 1);
1986         ifq_set_ready(&ifp->if_snd);
1987
1988         ifp->if_capabilities |= IFCAP_HWCSUM;
1989         ifp->if_hwassist = BNX_CSUM_FEATURES;
1990         if (sc->bnx_flags & BNX_FLAG_TSO) {
1991                 ifp->if_capabilities |= IFCAP_TSO;
1992                 ifp->if_hwassist |= CSUM_TSO;
1993         }
1994         ifp->if_capenable = ifp->if_capabilities;
1995
1996         /*
1997          * Figure out what sort of media we have by checking the
1998          * hardware config word in the first 32k of NIC internal memory,
1999          * or fall back to examining the EEPROM if necessary.
2000          * Note: on some BCM5700 cards, this value appears to be unset.
2001          * If that's the case, we have to rely on identifying the NIC
2002          * by its PCI subsystem ID, as we do below for the SysKonnect
2003          * SK-9D41.
2004          */
2005         if (bnx_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER) {
2006                 hwcfg = bnx_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
2007         } else {
2008                 if (bnx_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
2009                                     sizeof(hwcfg))) {
2010                         device_printf(dev, "failed to read EEPROM\n");
2011                         error = ENXIO;
2012                         goto fail;
2013                 }
2014                 hwcfg = ntohl(hwcfg);
2015         }
2016
2017         /* The SysKonnect SK-9D41 is a 1000baseSX card. */
2018         if (pci_get_subvendor(dev) == PCI_PRODUCT_SCHNEIDERKOCH_SK_9D41 ||
2019             (hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER)
2020                 sc->bnx_flags |= BNX_FLAG_TBI;
2021
2022         /* Setup MI MODE */
2023         if (sc->bnx_flags & BNX_FLAG_CPMU)
2024                 sc->bnx_mi_mode = BGE_MIMODE_500KHZ_CONST;
2025         else
2026                 sc->bnx_mi_mode = BGE_MIMODE_BASE;
2027
2028         /* Set up link status update routines */
2029         if (sc->bnx_flags & BNX_FLAG_TBI) {
2030                 sc->bnx_link_upd = bnx_tbi_link_upd;
2031                 sc->bnx_link_chg = BGE_MACSTAT_LINK_CHANGED;
2032         } else if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) {
2033                 sc->bnx_link_upd = bnx_autopoll_link_upd;
2034                 sc->bnx_link_chg = BGE_MACSTAT_LINK_CHANGED;
2035         } else {
2036                 sc->bnx_link_upd = bnx_copper_link_upd;
2037                 sc->bnx_link_chg = BGE_MACSTAT_LINK_CHANGED;
2038         }
2039
2040         /* Set default PHY address */
2041         sc->bnx_phyno = 1;
2042
2043         /*
2044          * PHY address mapping for various devices.
2045          *
2046          *          | F0 Cu | F0 Sr | F1 Cu | F1 Sr |
2047          * ---------+-------+-------+-------+-------+
2048          * BCM57XX  |   1   |   X   |   X   |   X   |
2049          * BCM5704  |   1   |   X   |   1   |   X   |
2050          * BCM5717  |   1   |   8   |   2   |   9   |
2051          * BCM5719  |   1   |   8   |   2   |   9   |
2052          * BCM5720  |   1   |   8   |   2   |   9   |
2053          *
2054          * Other addresses may respond but they are not
2055          * IEEE compliant PHYs and should be ignored.
2056          */
2057         if (BNX_IS_5717_PLUS(sc)) {
2058                 int f;
2059
2060                 f = pci_get_function(dev);
2061                 if (sc->bnx_chipid == BGE_CHIPID_BCM5717_A0) {
2062                         if (CSR_READ_4(sc, BGE_SGDIG_STS) &
2063                             BGE_SGDIGSTS_IS_SERDES)
2064                                 sc->bnx_phyno = f + 8;
2065                         else
2066                                 sc->bnx_phyno = f + 1;
2067                 } else {
2068                         if (CSR_READ_4(sc, BGE_CPMU_PHY_STRAP) &
2069                             BGE_CPMU_PHY_STRAP_IS_SERDES)
2070                                 sc->bnx_phyno = f + 8;
2071                         else
2072                                 sc->bnx_phyno = f + 1;
2073                 }
2074         }
2075
2076         if (sc->bnx_flags & BNX_FLAG_TBI) {
2077                 ifmedia_init(&sc->bnx_ifmedia, IFM_IMASK,
2078                     bnx_ifmedia_upd, bnx_ifmedia_sts);
2079                 ifmedia_add(&sc->bnx_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
2080                 ifmedia_add(&sc->bnx_ifmedia,
2081                     IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL);
2082                 ifmedia_add(&sc->bnx_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
2083                 ifmedia_set(&sc->bnx_ifmedia, IFM_ETHER|IFM_AUTO);
2084                 sc->bnx_ifmedia.ifm_media = sc->bnx_ifmedia.ifm_cur->ifm_media;
2085         } else {
2086                 struct mii_probe_args mii_args;
2087
2088                 mii_probe_args_init(&mii_args, bnx_ifmedia_upd, bnx_ifmedia_sts);
2089                 mii_args.mii_probemask = 1 << sc->bnx_phyno;
2090                 mii_args.mii_capmask = capmask;
2091                 mii_args.mii_privtag = MII_PRIVTAG_BRGPHY;
2092                 mii_args.mii_priv = mii_priv;
2093
2094                 error = mii_probe(dev, &sc->bnx_miibus, &mii_args);
2095                 if (error) {
2096                         device_printf(dev, "MII without any PHY!\n");
2097                         goto fail;
2098                 }
2099         }
2100
2101         /*
2102          * Create sysctl nodes.
2103          */
2104         sysctl_ctx_init(&sc->bnx_sysctl_ctx);
2105         sc->bnx_sysctl_tree = SYSCTL_ADD_NODE(&sc->bnx_sysctl_ctx,
2106                                               SYSCTL_STATIC_CHILDREN(_hw),
2107                                               OID_AUTO,
2108                                               device_get_nameunit(dev),
2109                                               CTLFLAG_RD, 0, "");
2110         if (sc->bnx_sysctl_tree == NULL) {
2111                 device_printf(dev, "can't add sysctl node\n");
2112                 error = ENXIO;
2113                 goto fail;
2114         }
2115
2116         SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
2117                         SYSCTL_CHILDREN(sc->bnx_sysctl_tree),
2118                         OID_AUTO, "rx_coal_ticks",
2119                         CTLTYPE_INT | CTLFLAG_RW,
2120                         sc, 0, bnx_sysctl_rx_coal_ticks, "I",
2121                         "Receive coalescing ticks (usec).");
2122         SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
2123                         SYSCTL_CHILDREN(sc->bnx_sysctl_tree),
2124                         OID_AUTO, "tx_coal_ticks",
2125                         CTLTYPE_INT | CTLFLAG_RW,
2126                         sc, 0, bnx_sysctl_tx_coal_ticks, "I",
2127                         "Transmit coalescing ticks (usec).");
2128         SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
2129                         SYSCTL_CHILDREN(sc->bnx_sysctl_tree),
2130                         OID_AUTO, "rx_coal_bds",
2131                         CTLTYPE_INT | CTLFLAG_RW,
2132                         sc, 0, bnx_sysctl_rx_coal_bds, "I",
2133                         "Receive max coalesced BD count.");
2134         SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
2135                         SYSCTL_CHILDREN(sc->bnx_sysctl_tree),
2136                         OID_AUTO, "tx_coal_bds",
2137                         CTLTYPE_INT | CTLFLAG_RW,
2138                         sc, 0, bnx_sysctl_tx_coal_bds, "I",
2139                         "Transmit max coalesced BD count.");
2140         /*
2141          * A common design characteristic for many Broadcom
2142          * client controllers is that they only support a
2143          * single outstanding DMA read operation on the PCIe
2144          * bus. This means that it will take twice as long to
2145          * fetch a TX frame that is split into header and
2146          * payload buffers as it does to fetch a single,
2147          * contiguous TX frame (2 reads vs. 1 read). For these
2148          * controllers, coalescing buffers to reduce the number
2149          * of memory reads is an effective way to get maximum
2150          * performance (about 940Mbps).  Without collapsing TX
2151          * buffers the maximum TCP bulk transfer performance
2152          * is about 850Mbps.  However, forcing mbufs to be
2153          * coalesced consumes a lot of CPU cycles, so leave it
2154          * off by default.
2155          */
2156         SYSCTL_ADD_INT(&sc->bnx_sysctl_ctx,
2157             SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
2158             "force_defrag", CTLFLAG_RW, &sc->bnx_force_defrag, 0,
2159             "Force defragment on TX path");
2160
2161         SYSCTL_ADD_INT(&sc->bnx_sysctl_ctx,
2162             SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
2163             "tx_wreg", CTLFLAG_RW, &sc->bnx_tx_wreg, 0,
2164             "# of segments before writing to hardware register");
2165
2166         SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
2167             SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
2168             "rx_coal_bds_int", CTLTYPE_INT | CTLFLAG_RW,
2169             sc, 0, bnx_sysctl_rx_coal_bds_int, "I",
2170             "Receive max coalesced BD count during interrupt.");
2171         SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
2172             SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
2173             "tx_coal_bds_int", CTLTYPE_INT | CTLFLAG_RW,
2174             sc, 0, bnx_sysctl_tx_coal_bds_int, "I",
2175             "Transmit max coalesced BD count during interrupt.");
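        /*
         * Example usage of the nodes created above (hypothetical
         * unit bnx0); all of them are writable at runtime:
         *
         *	sysctl hw.bnx0.rx_coal_ticks=150
         *	sysctl hw.bnx0.tx_coal_bds=64
         */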
2176
2177 #ifdef BNX_TSO_DEBUG
2178         for (i = 0; i < BNX_TSO_NSTATS; ++i) {
2179                 ksnprintf(desc, sizeof(desc), "tso%d", i + 1);
2180                 SYSCTL_ADD_ULONG(&sc->bnx_sysctl_ctx,
2181                     SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
2182                     desc, CTLFLAG_RW, &sc->bnx_tsosegs[i], "");
2183         }
2184 #endif
2185
2186         /*
2187          * Call MI attach routine.
2188          */
2189         ether_ifattach(ifp, ether_addr, NULL);
2190
2191 #ifdef IFPOLL_ENABLE
2192         ifpoll_compat_setup(&sc->bnx_npoll,
2193             &sc->bnx_sysctl_ctx, sc->bnx_sysctl_tree,
2194             device_get_unit(dev), ifp->if_serializer);
2195 #endif
2196
2197         if (sc->bnx_irq_type == PCI_INTR_TYPE_MSI) {
2198                 if (sc->bnx_flags & BNX_FLAG_ONESHOT_MSI) {
2199                         intr_func = bnx_msi_oneshot;
2200                         if (bootverbose)
2201                                 device_printf(dev, "oneshot MSI\n");
2202                 } else {
2203                         intr_func = bnx_msi;
2204                 }
2205         } else {
2206                 intr_func = bnx_intr_legacy;
2207         }
2208         error = bus_setup_intr(dev, sc->bnx_irq, INTR_MPSAFE, intr_func, sc,
2209             &sc->bnx_intrhand, ifp->if_serializer);
2210         if (error) {
2211                 ether_ifdetach(ifp);
2212                 device_printf(dev, "couldn't set up irq\n");
2213                 goto fail;
2214         }
2215
2216         sc->bnx_intr_cpuid = rman_get_cpuid(sc->bnx_irq);
2217         sc->bnx_stat_cpuid = sc->bnx_intr_cpuid;
2218         ifq_set_cpuid(&ifp->if_snd, sc->bnx_intr_cpuid);
2219
2220         return(0);
2221 fail:
2222         bnx_detach(dev);
2223         return(error);
2224 }
2225
2226 static int
2227 bnx_detach(device_t dev)
2228 {
2229         struct bnx_softc *sc = device_get_softc(dev);
2230
2231         if (device_is_attached(dev)) {
2232                 struct ifnet *ifp = &sc->arpcom.ac_if;
2233
2234                 lwkt_serialize_enter(ifp->if_serializer);
2235                 bnx_stop(sc);
2236                 bnx_reset(sc);
2237                 bus_teardown_intr(dev, sc->bnx_irq, sc->bnx_intrhand);
2238                 lwkt_serialize_exit(ifp->if_serializer);
2239
2240                 ether_ifdetach(ifp);
2241         }
2242
2243         if (sc->bnx_flags & BNX_FLAG_TBI)
2244                 ifmedia_removeall(&sc->bnx_ifmedia);
2245         if (sc->bnx_miibus)
2246                 device_delete_child(dev, sc->bnx_miibus);
2247         bus_generic_detach(dev);
2248
2249         if (sc->bnx_irq != NULL) {
2250                 bus_release_resource(dev, SYS_RES_IRQ, sc->bnx_irq_rid,
2251                     sc->bnx_irq);
2252         }
2253         if (sc->bnx_irq_type == PCI_INTR_TYPE_MSI)
2254                 pci_release_msi(dev);
2255
2256         if (sc->bnx_res != NULL) {
2257                 bus_release_resource(dev, SYS_RES_MEMORY,
2258                     BGE_PCI_BAR0, sc->bnx_res);
2259         }
2260
2261         if (sc->bnx_sysctl_tree != NULL)
2262                 sysctl_ctx_free(&sc->bnx_sysctl_ctx);
2263
2264         bnx_dma_free(sc);
2265
2266         return 0;
2267 }
2268
2269 static void
2270 bnx_reset(struct bnx_softc *sc)
2271 {
2272         device_t dev;
2273         uint32_t cachesize, command, pcistate, reset;
2274         void (*write_op)(struct bnx_softc *, uint32_t, uint32_t);
2275         int i, val = 0;
2276         uint16_t devctl;
2277
2278         dev = sc->bnx_dev;
2279
2280         if (sc->bnx_asicrev != BGE_ASICREV_BCM5906)
2281                 write_op = bnx_writemem_direct;
2282         else
2283                 write_op = bnx_writereg_ind;
2284
2285         /* Save some important PCI state. */
2286         cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
2287         command = pci_read_config(dev, BGE_PCI_CMD, 4);
2288         pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);
2289
2290         pci_write_config(dev, BGE_PCI_MISC_CTL,
2291             BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2292             BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW|
2293             BGE_PCIMISCCTL_TAGGED_STATUS, 4);
2294
2295         /* Disable fastboot on controllers that support it. */
2296         if (bootverbose)
2297                 if_printf(&sc->arpcom.ac_if, "Disabling fastboot\n");
2298         CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0x0);
2299
2300         /*
2301          * Write the magic number to SRAM at offset 0xB50.
2302          * When firmware finishes its initialization it will
2303          * write ~BGE_MAGIC_NUMBER to the same location.
2304          */
2305         bnx_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
2306
2307         reset = BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1);
2308
2309         /* XXX: Broadcom Linux driver. */
2310         /* Force PCI-E 1.0a mode */
2311         if (!BNX_IS_57765_PLUS(sc) &&
2312             CSR_READ_4(sc, BGE_PCIE_PHY_TSTCTL) ==
2313             (BGE_PCIE_PHY_TSTCTL_PSCRAM |
2314              BGE_PCIE_PHY_TSTCTL_PCIE10)) {
2315                 CSR_WRITE_4(sc, BGE_PCIE_PHY_TSTCTL,
2316                     BGE_PCIE_PHY_TSTCTL_PSCRAM);
2317         }
2318         if (sc->bnx_chipid != BGE_CHIPID_BCM5750_A0) {
2319                 /* Prevent PCIE link training during global reset */
2320                 CSR_WRITE_4(sc, BGE_MISC_CFG, (1<<29));
2321                 reset |= (1<<29);
2322         }
2323
2324         /* 
2325          * Set GPHY Power Down Override to leave GPHY
2326          * powered up in D0 uninitialized.
2327          */
2328         if ((sc->bnx_flags & BNX_FLAG_CPMU) == 0)
2329                 reset |= BGE_MISCCFG_GPHY_PD_OVERRIDE;
2330
2331         /* Issue global reset */
2332         write_op(sc, BGE_MISC_CFG, reset);
2333
2334         if (sc->bnx_asicrev == BGE_ASICREV_BCM5906) {
2335                 uint32_t status, ctrl;
2336
2337                 status = CSR_READ_4(sc, BGE_VCPU_STATUS);
2338                 CSR_WRITE_4(sc, BGE_VCPU_STATUS,
2339                     status | BGE_VCPU_STATUS_DRV_RESET);
2340                 ctrl = CSR_READ_4(sc, BGE_VCPU_EXT_CTRL);
2341                 CSR_WRITE_4(sc, BGE_VCPU_EXT_CTRL,
2342                     ctrl & ~BGE_VCPU_EXT_CTRL_HALT_CPU);
2343         }
2344
2345         DELAY(1000);
2346
2347         /* XXX: Broadcom Linux driver. */
2348         if (sc->bnx_chipid == BGE_CHIPID_BCM5750_A0) {
2349                 uint32_t v;
2350
2351                 DELAY(500000); /* wait for link training to complete */
2352                 v = pci_read_config(dev, 0xc4, 4);
2353                 pci_write_config(dev, 0xc4, v | (1<<15), 4);
2354         }
2355
2356         devctl = pci_read_config(dev, sc->bnx_pciecap + PCIER_DEVCTRL, 2);
2357
2358         /* Disable no snoop and disable relaxed ordering. */
2359         devctl &= ~(PCIEM_DEVCTL_RELAX_ORDER | PCIEM_DEVCTL_NOSNOOP);
2360
2361         /* Old PCI-E chips only support a 128-byte Max Payload Size. */
2362         if ((sc->bnx_flags & BNX_FLAG_CPMU) == 0) {
2363                 devctl &= ~PCIEM_DEVCTL_MAX_PAYLOAD_MASK;
2364                 devctl |= PCIEM_DEVCTL_MAX_PAYLOAD_128;
2365         }
2366
2367         pci_write_config(dev, sc->bnx_pciecap + PCIER_DEVCTRL,
2368             devctl, 2);
2369
2370         /* Clear error status. */
2371         pci_write_config(dev, sc->bnx_pciecap + PCIER_DEVSTS,
2372             PCIEM_DEVSTS_CORR_ERR |
2373             PCIEM_DEVSTS_NFATAL_ERR |
2374             PCIEM_DEVSTS_FATAL_ERR |
2375             PCIEM_DEVSTS_UNSUPP_REQ, 2);
2376
2377         /* Reset some of the PCI state that got zapped by reset */
2378         pci_write_config(dev, BGE_PCI_MISC_CTL,
2379             BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2380             BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW|
2381             BGE_PCIMISCCTL_TAGGED_STATUS, 4);
2382         pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
2383         pci_write_config(dev, BGE_PCI_CMD, command, 4);
2384         write_op(sc, BGE_MISC_CFG, (65 << 1));
2385
2386         /* Enable memory arbiter */
2387         CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
2388
2389         if (sc->bnx_asicrev == BGE_ASICREV_BCM5906) {
2390                 for (i = 0; i < BNX_TIMEOUT; i++) {
2391                         val = CSR_READ_4(sc, BGE_VCPU_STATUS);
2392                         if (val & BGE_VCPU_STATUS_INIT_DONE)
2393                                 break;
2394                         DELAY(100);
2395                 }
2396                 if (i == BNX_TIMEOUT) {
2397                         if_printf(&sc->arpcom.ac_if, "reset timed out\n");
2398                         return;
2399                 }
2400         } else {
2401                 /*
2402                  * Poll until we see the 1's complement of the magic number.
2403                  * This indicates that the firmware initialization
2404                  * is complete.
2405                  */
2406                 for (i = 0; i < BNX_FIRMWARE_TIMEOUT; i++) {
2407                         val = bnx_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
2408                         if (val == ~BGE_MAGIC_NUMBER)
2409                                 break;
2410                         DELAY(10);
2411                 }
2412                 if (i == BNX_FIRMWARE_TIMEOUT) {
2413                         if_printf(&sc->arpcom.ac_if, "firmware handshake "
2414                                   "timed out, found 0x%08x\n", val);
2415                 }
2416
2417                 /* BCM57765 A0 needs additional time before accessing. */
2418                 if (sc->bnx_chipid == BGE_CHIPID_BCM57765_A0)
2419                         DELAY(10 * 1000);
2420         }
2421
2422         /*
2423          * XXX Wait for the value of the PCISTATE register to
2424          * return to its original pre-reset state. This is a
2425          * fairly good indicator of reset completion. If we don't
2426          * wait for the reset to fully complete, trying to read
2427          * from the device's non-PCI registers may yield garbage
2428          * results.
2429          */
2430         for (i = 0; i < BNX_TIMEOUT; i++) {
2431                 if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate)
2432                         break;
2433                 DELAY(10);
2434         }
2435
2436         /* Fix up byte swapping */
2437         CSR_WRITE_4(sc, BGE_MODE_CTL, bnx_dma_swap_options(sc));
2438
2439         CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
2440
2441         /*
2442          * The 5704 in TBI mode apparently needs some special
2443          * adjustment to ensure the SERDES drive level is set
2444          * to 1.2V.
2445          */
2446         if (sc->bnx_asicrev == BGE_ASICREV_BCM5704 &&
2447             (sc->bnx_flags & BNX_FLAG_TBI)) {
2448                 uint32_t serdescfg;
2449
2450                 serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG);
2451                 serdescfg = (serdescfg & ~0xFFF) | 0x880;
2452                 CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg);
2453         }
2454
2455         CSR_WRITE_4(sc, BGE_MI_MODE,
2456             sc->bnx_mi_mode & ~BGE_MIMODE_AUTOPOLL);
2457         DELAY(80);
2458
2459         /* XXX: Broadcom Linux driver. */
2460         if (!BNX_IS_57765_PLUS(sc)) {
2461                 uint32_t v;
2462
2463                 /* Enable Data FIFO protection. */
2464                 v = CSR_READ_4(sc, BGE_PCIE_TLDLPL_PORT);
2465                 CSR_WRITE_4(sc, BGE_PCIE_TLDLPL_PORT, v | (1 << 25));
2466         }
2467
2468         DELAY(10000);
2469
2470         if (sc->bnx_asicrev == BGE_ASICREV_BCM5720) {
2471                 BNX_CLRBIT(sc, BGE_CPMU_CLCK_ORIDE,
2472                     CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
2473         }
2474 }
2475
2476 /*
2477  * Frame reception handling. This is called if there's a frame
2478  * on the receive return list.
2479  *
2480  * Note: we have to be able to handle two possibilities here:
2481  * 1) the frame is from the jumbo receive ring
2482  * 2) the frame is from the standard receive ring
2483  */
2484
2485 static void
2486 bnx_rxeof(struct bnx_softc *sc, uint16_t rx_prod, int count)
2487 {
2488         struct ifnet *ifp;
2489         int stdcnt = 0, jumbocnt = 0;
2490
2491         ifp = &sc->arpcom.ac_if;
2492
2493         while (sc->bnx_rx_saved_considx != rx_prod && count != 0) {
2494                 struct bge_rx_bd        *cur_rx;
2495                 uint32_t                rxidx;
2496                 struct mbuf             *m = NULL;
2497                 uint16_t                vlan_tag = 0;
2498                 int                     have_tag = 0;
2499
2500                 --count;
2501
2502                 cur_rx =
2503                     &sc->bnx_ldata.bnx_rx_return_ring[sc->bnx_rx_saved_considx];
2504
2505                 rxidx = cur_rx->bge_idx;
2506                 BNX_INC(sc->bnx_rx_saved_considx, sc->bnx_return_ring_cnt);
2507
2508                 if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
2509                         have_tag = 1;
2510                         vlan_tag = cur_rx->bge_vlan_tag;
2511                 }
2512
2513                 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
2514                         BNX_INC(sc->bnx_jumbo, BGE_JUMBO_RX_RING_CNT);
2515                         jumbocnt++;
2516
2517                         if (rxidx != sc->bnx_jumbo) {
2518                                 ifp->if_ierrors++;
2519                                 if_printf(ifp, "sw jumbo index(%d) "
2520                                     "and hw jumbo index(%d) mismatch, drop!\n",
2521                                     sc->bnx_jumbo, rxidx);
2522                                 bnx_setup_rxdesc_jumbo(sc, rxidx);
2523                                 continue;
2524                         }
2525
2526                         m = sc->bnx_cdata.bnx_rx_jumbo_chain[rxidx].bnx_mbuf;
2527                         if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2528                                 ifp->if_ierrors++;
2529                                 bnx_setup_rxdesc_jumbo(sc, sc->bnx_jumbo);
2530                                 continue;
2531                         }
2532                         if (bnx_newbuf_jumbo(sc, sc->bnx_jumbo, 0)) {
2533                                 ifp->if_ierrors++;
2534                                 bnx_setup_rxdesc_jumbo(sc, sc->bnx_jumbo);
2535                                 continue;
2536                         }
2537                 } else {
2538                         BNX_INC(sc->bnx_std, BGE_STD_RX_RING_CNT);
2539                         stdcnt++;
2540
2541                         if (rxidx != sc->bnx_std) {
2542                                 ifp->if_ierrors++;
2543                                 if_printf(ifp, "sw std index(%d) "
2544                                     "and hw std index(%d) mismatch, drop!\n",
2545                                     sc->bnx_std, rxidx);
2546                                 bnx_setup_rxdesc_std(sc, rxidx);
2547                                 continue;
2548                         }
2549
2550                         m = sc->bnx_cdata.bnx_rx_std_chain[rxidx].bnx_mbuf;
2551                         if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2552                                 ifp->if_ierrors++;
2553                                 bnx_setup_rxdesc_std(sc, sc->bnx_std);
2554                                 continue;
2555                         }
2556                         if (bnx_newbuf_std(sc, sc->bnx_std, 0)) {
2557                                 ifp->if_ierrors++;
2558                                 bnx_setup_rxdesc_std(sc, sc->bnx_std);
2559                                 continue;
2560                         }
2561                 }
2562
2563                 ifp->if_ipackets++;
2564                 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
2565                 m->m_pkthdr.rcvif = ifp;
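                /*
                 * bge_len as reported by the chip evidently includes
                 * the 4-byte Ethernet FCS, hence the ETHER_CRC_LEN
                 * adjustment above.
                 */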
2566
2567                 if ((ifp->if_capenable & IFCAP_RXCSUM) &&
2568                     (cur_rx->bge_flags & BGE_RXBDFLAG_IPV6) == 0) {
2569                         if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
2570                                 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2571                                 if ((cur_rx->bge_error_flag &
2572                                     BGE_RXERRFLAG_IP_CSUM_NOK) == 0)
2573                                         m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2574                         }
2575                         if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) {
2576                                 m->m_pkthdr.csum_data =
2577                                     cur_rx->bge_tcp_udp_csum;
2578                                 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
2579                                     CSUM_PSEUDO_HDR;
2580                         }
2581                 }
2582
2583                 /*
2584                  * If we received a packet with a vlan tag, pass it
2585                  * to vlan_input() instead of ether_input().
2586                  */
2587                 if (have_tag) {
2588                         m->m_flags |= M_VLANTAG;
2589                         m->m_pkthdr.ether_vlantag = vlan_tag;
2590                 }
2591                 ifp->if_input(ifp, m);
2592         }
2593
2594         bnx_writembx(sc, BGE_MBX_RX_CONS0_LO, sc->bnx_rx_saved_considx);
2595         if (stdcnt)
2596                 bnx_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bnx_std);
2597         if (jumbocnt)
2598                 bnx_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bnx_jumbo);
2599 }
2600
2601 static void
2602 bnx_txeof(struct bnx_softc *sc, uint16_t tx_cons)
2603 {
2604         struct ifnet *ifp;
2605
2606         ifp = &sc->arpcom.ac_if;
2607
2608         /*
2609          * Go through our tx ring and free mbufs for those
2610          * frames that have been sent.
2611          */
2612         while (sc->bnx_tx_saved_considx != tx_cons) {
2613                 uint32_t idx = 0;
2614
2615                 idx = sc->bnx_tx_saved_considx;
2616                 if (sc->bnx_cdata.bnx_tx_chain[idx] != NULL) {
2617                         ifp->if_opackets++;
2618                         bus_dmamap_unload(sc->bnx_cdata.bnx_tx_mtag,
2619                             sc->bnx_cdata.bnx_tx_dmamap[idx]);
2620                         m_freem(sc->bnx_cdata.bnx_tx_chain[idx]);
2621                         sc->bnx_cdata.bnx_tx_chain[idx] = NULL;
2622                 }
2623                 sc->bnx_txcnt--;
2624                 BNX_INC(sc->bnx_tx_saved_considx, BGE_TX_RING_CNT);
2625         }
2626
2627         if ((BGE_TX_RING_CNT - sc->bnx_txcnt) >=
2628             (BNX_NSEG_RSVD + BNX_NSEG_SPARE))
2629                 ifq_clr_oactive(&ifp->if_snd);
2630
2631         if (sc->bnx_txcnt == 0)
2632                 ifp->if_timer = 0;
2633
2634         if (!ifq_is_empty(&ifp->if_snd))
2635                 if_devstart(ifp);
2636 }
2637
2638 #ifdef IFPOLL_ENABLE
2639
2640 static void
2641 bnx_npoll(struct ifnet *ifp, struct ifpoll_info *info)
2642 {
2643         struct bnx_softc *sc = ifp->if_softc;
2644
2645         ASSERT_SERIALIZED(ifp->if_serializer);
2646
2647         if (info != NULL) {
2648                 int cpuid = sc->bnx_npoll.ifpc_cpuid;
2649
2650                 info->ifpi_rx[cpuid].poll_func = bnx_npoll_compat;
2651                 info->ifpi_rx[cpuid].arg = NULL;
2652                 info->ifpi_rx[cpuid].serializer = ifp->if_serializer;
2653
2654                 if (ifp->if_flags & IFF_RUNNING)
2655                         bnx_disable_intr(sc);
2656                 ifq_set_cpuid(&ifp->if_snd, cpuid);
2657         } else {
2658                 if (ifp->if_flags & IFF_RUNNING)
2659                         bnx_enable_intr(sc);
2660                 ifq_set_cpuid(&ifp->if_snd, sc->bnx_intr_cpuid);
2661         }
2662 }
2663
2664 static void
2665 bnx_npoll_compat(struct ifnet *ifp, void *arg __unused, int cycle)
2666 {
2667         struct bnx_softc *sc = ifp->if_softc;
2668         struct bge_status_block *sblk = sc->bnx_ldata.bnx_status_block;
2669         uint16_t rx_prod, tx_cons;
2670
2671         ASSERT_SERIALIZED(ifp->if_serializer);
2672
2673         if (sc->bnx_npoll.ifpc_stcount-- == 0) {
2674                 sc->bnx_npoll.ifpc_stcount = sc->bnx_npoll.ifpc_stfrac;
2675                 /*
2676                  * Process link state changes.
2677                  */
2678                 bnx_link_poll(sc);
2679         }
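        /*
         * The countdown above means bnx_link_poll() runs only once
         * every ifpc_stfrac + 1 polling cycles, keeping link-state
         * work off the per-packet polling path.
         */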
2680
2681         sc->bnx_status_tag = sblk->bge_status_tag;
2682
2683         /*
2684          * Use a load fence to ensure that status_tag is saved
2685          * before rx_prod and tx_cons.
2686          */
2687         cpu_lfence();
2688
2689         rx_prod = sblk->bge_idx[0].bge_rx_prod_idx;
2690         tx_cons = sblk->bge_idx[0].bge_tx_cons_idx;
2691
2692         if (sc->bnx_rx_saved_considx != rx_prod)
2693                 bnx_rxeof(sc, rx_prod, cycle);
2694
2695         if (sc->bnx_tx_saved_considx != tx_cons)
2696                 bnx_txeof(sc, tx_cons);
2697
2698         if (sc->bnx_coal_chg)
2699                 bnx_coal_change(sc);
2700 }
2701
2702 #endif  /* IFPOLL_ENABLE */
2703
2704 static void
2705 bnx_intr_legacy(void *xsc)
2706 {
2707         struct bnx_softc *sc = xsc;
2708         struct bge_status_block *sblk = sc->bnx_ldata.bnx_status_block;
2709
2710         if (sc->bnx_status_tag == sblk->bge_status_tag) {
2711                 uint32_t val;
2712
2713                 val = pci_read_config(sc->bnx_dev, BGE_PCI_PCISTATE, 4);
2714                 if (val & BGE_PCISTAT_INTR_NOTACT)
2715                         return;
2716         }
2717
2718         /*
2719          * NOTE:
2720          * Interrupt will have to be disabled if tagged status
2721          * is used, else interrupt will always be asserted on
2722          * certain chips (at least on BCM5750 AX/BX).
2723          */
2724         bnx_writembx(sc, BGE_MBX_IRQ0_LO, 1);
2725
2726         bnx_intr(sc);
2727 }
2728
2729 static void
2730 bnx_msi(void *xsc)
2731 {
2732         struct bnx_softc *sc = xsc;
2733
2734         /* Disable interrupt first */
2735         bnx_writembx(sc, BGE_MBX_IRQ0_LO, 1);
2736         bnx_intr(sc);
2737 }
2738
2739 static void
2740 bnx_msi_oneshot(void *xsc)
2741 {
2742         bnx_intr(xsc);
2743 }
2744
2745 static void
2746 bnx_intr(struct bnx_softc *sc)
2747 {
2748         struct ifnet *ifp = &sc->arpcom.ac_if;
2749         struct bge_status_block *sblk = sc->bnx_ldata.bnx_status_block;
2750         uint16_t rx_prod, tx_cons;
2751         uint32_t status;
2752
2753         sc->bnx_status_tag = sblk->bge_status_tag;
        /*
         * Use a load fence to ensure that status_tag is saved
         * before rx_prod, tx_cons and status.
         */
2758         cpu_lfence();
2759
2760         rx_prod = sblk->bge_idx[0].bge_rx_prod_idx;
2761         tx_cons = sblk->bge_idx[0].bge_tx_cons_idx;
2762         status = sblk->bge_status;
2763
2764         if ((status & BGE_STATFLAG_LINKSTATE_CHANGED) || sc->bnx_link_evt)
2765                 bnx_link_poll(sc);
2766
2767         if (ifp->if_flags & IFF_RUNNING) {
2768                 if (sc->bnx_rx_saved_considx != rx_prod)
2769                         bnx_rxeof(sc, rx_prod, -1);
2770
2771                 if (sc->bnx_tx_saved_considx != tx_cons)
2772                         bnx_txeof(sc, tx_cons);
2773         }
2774
2775         bnx_writembx(sc, BGE_MBX_IRQ0_LO, sc->bnx_status_tag << 24);
2776
2777         if (sc->bnx_coal_chg)
2778                 bnx_coal_change(sc);
2779 }
2780
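/*
 * Once-a-second timer: update the statistics counters, poll the link
 * via mii_tick() while a copper link is down, and for TBI fiber
 * force a link check every tick through a host coalescing interrupt.
 */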
2781 static void
2782 bnx_tick(void *xsc)
2783 {
2784         struct bnx_softc *sc = xsc;
2785         struct ifnet *ifp = &sc->arpcom.ac_if;
2786
2787         lwkt_serialize_enter(ifp->if_serializer);
2788
2789         KKASSERT(mycpuid == sc->bnx_stat_cpuid);
2790
2791         bnx_stats_update_regs(sc);
2792
2793         if (sc->bnx_flags & BNX_FLAG_TBI) {
                /*
                 * Auto-polling can't be used in TBI mode, so poll the
                 * link status manually: register a pending link event
                 * and trigger an interrupt to process it.
                 */
2799                 sc->bnx_link_evt++;
2800                 BNX_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
2801         } else if (!sc->bnx_link) {
2802                 mii_tick(device_get_softc(sc->bnx_miibus));
2803         }
2804
2805         callout_reset(&sc->bnx_stat_timer, hz, bnx_tick, sc);
2806
2807         lwkt_serialize_exit(ifp->if_serializer);
2808 }
2809
2810 static void
2811 bnx_stats_update_regs(struct bnx_softc *sc)
2812 {
2813         struct ifnet *ifp = &sc->arpcom.ac_if;
2814         struct bge_mac_stats_regs stats;
2815         uint32_t *s;
2816         int i;
2817
2818         s = (uint32_t *)&stats;
2819         for (i = 0; i < sizeof(struct bge_mac_stats_regs); i += 4) {
2820                 *s = CSR_READ_4(sc, BGE_RX_STATS + i);
2821                 s++;
2822         }
2823
2824         ifp->if_collisions +=
2825            (stats.dot3StatsSingleCollisionFrames +
2826            stats.dot3StatsMultipleCollisionFrames +
2827            stats.dot3StatsExcessiveCollisions +
2828            stats.dot3StatsLateCollisions) -
2829            ifp->if_collisions;
2830 }
2831
/*
 * Encapsulate an mbuf chain in the TX ring by coupling the mbuf data
 * pointers to descriptors.
 */
2836 static int
2837 bnx_encap(struct bnx_softc *sc, struct mbuf **m_head0, uint32_t *txidx,
2838     int *segs_used)
2839 {
2840         struct bge_tx_bd *d = NULL;
2841         uint16_t csum_flags = 0, vlan_tag = 0, mss = 0;
2842         bus_dma_segment_t segs[BNX_NSEG_NEW];
2843         bus_dmamap_t map;
2844         int error, maxsegs, nsegs, idx, i;
2845         struct mbuf *m_head = *m_head0, *m_new;
2846
2847         if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
2848 #ifdef BNX_TSO_DEBUG
2849                 int tso_nsegs;
2850 #endif
2851
2852                 error = bnx_setup_tso(sc, m_head0, &mss, &csum_flags);
2853                 if (error)
2854                         return error;
2855                 m_head = *m_head0;
2856
2857 #ifdef BNX_TSO_DEBUG
2858                 tso_nsegs = (m_head->m_pkthdr.len /
2859                     m_head->m_pkthdr.tso_segsz) - 1;
2860                 if (tso_nsegs > (BNX_TSO_NSTATS - 1))
2861                         tso_nsegs = BNX_TSO_NSTATS - 1;
2862                 else if (tso_nsegs < 0)
2863                         tso_nsegs = 0;
2864                 sc->bnx_tsosegs[tso_nsegs]++;
2865 #endif
2866         } else if (m_head->m_pkthdr.csum_flags & BNX_CSUM_FEATURES) {
2867                 if (m_head->m_pkthdr.csum_flags & CSUM_IP)
2868                         csum_flags |= BGE_TXBDFLAG_IP_CSUM;
2869                 if (m_head->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
2870                         csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
2871                 if (m_head->m_flags & M_LASTFRAG)
2872                         csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
2873                 else if (m_head->m_flags & M_FRAG)
2874                         csum_flags |= BGE_TXBDFLAG_IP_FRAG;
2875         }
2876         if (m_head->m_flags & M_VLANTAG) {
2877                 csum_flags |= BGE_TXBDFLAG_VLAN_TAG;
2878                 vlan_tag = m_head->m_pkthdr.ether_vlantag;
2879         }
2880
2881         idx = *txidx;
2882         map = sc->bnx_cdata.bnx_tx_dmamap[idx];
2883
2884         maxsegs = (BGE_TX_RING_CNT - sc->bnx_txcnt) - BNX_NSEG_RSVD;
2885         KASSERT(maxsegs >= BNX_NSEG_SPARE,
2886                 ("not enough segments %d", maxsegs));
2887
2888         if (maxsegs > BNX_NSEG_NEW)
2889                 maxsegs = BNX_NSEG_NEW;
2890
        /*
         * Pad outbound frames to BNX_MIN_FRAMELEN for an unusual reason.
         * The bge hardware will pad out Tx runts to BGE_MIN_FRAMELEN,
         * but when such padded frames employ the bge IP/TCP checksum
         * offload, the hardware checksum assist gives incorrect results
         * (possibly from incorporating its own padding into the UDP/TCP
         * checksum; who knows).  If we pad such runts with zeros, the
         * onboard checksum comes out correct.
         */
2900         if ((csum_flags & BGE_TXBDFLAG_TCP_UDP_CSUM) &&
2901             m_head->m_pkthdr.len < BNX_MIN_FRAMELEN) {
2902                 error = m_devpad(m_head, BNX_MIN_FRAMELEN);
2903                 if (error)
2904                         goto back;
2905         }
2906
2907         if ((sc->bnx_flags & BNX_FLAG_SHORTDMA) && m_head->m_next != NULL) {
2908                 m_new = bnx_defrag_shortdma(m_head);
2909                 if (m_new == NULL) {
2910                         error = ENOBUFS;
2911                         goto back;
2912                 }
2913                 *m_head0 = m_head = m_new;
2914         }
2915         if ((m_head->m_pkthdr.csum_flags & CSUM_TSO) == 0 &&
2916             sc->bnx_force_defrag && m_head->m_next != NULL) {
                /*
                 * Forcefully defragment the mbuf chain to overcome a
                 * hardware limitation which only supports a single
                 * outstanding DMA read operation.  If defragmentation
                 * fails, keep going with the original mbuf chain.
                 */
2923                 m_new = m_defrag(m_head, MB_DONTWAIT);
2924                 if (m_new != NULL)
2925                         *m_head0 = m_head = m_new;
2926         }
2927
2928         error = bus_dmamap_load_mbuf_defrag(sc->bnx_cdata.bnx_tx_mtag, map,
2929                         m_head0, segs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
2930         if (error)
2931                 goto back;
2932         *segs_used += nsegs;
2933
2934         m_head = *m_head0;
2935         bus_dmamap_sync(sc->bnx_cdata.bnx_tx_mtag, map, BUS_DMASYNC_PREWRITE);
2936
2937         for (i = 0; ; i++) {
2938                 d = &sc->bnx_ldata.bnx_tx_ring[idx];
2939
2940                 d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr);
2941                 d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr);
2942                 d->bge_len = segs[i].ds_len;
2943                 d->bge_flags = csum_flags;
2944                 d->bge_vlan_tag = vlan_tag;
2945                 d->bge_mss = mss;
2946
2947                 if (i == nsegs - 1)
2948                         break;
2949                 BNX_INC(idx, BGE_TX_RING_CNT);
2950         }
2951         /* Mark the last segment as end of packet... */
2952         d->bge_flags |= BGE_TXBDFLAG_END;
2953
        /*
         * Ensure that the map for this transmission is placed at
         * the array index of the last descriptor in this chain.
         */
2958         sc->bnx_cdata.bnx_tx_dmamap[*txidx] = sc->bnx_cdata.bnx_tx_dmamap[idx];
2959         sc->bnx_cdata.bnx_tx_dmamap[idx] = map;
2960         sc->bnx_cdata.bnx_tx_chain[idx] = m_head;
2961         sc->bnx_txcnt += nsegs;
2962
2963         BNX_INC(idx, BGE_TX_RING_CNT);
2964         *txidx = idx;
2965 back:
2966         if (error) {
2967                 m_freem(*m_head0);
2968                 *m_head0 = NULL;
2969         }
2970         return error;
2971 }
2972
2973 /*
2974  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
2975  * to the mbuf data regions directly in the transmit descriptors.
2976  */
2977 static void
2978 bnx_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
2979 {
2980         struct bnx_softc *sc = ifp->if_softc;
2981         struct mbuf *m_head = NULL;
2982         uint32_t prodidx;
2983         int nsegs = 0;
2984
2985         ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq);
2986
2987         if ((ifp->if_flags & IFF_RUNNING) == 0 || ifq_is_oactive(&ifp->if_snd))
2988                 return;
2989
2990         prodidx = sc->bnx_tx_prodidx;
2991
2992         while (sc->bnx_cdata.bnx_tx_chain[prodidx] == NULL) {
                /*
                 * Sanity check: avoid coming within BNX_NSEG_RSVD
                 * descriptors of the end of the ring.  Also make
                 * sure there are BNX_NSEG_SPARE descriptors left for
                 * defragmenting jumbo buffers or TSO segments.
                 */
2999                 if ((BGE_TX_RING_CNT - sc->bnx_txcnt) <
3000                     (BNX_NSEG_RSVD + BNX_NSEG_SPARE)) {
3001                         ifq_set_oactive(&ifp->if_snd);
3002                         break;
3003                 }
3004
3005                 m_head = ifq_dequeue(&ifp->if_snd, NULL);
3006                 if (m_head == NULL)
3007                         break;
3008
3009                 /*
3010                  * Pack the data into the transmit ring. If we
3011                  * don't have room, set the OACTIVE flag and wait
3012                  * for the NIC to drain the ring.
3013                  */
3014                 if (bnx_encap(sc, &m_head, &prodidx, &nsegs)) {
3015                         ifq_set_oactive(&ifp->if_snd);
3016                         ifp->if_oerrors++;
3017                         break;
3018                 }
3019
3020                 if (nsegs >= sc->bnx_tx_wreg) {
3021                         /* Transmit */
3022                         bnx_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
3023                         nsegs = 0;
3024                 }
3025
3026                 ETHER_BPF_MTAP(ifp, m_head);
3027
3028                 /*
3029                  * Set a timeout in case the chip goes out to lunch.
3030                  */
3031                 ifp->if_timer = 5;
3032         }
3033
3034         if (nsegs > 0) {
3035                 /* Transmit */
3036                 bnx_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
3037         }
3038         sc->bnx_tx_prodidx = prodidx;
3039 }
3040
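/*
 * (Re)initialize the chip: stop and reset it, program the MTU, MAC
 * address and RX filters, set up the RX/TX rings, then enable the
 * transmitter, receiver and interrupts and start the stats timer.
 */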
3041 static void
3042 bnx_init(void *xsc)
3043 {
3044         struct bnx_softc *sc = xsc;
3045         struct ifnet *ifp = &sc->arpcom.ac_if;
3046         uint16_t *m;
3047         uint32_t mode;
3048
3049         ASSERT_SERIALIZED(ifp->if_serializer);
3050
3051         /* Cancel pending I/O and flush buffers. */
3052         bnx_stop(sc);
3053         bnx_reset(sc);
3054         bnx_chipinit(sc);
3055
3056         /*
3057          * Init the various state machines, ring
3058          * control blocks and firmware.
3059          */
3060         if (bnx_blockinit(sc)) {
3061                 if_printf(ifp, "initialization failure\n");
3062                 bnx_stop(sc);
3063                 return;
3064         }
3065
3066         /* Specify MTU. */
3067         CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
3068             ETHER_HDR_LEN + ETHER_CRC_LEN + EVL_ENCAPLEN);
3069
3070         /* Load our MAC address. */
3071         m = (uint16_t *)&sc->arpcom.ac_enaddr[0];
3072         CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
3073         CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
3074
3075         /* Enable or disable promiscuous mode as needed. */
3076         bnx_setpromisc(sc);
3077
3078         /* Program multicast filter. */
3079         bnx_setmulti(sc);
3080
3081         /* Init RX ring. */
3082         if (bnx_init_rx_ring_std(sc)) {
3083                 if_printf(ifp, "RX ring initialization failed\n");
3084                 bnx_stop(sc);
3085                 return;
3086         }
3087
3088         /* Init jumbo RX ring. */
3089         if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN)) {
3090                 if (bnx_init_rx_ring_jumbo(sc)) {
3091                         if_printf(ifp, "Jumbo RX ring initialization failed\n");
3092                         bnx_stop(sc);
3093                         return;
3094                 }
3095         }
3096
3097         /* Init our RX return ring index */
3098         sc->bnx_rx_saved_considx = 0;
3099
3100         /* Init TX ring. */
3101         bnx_init_tx_ring(sc);
3102
3103         /* Enable TX MAC state machine lockup fix. */
3104         mode = CSR_READ_4(sc, BGE_TX_MODE);
3105         mode |= BGE_TXMODE_MBUF_LOCKUP_FIX;
3106         if (sc->bnx_asicrev == BGE_ASICREV_BCM5720) {
3107                 mode &= ~(BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE);
3108                 mode |= CSR_READ_4(sc, BGE_TX_MODE) &
3109                     (BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE);
3110         }
3111         /* Turn on transmitter */
3112         CSR_WRITE_4(sc, BGE_TX_MODE, mode | BGE_TXMODE_ENABLE);
3113
3114         /* Turn on receiver */
3115         BNX_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
3116
3117         /*
3118          * Set the number of good frames to receive after RX MBUF
3119          * Low Watermark has been reached.  After the RX MAC receives
3120          * this number of frames, it will drop subsequent incoming
3121          * frames until the MBUF High Watermark is reached.
3122          */
3123         if (BNX_IS_57765_FAMILY(sc))
3124                 CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 1);
3125         else
3126                 CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 2);
3127
3128         if (sc->bnx_irq_type == PCI_INTR_TYPE_MSI) {
3129                 if (bootverbose) {
3130                         if_printf(ifp, "MSI_MODE: %#x\n",
3131                             CSR_READ_4(sc, BGE_MSI_MODE));
3132                 }
3133         }
3134
3135         /* Tell firmware we're alive. */
3136         BNX_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3137
3138         /* Enable host interrupts if polling(4) is not enabled. */
3139         PCI_SETBIT(sc->bnx_dev, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA, 4);
3140 #ifdef IFPOLL_ENABLE
3141         if (ifp->if_flags & IFF_NPOLLING)
3142                 bnx_disable_intr(sc);
3143         else
3144 #endif
3145         bnx_enable_intr(sc);
3146
3147         bnx_ifmedia_upd(ifp);
3148
3149         ifp->if_flags |= IFF_RUNNING;
3150         ifq_clr_oactive(&ifp->if_snd);
3151
3152         callout_reset_bycpu(&sc->bnx_stat_timer, hz, bnx_tick, sc,
3153             sc->bnx_stat_cpuid);
3154 }
3155
3156 /*
3157  * Set media options.
3158  */
3159 static int
3160 bnx_ifmedia_upd(struct ifnet *ifp)
3161 {
3162         struct bnx_softc *sc = ifp->if_softc;
3163
3164         /* If this is a 1000baseX NIC, enable the TBI port. */
3165         if (sc->bnx_flags & BNX_FLAG_TBI) {
3166                 struct ifmedia *ifm = &sc->bnx_ifmedia;
3167
3168                 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
3169                         return(EINVAL);
3170
3171                 switch(IFM_SUBTYPE(ifm->ifm_media)) {
3172                 case IFM_AUTO:
3173                         break;
3174
3175                 case IFM_1000_SX:
3176                         if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
3177                                 BNX_CLRBIT(sc, BGE_MAC_MODE,
3178                                     BGE_MACMODE_HALF_DUPLEX);
3179                         } else {
3180                                 BNX_SETBIT(sc, BGE_MAC_MODE,
3181                                     BGE_MACMODE_HALF_DUPLEX);
3182                         }
3183                         break;
3184                 default:
3185                         return(EINVAL);
3186                 }
3187         } else {
3188                 struct mii_data *mii = device_get_softc(sc->bnx_miibus);
3189
3190                 sc->bnx_link_evt++;
3191                 sc->bnx_link = 0;
3192                 if (mii->mii_instance) {
3193                         struct mii_softc *miisc;
3194
3195                         LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
3196                                 mii_phy_reset(miisc);
3197                 }
3198                 mii_mediachg(mii);
3199
                /*
                 * Force an interrupt so that we will call bnx_link_upd
                 * if needed and clear any pending link state attention.
                 * Without this we get no further interrupts for link
                 * state changes, so the link will never come UP and we
                 * will never be able to send from bnx_start; the only
                 * thing that would revive it is receiving a packet and
                 * taking an RX interrupt.
                 *
                 * bnx_tick should help for fiber cards, and we might not
                 * need to do this here if BNX_FLAG_TBI is set, but since
                 * we poll for fiber anyway it should do no harm.
                 */
3213                 BNX_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
3214         }
3215         return(0);
3216 }
3217
3218 /*
3219  * Report current media status.
3220  */
3221 static void
3222 bnx_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
3223 {
3224         struct bnx_softc *sc = ifp->if_softc;
3225
3226         if (sc->bnx_flags & BNX_FLAG_TBI) {
3227                 ifmr->ifm_status = IFM_AVALID;
3228                 ifmr->ifm_active = IFM_ETHER;
3229                 if (CSR_READ_4(sc, BGE_MAC_STS) &
3230                     BGE_MACSTAT_TBI_PCS_SYNCHED) {
3231                         ifmr->ifm_status |= IFM_ACTIVE;
3232                 } else {
3233                         ifmr->ifm_active |= IFM_NONE;
3234                         return;
3235                 }
3236
3237                 ifmr->ifm_active |= IFM_1000_SX;
3238                 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
                        ifmr->ifm_active |= IFM_HDX;
3240                 else
3241                         ifmr->ifm_active |= IFM_FDX;
3242         } else {
3243                 struct mii_data *mii = device_get_softc(sc->bnx_miibus);
3244
3245                 mii_pollstat(mii);
3246                 ifmr->ifm_active = mii->mii_media_active;
3247                 ifmr->ifm_status = mii->mii_media_status;
3248         }
3249 }
3250
3251 static int
3252 bnx_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
3253 {
3254         struct bnx_softc *sc = ifp->if_softc;
3255         struct ifreq *ifr = (struct ifreq *)data;
3256         int mask, error = 0;
3257
3258         ASSERT_SERIALIZED(ifp->if_serializer);
3259
3260         switch (command) {
3261         case SIOCSIFMTU:
3262                 if ((!BNX_IS_JUMBO_CAPABLE(sc) && ifr->ifr_mtu > ETHERMTU) ||
3263                     (BNX_IS_JUMBO_CAPABLE(sc) &&
3264                      ifr->ifr_mtu > BNX_JUMBO_MTU)) {
3265                         error = EINVAL;
3266                 } else if (ifp->if_mtu != ifr->ifr_mtu) {
3267                         ifp->if_mtu = ifr->ifr_mtu;
3268                         if (ifp->if_flags & IFF_RUNNING)
3269                                 bnx_init(sc);
3270                 }
3271                 break;
3272         case SIOCSIFFLAGS:
3273                 if (ifp->if_flags & IFF_UP) {
3274                         if (ifp->if_flags & IFF_RUNNING) {
3275                                 mask = ifp->if_flags ^ sc->bnx_if_flags;
3276
3277                                 /*
3278                                  * If only the state of the PROMISC flag
3279                                  * changed, then just use the 'set promisc
3280                                  * mode' command instead of reinitializing
3281                                  * the entire NIC. Doing a full re-init
3282                                  * means reloading the firmware and waiting
3283                                  * for it to start up, which may take a
3284                                  * second or two.  Similarly for ALLMULTI.
3285                                  */
3286                                 if (mask & IFF_PROMISC)
3287                                         bnx_setpromisc(sc);
3288                                 if (mask & IFF_ALLMULTI)
3289                                         bnx_setmulti(sc);
3290                         } else {
3291                                 bnx_init(sc);
3292                         }
3293                 } else if (ifp->if_flags & IFF_RUNNING) {
3294                         bnx_stop(sc);
3295                 }
3296                 sc->bnx_if_flags = ifp->if_flags;
3297                 break;
3298         case SIOCADDMULTI:
3299         case SIOCDELMULTI:
3300                 if (ifp->if_flags & IFF_RUNNING)
3301                         bnx_setmulti(sc);
3302                 break;
3303         case SIOCSIFMEDIA:
3304         case SIOCGIFMEDIA:
3305                 if (sc->bnx_flags & BNX_FLAG_TBI) {
3306                         error = ifmedia_ioctl(ifp, ifr,
3307                             &sc->bnx_ifmedia, command);
3308                 } else {
3309                         struct mii_data *mii;
3310
3311                         mii = device_get_softc(sc->bnx_miibus);
3312                         error = ifmedia_ioctl(ifp, ifr,
3313                                               &mii->mii_media, command);
3314                 }
3315                 break;
3316         case SIOCSIFCAP:
3317                 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
3318                 if (mask & IFCAP_HWCSUM) {
3319                         ifp->if_capenable ^= (mask & IFCAP_HWCSUM);
3320                         if (ifp->if_capenable & IFCAP_TXCSUM)
3321                                 ifp->if_hwassist |= BNX_CSUM_FEATURES;
3322                         else
3323                                 ifp->if_hwassist &= ~BNX_CSUM_FEATURES;
3324                 }
3325                 if (mask & IFCAP_TSO) {
3326                         ifp->if_capenable ^= (mask & IFCAP_TSO);
3327                         if (ifp->if_capenable & IFCAP_TSO)
3328                                 ifp->if_hwassist |= CSUM_TSO;
3329                         else
3330                                 ifp->if_hwassist &= ~CSUM_TSO;
3331                 }
3332                 break;
3333         default:
3334                 error = ether_ioctl(ifp, command, data);
3335                 break;
3336         }
3337         return error;
3338 }
3339
3340 static void
3341 bnx_watchdog(struct ifnet *ifp)
3342 {
3343         struct bnx_softc *sc = ifp->if_softc;
3344
3345         if_printf(ifp, "watchdog timeout -- resetting\n");
3346
3347         bnx_init(sc);
3348
3349         ifp->if_oerrors++;
3350
3351         if (!ifq_is_empty(&ifp->if_snd))
3352                 if_devstart(ifp);
3353 }
3354
3355 /*
3356  * Stop the adapter and free any mbufs allocated to the
3357  * RX and TX lists.
3358  */
3359 static void
3360 bnx_stop(struct bnx_softc *sc)
3361 {
3362         struct ifnet *ifp = &sc->arpcom.ac_if;
3363
3364         ASSERT_SERIALIZED(ifp->if_serializer);
3365
3366         callout_stop(&sc->bnx_stat_timer);
3367
3368         /*
3369          * Disable all of the receiver blocks
3370          */
3371         bnx_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
3372         bnx_stop_block(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
3373         bnx_stop_block(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
3374         bnx_stop_block(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
3375         bnx_stop_block(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
3376         bnx_stop_block(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
3377
3378         /*
3379          * Disable all of the transmit blocks
3380          */
3381         bnx_stop_block(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
3382         bnx_stop_block(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
3383         bnx_stop_block(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
3384         bnx_stop_block(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
3385         bnx_stop_block(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
3386         bnx_stop_block(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
3387
3388         /*
3389          * Shut down all of the memory managers and related
3390          * state machines.
3391          */
3392         bnx_stop_block(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
3393         bnx_stop_block(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
3394         CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
3395         CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
3396
3397         /* Disable host interrupts. */
3398         bnx_disable_intr(sc);
3399
3400         /*
3401          * Tell firmware we're shutting down.
3402          */
3403         BNX_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3404
3405         /* Free the RX lists. */
3406         bnx_free_rx_ring_std(sc);
3407
3408         /* Free jumbo RX list. */
3409         if (BNX_IS_JUMBO_CAPABLE(sc))
3410                 bnx_free_rx_ring_jumbo(sc);
3411
3412         /* Free TX buffers. */
3413         bnx_free_tx_ring(sc);
3414
3415         sc->bnx_status_tag = 0;
3416         sc->bnx_link = 0;
3417         sc->bnx_coal_chg = 0;
3418
3419         sc->bnx_tx_saved_considx = BNX_TXCONS_UNSET;
3420
3421         ifp->if_flags &= ~IFF_RUNNING;
3422         ifq_clr_oactive(&ifp->if_snd);
3423         ifp->if_timer = 0;
3424 }
3425
3426 /*
3427  * Stop all chip I/O so that the kernel's probe routines don't
3428  * get confused by errant DMAs when rebooting.
3429  */
3430 static void
3431 bnx_shutdown(device_t dev)
3432 {
3433         struct bnx_softc *sc = device_get_softc(dev);
3434         struct ifnet *ifp = &sc->arpcom.ac_if;
3435
3436         lwkt_serialize_enter(ifp->if_serializer);
3437         bnx_stop(sc);
3438         bnx_reset(sc);
3439         lwkt_serialize_exit(ifp->if_serializer);
3440 }
3441
3442 static int
3443 bnx_suspend(device_t dev)
3444 {
3445         struct bnx_softc *sc = device_get_softc(dev);
3446         struct ifnet *ifp = &sc->arpcom.ac_if;
3447
3448         lwkt_serialize_enter(ifp->if_serializer);
3449         bnx_stop(sc);
3450         lwkt_serialize_exit(ifp->if_serializer);
3451
3452         return 0;
3453 }
3454
3455 static int
3456 bnx_resume(device_t dev)
3457 {
3458         struct bnx_softc *sc = device_get_softc(dev);
3459         struct ifnet *ifp = &sc->arpcom.ac_if;
3460
3461         lwkt_serialize_enter(ifp->if_serializer);
3462
3463         if (ifp->if_flags & IFF_UP) {
3464                 bnx_init(sc);
3465
3466                 if (!ifq_is_empty(&ifp->if_snd))
3467                         if_devstart(ifp);
3468         }
3469
3470         lwkt_serialize_exit(ifp->if_serializer);
3471
3472         return 0;
3473 }
3474
3475 static void
3476 bnx_setpromisc(struct bnx_softc *sc)
3477 {
3478         struct ifnet *ifp = &sc->arpcom.ac_if;
3479
3480         if (ifp->if_flags & IFF_PROMISC)
3481                 BNX_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
3482         else
3483                 BNX_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
3484 }
3485
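/*
 * Tear down everything bnx_dma_alloc() created: the per-mbuf DMA
 * maps and tags, the ring and status block memory, and finally the
 * parent DMA tag.
 */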
3486 static void
3487 bnx_dma_free(struct bnx_softc *sc)
3488 {
3489         int i;
3490
        /* Destroy RX mbuf DMA maps and tag. */
3492         if (sc->bnx_cdata.bnx_rx_mtag != NULL) {
3493                 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
3494                         bus_dmamap_destroy(sc->bnx_cdata.bnx_rx_mtag,
3495                             sc->bnx_cdata.bnx_rx_std_dmamap[i]);
3496                 }
3497                 bus_dmamap_destroy(sc->bnx_cdata.bnx_rx_mtag,
3498                                    sc->bnx_cdata.bnx_rx_tmpmap);
3499                 bus_dma_tag_destroy(sc->bnx_cdata.bnx_rx_mtag);
3500         }
3501
        /* Destroy TX mbuf DMA maps and tag. */
3503         if (sc->bnx_cdata.bnx_tx_mtag != NULL) {
3504                 for (i = 0; i < BGE_TX_RING_CNT; i++) {
3505                         bus_dmamap_destroy(sc->bnx_cdata.bnx_tx_mtag,
3506                             sc->bnx_cdata.bnx_tx_dmamap[i]);
3507                 }
3508                 bus_dma_tag_destroy(sc->bnx_cdata.bnx_tx_mtag);
3509         }
3510
3511         /* Destroy standard RX ring */
3512         bnx_dma_block_free(sc->bnx_cdata.bnx_rx_std_ring_tag,
3513                            sc->bnx_cdata.bnx_rx_std_ring_map,
3514                            sc->bnx_ldata.bnx_rx_std_ring);
3515
3516         if (BNX_IS_JUMBO_CAPABLE(sc))
3517                 bnx_free_jumbo_mem(sc);
3518
3519         /* Destroy RX return ring */
3520         bnx_dma_block_free(sc->bnx_cdata.bnx_rx_return_ring_tag,
3521                            sc->bnx_cdata.bnx_rx_return_ring_map,
3522                            sc->bnx_ldata.bnx_rx_return_ring);
3523
3524         /* Destroy TX ring */
3525         bnx_dma_block_free(sc->bnx_cdata.bnx_tx_ring_tag,
3526                            sc->bnx_cdata.bnx_tx_ring_map,
3527                            sc->bnx_ldata.bnx_tx_ring);
3528
3529         /* Destroy status block */
3530         bnx_dma_block_free(sc->bnx_cdata.bnx_status_tag,
3531                            sc->bnx_cdata.bnx_status_map,
3532                            sc->bnx_ldata.bnx_status_block);
3533
3534         /* Destroy the parent tag */
3535         if (sc->bnx_cdata.bnx_parent_tag != NULL)
3536                 bus_dma_tag_destroy(sc->bnx_cdata.bnx_parent_tag);
3537 }
3538
3539 static int
3540 bnx_dma_alloc(struct bnx_softc *sc)
3541 {
3542         struct ifnet *ifp = &sc->arpcom.ac_if;
3543         bus_size_t txmaxsz;
3544         int i, error;
3545
        /*
         * Allocate the parent bus DMA tag appropriate for PCI.
         *
         * All of the NetExtreme/NetLink controllers have a 4GB boundary
         * DMA bug: whenever a DMA transfer crosses a multiple of the 4GB
         * boundary (4GB, 8GB, 12GB, etc.), i.e. makes the transition
         * from 0xX_FFFF_FFFF to 0x(X+1)_0000_0000, an internal DMA
         * state machine locks up and causes the device to hang.
         */
3556         error = bus_dma_tag_create(NULL, 1, BGE_DMA_BOUNDARY_4G,
3557                                    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3558                                    NULL, NULL,
3559                                    BUS_SPACE_MAXSIZE_32BIT, 0,
3560                                    BUS_SPACE_MAXSIZE_32BIT,
3561                                    0, &sc->bnx_cdata.bnx_parent_tag);
3562         if (error) {
3563                 if_printf(ifp, "could not allocate parent dma tag\n");
3564                 return error;
3565         }
3566
3567         /*
3568          * Create DMA tag and maps for RX mbufs.
3569          */
3570         error = bus_dma_tag_create(sc->bnx_cdata.bnx_parent_tag, 1, 0,
3571                                    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3572                                    NULL, NULL, MCLBYTES, 1, MCLBYTES,
3573                                    BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK,
3574                                    &sc->bnx_cdata.bnx_rx_mtag);
3575         if (error) {
3576                 if_printf(ifp, "could not allocate RX mbuf dma tag\n");
3577                 return error;
3578         }
3579
3580         error = bus_dmamap_create(sc->bnx_cdata.bnx_rx_mtag,
3581                                   BUS_DMA_WAITOK, &sc->bnx_cdata.bnx_rx_tmpmap);
3582         if (error) {
3583                 bus_dma_tag_destroy(sc->bnx_cdata.bnx_rx_mtag);
3584                 sc->bnx_cdata.bnx_rx_mtag = NULL;
3585                 return error;
3586         }
3587
3588         for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
3589                 error = bus_dmamap_create(sc->bnx_cdata.bnx_rx_mtag,
3590                                           BUS_DMA_WAITOK,
3591                                           &sc->bnx_cdata.bnx_rx_std_dmamap[i]);
3592                 if (error) {
3593                         int j;
3594
3595                         for (j = 0; j < i; ++j) {
3596                                 bus_dmamap_destroy(sc->bnx_cdata.bnx_rx_mtag,
3597                                         sc->bnx_cdata.bnx_rx_std_dmamap[j]);
3598                         }
3599                         bus_dma_tag_destroy(sc->bnx_cdata.bnx_rx_mtag);
3600                         sc->bnx_cdata.bnx_rx_mtag = NULL;
3601
3602                         if_printf(ifp, "could not create DMA map for RX\n");
3603                         return error;
3604                 }
3605         }
3606
3607         /*
3608          * Create DMA tag and maps for TX mbufs.
3609          */
3610         if (sc->bnx_flags & BNX_FLAG_TSO)
3611                 txmaxsz = IP_MAXPACKET + sizeof(struct ether_vlan_header);
3612         else
3613                 txmaxsz = BNX_JUMBO_FRAMELEN;
3614         error = bus_dma_tag_create(sc->bnx_cdata.bnx_parent_tag, 1, 0,
3615                                    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3616                                    NULL, NULL,
3617                                    txmaxsz, BNX_NSEG_NEW, PAGE_SIZE,
3618                                    BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK |
3619                                    BUS_DMA_ONEBPAGE,
3620                                    &sc->bnx_cdata.bnx_tx_mtag);
3621         if (error) {
3622                 if_printf(ifp, "could not allocate TX mbuf dma tag\n");
3623                 return error;
3624         }
3625
3626         for (i = 0; i < BGE_TX_RING_CNT; i++) {
3627                 error = bus_dmamap_create(sc->bnx_cdata.bnx_tx_mtag,
3628                                           BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
3629                                           &sc->bnx_cdata.bnx_tx_dmamap[i]);
3630                 if (error) {
3631                         int j;
3632
3633                         for (j = 0; j < i; ++j) {
3634                                 bus_dmamap_destroy(sc->bnx_cdata.bnx_tx_mtag,
3635                                         sc->bnx_cdata.bnx_tx_dmamap[j]);
3636                         }
3637                         bus_dma_tag_destroy(sc->bnx_cdata.bnx_tx_mtag);
3638                         sc->bnx_cdata.bnx_tx_mtag = NULL;
3639
3640                         if_printf(ifp, "could not create DMA map for TX\n");
3641                         return error;
3642                 }
3643         }
3644
3645         /*
         * Create DMA memory for the standard RX ring.
3647          */
3648         error = bnx_dma_block_alloc(sc, BGE_STD_RX_RING_SZ,
3649                                     &sc->bnx_cdata.bnx_rx_std_ring_tag,
3650                                     &sc->bnx_cdata.bnx_rx_std_ring_map,
3651                                     (void *)&sc->bnx_ldata.bnx_rx_std_ring,
3652                                     &sc->bnx_ldata.bnx_rx_std_ring_paddr);
3653         if (error) {
3654                 if_printf(ifp, "could not create std RX ring\n");
3655                 return error;
3656         }
3657
3658         /*
3659          * Create jumbo buffer pool.
3660          */
3661         if (BNX_IS_JUMBO_CAPABLE(sc)) {
3662                 error = bnx_alloc_jumbo_mem(sc);
3663                 if (error) {
3664                         if_printf(ifp, "could not create jumbo buffer pool\n");
3665                         return error;
3666                 }
3667         }
3668
3669         /*
         * Create DMA memory for the RX return ring.
3671          */
3672         error = bnx_dma_block_alloc(sc,
3673             BGE_RX_RTN_RING_SZ(sc->bnx_return_ring_cnt),
3674             &sc->bnx_cdata.bnx_rx_return_ring_tag,
3675             &sc->bnx_cdata.bnx_rx_return_ring_map,
3676             (void *)&sc->bnx_ldata.bnx_rx_return_ring,
3677             &sc->bnx_ldata.bnx_rx_return_ring_paddr);
3678         if (error) {
3679                 if_printf(ifp, "could not create RX ret ring\n");
3680                 return error;
3681         }
3682
3683         /*
         * Create DMA memory for the TX ring.
3685          */
3686         error = bnx_dma_block_alloc(sc, BGE_TX_RING_SZ,
3687                                     &sc->bnx_cdata.bnx_tx_ring_tag,
3688                                     &sc->bnx_cdata.bnx_tx_ring_map,
3689                                     (void *)&sc->bnx_ldata.bnx_tx_ring,
3690                                     &sc->bnx_ldata.bnx_tx_ring_paddr);
3691         if (error) {
3692                 if_printf(ifp, "could not create TX ring\n");
3693                 return error;
3694         }
3695
3696         /*
         * Create DMA memory for the status block.
3698          */
3699         error = bnx_dma_block_alloc(sc, BGE_STATUS_BLK_SZ,
3700                                     &sc->bnx_cdata.bnx_status_tag,
3701                                     &sc->bnx_cdata.bnx_status_map,
3702                                     (void *)&sc->bnx_ldata.bnx_status_block,
3703                                     &sc->bnx_ldata.bnx_status_block_paddr);
3704         if (error) {
3705                 if_printf(ifp, "could not create status block\n");
3706                 return error;
3707         }
3708
3709         return 0;
3710 }
3711
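/*
 * Allocate a coherent DMA memory block (used for the rings and the
 * status block) and return its tag, map, kernel address and bus
 * address.
 */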
3712 static int
3713 bnx_dma_block_alloc(struct bnx_softc *sc, bus_size_t size, bus_dma_tag_t *tag,
3714                     bus_dmamap_t *map, void **addr, bus_addr_t *paddr)
3715 {
3716         bus_dmamem_t dmem;
3717         int error;
3718
3719         error = bus_dmamem_coherent(sc->bnx_cdata.bnx_parent_tag, PAGE_SIZE, 0,
3720                                     BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3721                                     size, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
3722         if (error)
3723                 return error;
3724
3725         *tag = dmem.dmem_tag;
3726         *map = dmem.dmem_map;
3727         *addr = dmem.dmem_addr;
3728         *paddr = dmem.dmem_busaddr;
3729
3730         return 0;
3731 }
3732
3733 static void
3734 bnx_dma_block_free(bus_dma_tag_t tag, bus_dmamap_t map, void *addr)
3735 {
3736         if (tag != NULL) {
3737                 bus_dmamap_unload(tag, map);
3738                 bus_dmamem_free(tag, addr, map);
3739                 bus_dma_tag_destroy(tag);
3740         }
3741 }
3742
3743 static void
3744 bnx_tbi_link_upd(struct bnx_softc *sc, uint32_t status)
3745 {
3746         struct ifnet *ifp = &sc->arpcom.ac_if;
3747
3748 #define PCS_ENCODE_ERR  (BGE_MACSTAT_PORT_DECODE_ERROR|BGE_MACSTAT_MI_COMPLETE)
3749
3750         /*
3751          * Sometimes PCS encoding errors are detected in
3752          * TBI mode (on fiber NICs), and for some reason
3753          * the chip will signal them as link changes.
3754          * If we get a link change event, but the 'PCS
3755          * encoding error' bit in the MAC status register
3756          * is set, don't bother doing a link check.
3757          * This avoids spurious "gigabit link up" messages
3758          * that sometimes appear on fiber NICs during
3759          * periods of heavy traffic.
3760          */
3761         if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
3762                 if (!sc->bnx_link) {
3763                         sc->bnx_link++;
3764                         if (sc->bnx_asicrev == BGE_ASICREV_BCM5704) {
3765                                 BNX_CLRBIT(sc, BGE_MAC_MODE,
3766                                     BGE_MACMODE_TBI_SEND_CFGS);
3767                         }
3768                         CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
3769
3770                         if (bootverbose)
3771                                 if_printf(ifp, "link UP\n");
3772
3773                         ifp->if_link_state = LINK_STATE_UP;
3774                         if_link_state_change(ifp);
3775                 }
3776         } else if ((status & PCS_ENCODE_ERR) != PCS_ENCODE_ERR) {
3777                 if (sc->bnx_link) {
3778                         sc->bnx_link = 0;
3779
3780                         if (bootverbose)
3781                                 if_printf(ifp, "link DOWN\n");
3782
3783                         ifp->if_link_state = LINK_STATE_DOWN;
3784                         if_link_state_change(ifp);
3785                 }
3786         }
3787
3788 #undef PCS_ENCODE_ERR
3789
3790         /* Clear the attention. */
3791         CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
3792             BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
3793             BGE_MACSTAT_LINK_CHANGED);
3794 }
3795
3796 static void
3797 bnx_copper_link_upd(struct bnx_softc *sc, uint32_t status __unused)
3798 {
3799         struct ifnet *ifp = &sc->arpcom.ac_if;
3800         struct mii_data *mii = device_get_softc(sc->bnx_miibus);
3801
3802         mii_pollstat(mii);
3803         bnx_miibus_statchg(sc->bnx_dev);
3804
3805         if (bootverbose) {
3806                 if (sc->bnx_link)
3807                         if_printf(ifp, "link UP\n");
3808                 else
3809                         if_printf(ifp, "link DOWN\n");
3810         }
3811
3812         /* Clear the attention. */
3813         CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
3814             BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
3815             BGE_MACSTAT_LINK_CHANGED);
3816 }
3817
3818 static void
3819 bnx_autopoll_link_upd(struct bnx_softc *sc, uint32_t status __unused)
3820 {
3821         struct ifnet *ifp = &sc->arpcom.ac_if;
3822         struct mii_data *mii = device_get_softc(sc->bnx_miibus);
3823
3824         mii_pollstat(mii);
3825
3826         if (!sc->bnx_link &&
3827             (mii->mii_media_status & IFM_ACTIVE) &&
3828             IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
3829                 sc->bnx_link++;
3830                 if (bootverbose)
3831                         if_printf(ifp, "link UP\n");
3832         } else if (sc->bnx_link &&
3833             (!(mii->mii_media_status & IFM_ACTIVE) ||
3834             IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
3835                 sc->bnx_link = 0;
3836                 if (bootverbose)
3837                         if_printf(ifp, "link DOWN\n");
3838         }
3839
3840         /* Clear the attention. */
3841         CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
3842             BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
3843             BGE_MACSTAT_LINK_CHANGED);
3844 }
3845
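/*
 * Sysctl handlers for the interrupt coalescing parameters.  They all
 * funnel into bnx_sysctl_coal_chg(), which validates the new value
 * against the given bounds and records which parameter changed; the
 * hardware registers are reprogrammed later by bnx_coal_change()
 * from interrupt or polling context.
 */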
3846 static int
3847 bnx_sysctl_rx_coal_ticks(SYSCTL_HANDLER_ARGS)
3848 {
3849         struct bnx_softc *sc = arg1;
3850
3851         return bnx_sysctl_coal_chg(oidp, arg1, arg2, req,
3852             &sc->bnx_rx_coal_ticks,
3853             BNX_RX_COAL_TICKS_MIN, BNX_RX_COAL_TICKS_MAX,
3854             BNX_RX_COAL_TICKS_CHG);
3855 }
3856
3857 static int
3858 bnx_sysctl_tx_coal_ticks(SYSCTL_HANDLER_ARGS)
3859 {
3860         struct bnx_softc *sc = arg1;
3861
3862         return bnx_sysctl_coal_chg(oidp, arg1, arg2, req,
3863             &sc->bnx_tx_coal_ticks,
3864             BNX_TX_COAL_TICKS_MIN, BNX_TX_COAL_TICKS_MAX,
3865             BNX_TX_COAL_TICKS_CHG);
3866 }
3867
3868 static int
3869 bnx_sysctl_rx_coal_bds(SYSCTL_HANDLER_ARGS)
3870 {
3871         struct bnx_softc *sc = arg1;
3872
3873         return bnx_sysctl_coal_chg(oidp, arg1, arg2, req,
3874             &sc->bnx_rx_coal_bds,
3875             BNX_RX_COAL_BDS_MIN, BNX_RX_COAL_BDS_MAX,
3876             BNX_RX_COAL_BDS_CHG);
3877 }
3878
3879 static int
3880 bnx_sysctl_tx_coal_bds(SYSCTL_HANDLER_ARGS)
3881 {
3882         struct bnx_softc *sc = arg1;
3883
3884         return bnx_sysctl_coal_chg(oidp, arg1, arg2, req,
3885             &sc->bnx_tx_coal_bds,
3886             BNX_TX_COAL_BDS_MIN, BNX_TX_COAL_BDS_MAX,
3887             BNX_TX_COAL_BDS_CHG);
3888 }
3889
3890 static int
3891 bnx_sysctl_rx_coal_bds_int(SYSCTL_HANDLER_ARGS)
3892 {
3893         struct bnx_softc *sc = arg1;
3894
3895         return bnx_sysctl_coal_chg(oidp, arg1, arg2, req,
3896             &sc->bnx_rx_coal_bds_int,
3897             BNX_RX_COAL_BDS_MIN, BNX_RX_COAL_BDS_MAX,
3898             BNX_RX_COAL_BDS_INT_CHG);
3899 }
3900
3901 static int
3902 bnx_sysctl_tx_coal_bds_int(SYSCTL_HANDLER_ARGS)
3903 {
3904         struct bnx_softc *sc = arg1;
3905
3906         return bnx_sysctl_coal_chg(oidp, arg1, arg2, req,
3907             &sc->bnx_tx_coal_bds_int,
3908             BNX_TX_COAL_BDS_MIN, BNX_TX_COAL_BDS_MAX,
3909             BNX_TX_COAL_BDS_INT_CHG);
3910 }
3911
3912 static int
3913 bnx_sysctl_coal_chg(SYSCTL_HANDLER_ARGS, uint32_t *coal,
3914     int coal_min, int coal_max, uint32_t coal_chg_mask)
3915 {
3916         struct bnx_softc *sc = arg1;
3917         struct ifnet *ifp = &sc->arpcom.ac_if;
3918         int error = 0, v;
3919
3920         lwkt_serialize_enter(ifp->if_serializer);
3921
3922         v = *coal;
3923         error = sysctl_handle_int(oidp, &v, 0, req);
3924         if (!error && req->newptr != NULL) {
3925                 if (v < coal_min || v > coal_max) {
3926                         error = EINVAL;
3927                 } else {
3928                         *coal = v;
3929                         sc->bnx_coal_chg |= coal_chg_mask;
3930                 }
3931         }
3932
3933         lwkt_serialize_exit(ifp->if_serializer);
3934         return error;
3935 }
3936
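/*
 * Apply any pending coalescing parameter changes to the host
 * coalescing engine.  Each write is followed by a short delay and a
 * register read-back, presumably to make sure the posted write has
 * actually reached the chip.
 */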
3937 static void
3938 bnx_coal_change(struct bnx_softc *sc)
3939 {
3940         struct ifnet *ifp = &sc->arpcom.ac_if;
3941         uint32_t val;
3942
3943         ASSERT_SERIALIZED(ifp->if_serializer);
3944
3945         if (sc->bnx_coal_chg & BNX_RX_COAL_TICKS_CHG) {
3946                 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS,
3947                             sc->bnx_rx_coal_ticks);
3948                 DELAY(10);
3949                 val = CSR_READ_4(sc, BGE_HCC_RX_COAL_TICKS);
3950
3951                 if (bootverbose) {
3952                         if_printf(ifp, "rx_coal_ticks -> %u\n",
3953                                   sc->bnx_rx_coal_ticks);
3954                 }
3955         }
3956
3957         if (sc->bnx_coal_chg & BNX_TX_COAL_TICKS_CHG) {
3958                 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS,
3959                             sc->bnx_tx_coal_ticks);
3960                 DELAY(10);
3961                 val = CSR_READ_4(sc, BGE_HCC_TX_COAL_TICKS);
3962
3963                 if (bootverbose) {
3964                         if_printf(ifp, "tx_coal_ticks -> %u\n",
3965                                   sc->bnx_tx_coal_ticks);
3966                 }
3967         }
3968
3969         if (sc->bnx_coal_chg & BNX_RX_COAL_BDS_CHG) {
3970                 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS,
3971                             sc->bnx_rx_coal_bds);
3972                 DELAY(10);
3973                 val = CSR_READ_4(sc, BGE_HCC_RX_MAX_COAL_BDS);
3974
3975                 if (bootverbose) {
3976                         if_printf(ifp, "rx_coal_bds -> %u\n",
3977                                   sc->bnx_rx_coal_bds);
3978                 }
3979         }
3980
3981         if (sc->bnx_coal_chg & BNX_TX_COAL_BDS_CHG) {
3982                 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS,
3983                             sc->bnx_tx_coal_bds);
3984                 DELAY(10);
3985                 val = CSR_READ_4(sc, BGE_HCC_TX_MAX_COAL_BDS);
3986
3987                 if (bootverbose) {
3988                         if_printf(ifp, "tx_coal_bds -> %u\n",
3989                                   sc->bnx_tx_coal_bds);
3990                 }
3991         }
3992
3993         if (sc->bnx_coal_chg & BNX_RX_COAL_BDS_INT_CHG) {
3994                 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT,
3995                     sc->bnx_rx_coal_bds_int);
3996                 DELAY(10);
3997                 val = CSR_READ_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT);
3998
3999                 if (bootverbose) {
4000                         if_printf(ifp, "rx_coal_bds_int -> %u\n",
4001                             sc->bnx_rx_coal_bds_int);
4002                 }
4003         }
4004
4005         if (sc->bnx_coal_chg & BNX_TX_COAL_BDS_INT_CHG) {
4006                 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT,
4007                     sc->bnx_tx_coal_bds_int);
4008                 DELAY(10);
4009                 val = CSR_READ_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT);
4010
4011                 if (bootverbose) {
4012                         if_printf(ifp, "tx_coal_bds_int -> %u\n",
4013                             sc->bnx_tx_coal_bds_int);
4014                 }
4015         }
4016
4017         sc->bnx_coal_chg = 0;
4018 }
4019
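/*
 * Periodic watchdog for the status tag bug (see bnx_enable_intr()):
 * if the status block indicates new RX/TX work but the consumer
 * indices have not moved for two consecutive checks, assume an
 * interrupt was lost and run the MSI handler by hand.
 */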
4020 static void
4021 bnx_intr_check(void *xsc)
4022 {
4023         struct bnx_softc *sc = xsc;
4024         struct ifnet *ifp = &sc->arpcom.ac_if;
4025         struct bge_status_block *sblk = sc->bnx_ldata.bnx_status_block;
4026
4027         lwkt_serialize_enter(ifp->if_serializer);
4028
4029         KKASSERT(mycpuid == sc->bnx_intr_cpuid);
4030
4031         if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) != IFF_RUNNING) {
4032                 lwkt_serialize_exit(ifp->if_serializer);
4033                 return;
4034         }
4035
4036         if (sblk->bge_idx[0].bge_rx_prod_idx != sc->bnx_rx_saved_considx ||
4037             sblk->bge_idx[0].bge_tx_cons_idx != sc->bnx_tx_saved_considx) {
4038                 if (sc->bnx_rx_check_considx == sc->bnx_rx_saved_considx &&
4039                     sc->bnx_tx_check_considx == sc->bnx_tx_saved_considx) {
4040                         if (!sc->bnx_intr_maylose) {
4041                                 sc->bnx_intr_maylose = TRUE;
4042                                 goto done;
4043                         }
4044                         if (bootverbose)
4045                                 if_printf(ifp, "lost interrupt\n");
4046                         bnx_msi(sc);
4047                 }
4048         }
4049         sc->bnx_intr_maylose = FALSE;
4050         sc->bnx_rx_check_considx = sc->bnx_rx_saved_considx;
4051         sc->bnx_tx_check_considx = sc->bnx_tx_saved_considx;
4052
4053 done:
4054         callout_reset(&sc->bnx_intr_timer, BNX_INTR_CKINTVL,
4055             bnx_intr_check, sc);
4056         lwkt_serialize_exit(ifp->if_serializer);
4057 }
4058
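/*
 * Re-enable the hardware interrupt: write the current status tag to
 * mailbox0, unmask INTA, and force an interrupt via SETINT in case
 * the mailbox write acknowledged a pending event.  On chips with the
 * status tag bug this also starts the lost-interrupt watchdog.
 */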
4059 static void
4060 bnx_enable_intr(struct bnx_softc *sc)
4061 {
4062         struct ifnet *ifp = &sc->arpcom.ac_if;
4063
4064         lwkt_serialize_handler_enable(ifp->if_serializer);
4065
4066         /*
4067          * Enable interrupt.
4068          */
4069         bnx_writembx(sc, BGE_MBX_IRQ0_LO, sc->bnx_status_tag << 24);
4070         if (sc->bnx_flags & BNX_FLAG_ONESHOT_MSI) {
4071                 /* XXX Linux driver */
4072                 bnx_writembx(sc, BGE_MBX_IRQ0_LO, sc->bnx_status_tag << 24);
4073         }
4074
4075         /*
4076          * Unmask the interrupt when we stop polling.
4077          */
4078         PCI_CLRBIT(sc->bnx_dev, BGE_PCI_MISC_CTL,
4079             BGE_PCIMISCCTL_MASK_PCI_INTR, 4);
4080
        /*
         * Trigger another interrupt, since the above write to
         * interrupt mailbox0 may acknowledge a pending interrupt.
         */
4086         BNX_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
4087
4088         if (sc->bnx_flags & BNX_FLAG_STATUSTAG_BUG) {
4089                 sc->bnx_intr_maylose = FALSE;
4090                 sc->bnx_rx_check_considx = 0;
4091                 sc->bnx_tx_check_considx = 0;
4092
4093                 if (bootverbose)
4094                         if_printf(ifp, "status tag bug workaround\n");
4095
4096                 /* 10ms check interval */
4097                 callout_reset_bycpu(&sc->bnx_intr_timer, BNX_INTR_CKINTVL,
4098                     bnx_intr_check, sc, sc->bnx_intr_cpuid);
4099         }
4100 }
4101
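/*
 * Mask the hardware interrupt (entering polling mode or stopping the
 * chip), acknowledge anything already asserted, and stop the
 * lost-interrupt watchdog.
 */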
4102 static void
4103 bnx_disable_intr(struct bnx_softc *sc)
4104 {
4105         struct ifnet *ifp = &sc->arpcom.ac_if;
4106
4107         /*
4108          * Mask the interrupt when we start polling.
4109          */
4110         PCI_SETBIT(sc->bnx_dev, BGE_PCI_MISC_CTL,
4111             BGE_PCIMISCCTL_MASK_PCI_INTR, 4);
4112
4113         /*
4114          * Acknowledge possible asserted interrupt.
4115          */
4116         bnx_writembx(sc, BGE_MBX_IRQ0_LO, 1);
4117
4118         callout_stop(&sc->bnx_intr_timer);
4119         sc->bnx_intr_maylose = FALSE;
4120         sc->bnx_rx_check_considx = 0;
4121         sc->bnx_tx_check_considx = 0;
4122
4123         sc->bnx_npoll.ifpc_stcount = 0;
4124
4125         lwkt_serialize_handler_disable(ifp->if_serializer);
4126 }
4127
4128 static int
4129 bnx_get_eaddr_mem(struct bnx_softc *sc, uint8_t ether_addr[])
4130 {
4131         uint32_t mac_addr;
4132         int ret = 1;
4133
4134         mac_addr = bnx_readmem_ind(sc, 0x0c14);
4135         if ((mac_addr >> 16) == 0x484b) {
4136                 ether_addr[0] = (uint8_t)(mac_addr >> 8);
4137                 ether_addr[1] = (uint8_t)mac_addr;
4138                 mac_addr = bnx_readmem_ind(sc, 0x0c18);
4139                 ether_addr[2] = (uint8_t)(mac_addr >> 24);
4140                 ether_addr[3] = (uint8_t)(mac_addr >> 16);
4141                 ether_addr[4] = (uint8_t)(mac_addr >> 8);
4142                 ether_addr[5] = (uint8_t)mac_addr;
4143                 ret = 0;
4144         }
4145         return ret;
4146 }
4147
4148 static int
4149 bnx_get_eaddr_nvram(struct bnx_softc *sc, uint8_t ether_addr[])
4150 {
4151         int mac_offset = BGE_EE_MAC_OFFSET;
4152
4153         if (BNX_IS_5717_PLUS(sc)) {
4154                 int f;
4155
4156                 f = pci_get_function(sc->bnx_dev);
4157                 if (f & 1)
4158                         mac_offset = BGE_EE_MAC_OFFSET_5717;
4159                 if (f > 1)
4160                         mac_offset += BGE_EE_MAC_OFFSET_5717_OFF;
4161         } else if (sc->bnx_asicrev == BGE_ASICREV_BCM5906) {
4162                 mac_offset = BGE_EE_MAC_OFFSET_5906;
4163         }
4164
4165         return bnx_read_nvram(sc, ether_addr, mac_offset + 2, ETHER_ADDR_LEN);
4166 }
4167
4168 static int
4169 bnx_get_eaddr_eeprom(struct bnx_softc *sc, uint8_t ether_addr[])
4170 {
4171         if (sc->bnx_flags & BNX_FLAG_NO_EEPROM)
4172                 return 1;
4173
4174         return bnx_read_eeprom(sc, ether_addr, BGE_EE_MAC_OFFSET + 2,
4175                                ETHER_ADDR_LEN);
4176 }
4177
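/*
 * Fetch the MAC address, trying the device's shared memory first,
 * then NVRAM, then the EEPROM, in that order.
 */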
4178 static int
4179 bnx_get_eaddr(struct bnx_softc *sc, uint8_t eaddr[])
4180 {
4181         static const bnx_eaddr_fcn_t bnx_eaddr_funcs[] = {
4182                 /* NOTE: Order is critical */
4183                 bnx_get_eaddr_mem,
4184                 bnx_get_eaddr_nvram,
4185                 bnx_get_eaddr_eeprom,
4186                 NULL
4187         };
4188         const bnx_eaddr_fcn_t *func;
4189
4190         for (func = bnx_eaddr_funcs; *func != NULL; ++func) {
4191                 if ((*func)(sc, eaddr) == 0)
4192                         break;
4193         }
4194         return (*func == NULL ? ENXIO : 0);
4195 }
4196
4197 /*
4198  * NOTE: 'm' is not freed upon failure
4199  */
4200 struct mbuf *
4201 bnx_defrag_shortdma(struct mbuf *m)
4202 {
4203         struct mbuf *n;
4204         int found;
4205
        /*
         * If the device receives two back-to-back send BDs with less
         * than or equal to 8 total bytes then it may hang.  The two
         * back-to-back send BDs must be in the same frame for this
         * failure to occur.  Scan the mbuf chain for such back-to-back
         * send BDs; if found, defragment the chain into a new mbuf to
         * work around the silicon bug.
         */
4214         for (n = m, found = 0; n != NULL; n = n->m_next) {
4215                 if (n->m_len < 8) {
4216                         found++;
4217                         if (found > 1)
4218                                 break;
4219                         continue;
4220                 }
4221                 found = 0;
4222         }
4223
4224         if (found > 1)
4225                 n = m_defrag(m, MB_DONTWAIT);
4226         else
4227                 n = m;
4228         return n;
4229 }
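
/*
 * Example of the scan above (illustrative mbuf lengths): the chain
 * 14 -> 4 -> 3 -> 1400 contains two adjacent sub-8-byte buffers, so the
 * frame is defragmented; 14 -> 4 -> 1400 -> 3 does not, because "found"
 * is reset by the 1400-byte buffer in between.  On m_defrag() failure
 * NULL is returned and, per the NOTE above, the caller still owns 'm'.
 */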

static void
bnx_stop_block(struct bnx_softc *sc, bus_size_t reg, uint32_t bit)
{
        int i;

        BNX_CLRBIT(sc, reg, bit);
        for (i = 0; i < BNX_TIMEOUT; i++) {
                if ((CSR_READ_4(sc, reg) & bit) == 0)
                        return;
                DELAY(100);
        }
}
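
/*
 * bnx_stop_block() clears the enable bit of a state-machine block and
 * polls until the hardware acknowledges the bit going clear, waiting at
 * most BNX_TIMEOUT * 100us before giving up silently.  A hedged usage
 * sketch with an illustrative register/bit pair:
 *
 *      bnx_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
 */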

static void
bnx_link_poll(struct bnx_softc *sc)
{
        uint32_t status;

        status = CSR_READ_4(sc, BGE_MAC_STS);
        if ((status & sc->bnx_link_chg) || sc->bnx_link_evt) {
                sc->bnx_link_evt = 0;
                sc->bnx_link_upd(sc, status);
        }
}

static void
bnx_enable_msi(struct bnx_softc *sc)
{
        uint32_t msi_mode;

        msi_mode = CSR_READ_4(sc, BGE_MSI_MODE);
        msi_mode |= BGE_MSIMODE_ENABLE;
        if (sc->bnx_flags & BNX_FLAG_ONESHOT_MSI) {
                /*
                 * NOTE:
                 * 5718-PG105-R says that "one shot" mode does not
                 * work if MSI is used; however, it obviously does.
                 */
                msi_mode &= ~BGE_MSIMODE_ONESHOT_DISABLE;
        }
        CSR_WRITE_4(sc, BGE_MSI_MODE, msi_mode);
}
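
/*
 * With one-shot MSI the chip masks further interrupts on its own after
 * firing one, and the handler re-arms it by writing the IRQ0 mailbox
 * (cf. the bnx_writembx(sc, BGE_MBX_IRQ0_LO, ...) calls in this file).
 * This is a hedged reading of the driver's interrupt path; consult
 * 5718-PG105-R for the authoritative description.
 */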

static uint32_t
bnx_dma_swap_options(struct bnx_softc *sc)
{
        uint32_t dma_options;

        dma_options = BGE_MODECTL_WORDSWAP_NONFRAME |
            BGE_MODECTL_BYTESWAP_DATA | BGE_MODECTL_WORDSWAP_DATA;
#if BYTE_ORDER == BIG_ENDIAN
        dma_options |= BGE_MODECTL_BYTESWAP_NONFRAME;
#endif
        if (sc->bnx_asicrev == BGE_ASICREV_BCM5720) {
                dma_options |= BGE_MODECTL_BYTESWAP_B2HRX_DATA |
                    BGE_MODECTL_WORDSWAP_B2HRX_DATA | BGE_MODECTL_B2HRX_ENABLE |
                    BGE_MODECTL_HTX2B_ENABLE;
        }
        return dma_options;
}
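
/*
 * Summary of the mode-control word built above (drawn from the code, not
 * from chip documentation): every chip gets non-frame word swapping plus
 * byte and word swapping of frame data; big-endian hosts additionally
 * set BGE_MODECTL_BYTESWAP_NONFRAME, so host endianness only affects the
 * non-frame byte-swap bit; the BCM5720 further enables its buffer-to-host
 * RX path (B2HRX) and HTX2B engine with matching swap options.
 */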

static int
bnx_setup_tso(struct bnx_softc *sc, struct mbuf **mp,
    uint16_t *mss0, uint16_t *flags0)
{
        struct mbuf *m;
        struct ip *ip;
        struct tcphdr *th;
        int thoff, iphlen, hoff, hlen;
        uint16_t flags, mss;

        m = *mp;
        KASSERT(M_WRITABLE(m), ("TSO mbuf not writable"));

        hoff = m->m_pkthdr.csum_lhlen;
        iphlen = m->m_pkthdr.csum_iphlen;
        thoff = m->m_pkthdr.csum_thlen;

        KASSERT(hoff > 0, ("invalid ether header len"));
        KASSERT(iphlen > 0, ("invalid ip header len"));
        KASSERT(thoff > 0, ("invalid tcp header len"));

        /* Make sure all of the headers are in the first mbuf. */
        if (__predict_false(m->m_len < hoff + iphlen + thoff)) {
                m = m_pullup(m, hoff + iphlen + thoff);
                if (m == NULL) {
                        *mp = NULL;
                        return ENOBUFS;
                }
                *mp = m;
        }
        ip = mtodoff(m, struct ip *, hoff);
        th = mtodoff(m, struct tcphdr *, hoff + iphlen);

        mss = m->m_pkthdr.tso_segsz;
        flags = BGE_TXBDFLAG_CPU_PRE_DMA | BGE_TXBDFLAG_CPU_POST_DMA;

        /* Set ip_len to the per-segment length and clear th_sum. */
        ip->ip_len = htons(mss + iphlen + thoff);
        th->th_sum = 0;

        /*
         * Fold the IP+TCP header length, in 32-bit words, into the
         * send BD's mss and flags fields (see the example below).
         */
        hlen = (iphlen + thoff) >> 2;
        mss |= ((hlen & 0x3) << 14);
        flags |= ((hlen & 0xf8) << 7) | ((hlen & 0x4) << 2);

        *mss0 = mss;
        *flags0 = flags;

        return 0;
}
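
/*
 * Worked example of the header-length encoding above (illustrative
 * numbers, derived from the bit arithmetic rather than chip docs): for
 * a standard 20-byte IP header and 20-byte TCP header, hlen = 40 >> 2 =
 * 10 = 0b01010.  Its low two bits land in mss bits 15:14 (0b10 << 14 =
 * 0x8000), bit 2 lands in flags bit 4 (0 here), and bits 7:3 land in
 * flags bits 14:10 (0b00001 << 10 = 0x0400), so the send BD carries the
 * header length in 32-bit words split across the two fields.
 */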