bnx: Use MPSAFE callout
sys/dev/netif/bnx/if_bnx.c
1 /*
2  * Copyright (c) 2001 Wind River Systems
3  * Copyright (c) 1997, 1998, 1999, 2001
4  *      Bill Paul <wpaul@windriver.com>.  All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. All advertising materials mentioning features or use of this software
15  *    must display the following acknowledgement:
16  *      This product includes software developed by Bill Paul.
17  * 4. Neither the name of the author nor the names of any co-contributors
18  *    may be used to endorse or promote products derived from this software
19  *    without specific prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
22  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
25  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31  * THE POSSIBILITY OF SUCH DAMAGE.
32  *
33  * $FreeBSD: src/sys/dev/bge/if_bge.c,v 1.3.2.39 2005/07/03 03:41:18 silby Exp $
34  */
35
36
37 #include "opt_polling.h"
38
39 #include <sys/param.h>
40 #include <sys/bus.h>
41 #include <sys/endian.h>
42 #include <sys/kernel.h>
43 #include <sys/interrupt.h>
44 #include <sys/mbuf.h>
45 #include <sys/malloc.h>
46 #include <sys/queue.h>
47 #include <sys/rman.h>
48 #include <sys/serialize.h>
49 #include <sys/socket.h>
50 #include <sys/sockio.h>
51 #include <sys/sysctl.h>
52
53 #include <net/bpf.h>
54 #include <net/ethernet.h>
55 #include <net/if.h>
56 #include <net/if_arp.h>
57 #include <net/if_dl.h>
58 #include <net/if_media.h>
59 #include <net/if_types.h>
60 #include <net/ifq_var.h>
61 #include <net/vlan/if_vlan_var.h>
62 #include <net/vlan/if_vlan_ether.h>
63
64 #include <dev/netif/mii_layer/mii.h>
65 #include <dev/netif/mii_layer/miivar.h>
66 #include <dev/netif/mii_layer/brgphyreg.h>
67
68 #include <bus/pci/pcidevs.h>
69 #include <bus/pci/pcireg.h>
70 #include <bus/pci/pcivar.h>
71
72 #include <dev/netif/bge/if_bgereg.h>
73 #include <dev/netif/bnx/if_bnxvar.h>
74
75 /* "device miibus" required.  See GENERIC if you get errors here. */
76 #include "miibus_if.h"
77
78 #define BNX_CSUM_FEATURES       (CSUM_IP | CSUM_TCP | CSUM_UDP)
79
80 static const struct bnx_type {
81         uint16_t                bnx_vid;
82         uint16_t                bnx_did;
83         char                    *bnx_name;
84 } bnx_devs[] = {
85         { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5717,
86                 "Broadcom BCM5717 Gigabit Ethernet" },
87         { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5718,
88                 "Broadcom BCM5718 Gigabit Ethernet" },
89         { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5719,
90                 "Broadcom BCM5719 Gigabit Ethernet" },
91         { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5720_ALT,
92                 "Broadcom BCM5720 Gigabit Ethernet" },
93
94         { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57761,
95                 "Broadcom BCM57761 Gigabit Ethernet" },
96         { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57762,
97                 "Broadcom BCM57762 Gigabit Ethernet" },
98         { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57765,
99                 "Broadcom BCM57765 Gigabit Ethernet" },
100         { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57766,
101                 "Broadcom BCM57766 Gigabit Ethernet" },
102         { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57781,
103                 "Broadcom BCM57781 Gigabit Ethernet" },
104         { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57782,
105                 "Broadcom BCM57782 Gigabit Ethernet" },
106         { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57785,
107                 "Broadcom BCM57785 Gigabit Ethernet" },
108         { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57786,
109                 "Broadcom BCM57786 Gigabit Ethernet" },
110         { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57791,
111                 "Broadcom BCM57791 Fast Ethernet" },
112         { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57795,
113                 "Broadcom BCM57795 Fast Ethernet" },
114
115         { 0, 0, NULL }
116 };
117
118 #define BNX_IS_JUMBO_CAPABLE(sc)        ((sc)->bnx_flags & BNX_FLAG_JUMBO)
119 #define BNX_IS_5717_PLUS(sc)            ((sc)->bnx_flags & BNX_FLAG_5717_PLUS)
120 #define BNX_IS_57765_PLUS(sc)           ((sc)->bnx_flags & BNX_FLAG_57765_PLUS)
121 #define BNX_IS_57765_FAMILY(sc)  \
122         ((sc)->bnx_flags & BNX_FLAG_57765_FAMILY)
123
124 typedef int     (*bnx_eaddr_fcn_t)(struct bnx_softc *, uint8_t[]);
125
126 static int      bnx_probe(device_t);
127 static int      bnx_attach(device_t);
128 static int      bnx_detach(device_t);
129 static void     bnx_shutdown(device_t);
130 static int      bnx_suspend(device_t);
131 static int      bnx_resume(device_t);
132 static int      bnx_miibus_readreg(device_t, int, int);
133 static int      bnx_miibus_writereg(device_t, int, int, int);
134 static void     bnx_miibus_statchg(device_t);
135
136 #ifdef DEVICE_POLLING
137 static void     bnx_poll(struct ifnet *ifp, enum poll_cmd cmd, int count);
138 #endif
139 static void     bnx_intr_legacy(void *);
140 static void     bnx_msi(void *);
141 static void     bnx_msi_oneshot(void *);
142 static void     bnx_intr(struct bnx_softc *);
143 static void     bnx_enable_intr(struct bnx_softc *);
144 static void     bnx_disable_intr(struct bnx_softc *);
145 static void     bnx_txeof(struct bnx_softc *, uint16_t);
146 static void     bnx_rxeof(struct bnx_softc *, uint16_t);
147
148 static void     bnx_start(struct ifnet *);
149 static int      bnx_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
150 static void     bnx_init(void *);
151 static void     bnx_stop(struct bnx_softc *);
152 static void     bnx_watchdog(struct ifnet *);
153 static int      bnx_ifmedia_upd(struct ifnet *);
154 static void     bnx_ifmedia_sts(struct ifnet *, struct ifmediareq *);
155 static void     bnx_tick(void *);
156
157 static int      bnx_alloc_jumbo_mem(struct bnx_softc *);
158 static void     bnx_free_jumbo_mem(struct bnx_softc *);
159 static struct bnx_jslot
160                 *bnx_jalloc(struct bnx_softc *);
161 static void     bnx_jfree(void *);
162 static void     bnx_jref(void *);
163 static int      bnx_newbuf_std(struct bnx_softc *, int, int);
164 static int      bnx_newbuf_jumbo(struct bnx_softc *, int, int);
165 static void     bnx_setup_rxdesc_std(struct bnx_softc *, int);
166 static void     bnx_setup_rxdesc_jumbo(struct bnx_softc *, int);
167 static int      bnx_init_rx_ring_std(struct bnx_softc *);
168 static void     bnx_free_rx_ring_std(struct bnx_softc *);
169 static int      bnx_init_rx_ring_jumbo(struct bnx_softc *);
170 static void     bnx_free_rx_ring_jumbo(struct bnx_softc *);
171 static void     bnx_free_tx_ring(struct bnx_softc *);
172 static int      bnx_init_tx_ring(struct bnx_softc *);
173 static int      bnx_dma_alloc(struct bnx_softc *);
174 static void     bnx_dma_free(struct bnx_softc *);
175 static int      bnx_dma_block_alloc(struct bnx_softc *, bus_size_t,
176                     bus_dma_tag_t *, bus_dmamap_t *, void **, bus_addr_t *);
177 static void     bnx_dma_block_free(bus_dma_tag_t, bus_dmamap_t, void *);
178 static struct mbuf *
179                 bnx_defrag_shortdma(struct mbuf *);
180 static int      bnx_encap(struct bnx_softc *, struct mbuf **, uint32_t *);
181
182 static void     bnx_reset(struct bnx_softc *);
183 static int      bnx_chipinit(struct bnx_softc *);
184 static int      bnx_blockinit(struct bnx_softc *);
185 static void     bnx_stop_block(struct bnx_softc *, bus_size_t, uint32_t);
186 static void     bnx_enable_msi(struct bnx_softc *sc);
187 static void     bnx_setmulti(struct bnx_softc *);
188 static void     bnx_setpromisc(struct bnx_softc *);
189 static void     bnx_stats_update_regs(struct bnx_softc *);
190 static uint32_t bnx_dma_swap_options(struct bnx_softc *);
191
192 static uint32_t bnx_readmem_ind(struct bnx_softc *, uint32_t);
193 static void     bnx_writemem_ind(struct bnx_softc *, uint32_t, uint32_t);
194 #ifdef notdef
195 static uint32_t bnx_readreg_ind(struct bnx_softc *, uint32_t);
196 #endif
197 static void     bnx_writereg_ind(struct bnx_softc *, uint32_t, uint32_t);
198 static void     bnx_writemem_direct(struct bnx_softc *, uint32_t, uint32_t);
199 static void     bnx_writembx(struct bnx_softc *, int, int);
200 static uint8_t  bnx_nvram_getbyte(struct bnx_softc *, int, uint8_t *);
201 static int      bnx_read_nvram(struct bnx_softc *, caddr_t, int, int);
202 static uint8_t  bnx_eeprom_getbyte(struct bnx_softc *, uint32_t, uint8_t *);
203 static int      bnx_read_eeprom(struct bnx_softc *, caddr_t, uint32_t, size_t);
204
205 static void     bnx_tbi_link_upd(struct bnx_softc *, uint32_t);
206 static void     bnx_copper_link_upd(struct bnx_softc *, uint32_t);
207 static void     bnx_autopoll_link_upd(struct bnx_softc *, uint32_t);
208 static void     bnx_link_poll(struct bnx_softc *);
209
210 static int      bnx_get_eaddr_mem(struct bnx_softc *, uint8_t[]);
211 static int      bnx_get_eaddr_nvram(struct bnx_softc *, uint8_t[]);
212 static int      bnx_get_eaddr_eeprom(struct bnx_softc *, uint8_t[]);
213 static int      bnx_get_eaddr(struct bnx_softc *, uint8_t[]);
214
215 static void     bnx_coal_change(struct bnx_softc *);
216 static int      bnx_sysctl_rx_coal_ticks(SYSCTL_HANDLER_ARGS);
217 static int      bnx_sysctl_tx_coal_ticks(SYSCTL_HANDLER_ARGS);
218 static int      bnx_sysctl_rx_coal_bds(SYSCTL_HANDLER_ARGS);
219 static int      bnx_sysctl_tx_coal_bds(SYSCTL_HANDLER_ARGS);
220 static int      bnx_sysctl_rx_coal_bds_int(SYSCTL_HANDLER_ARGS);
221 static int      bnx_sysctl_tx_coal_bds_int(SYSCTL_HANDLER_ARGS);
222 static int      bnx_sysctl_coal_chg(SYSCTL_HANDLER_ARGS, uint32_t *,
223                     int, int, uint32_t);
224
225 static int      bnx_msi_enable = 1;
226 TUNABLE_INT("hw.bnx.msi.enable", &bnx_msi_enable);
227
228 static device_method_t bnx_methods[] = {
229         /* Device interface */
230         DEVMETHOD(device_probe,         bnx_probe),
231         DEVMETHOD(device_attach,        bnx_attach),
232         DEVMETHOD(device_detach,        bnx_detach),
233         DEVMETHOD(device_shutdown,      bnx_shutdown),
234         DEVMETHOD(device_suspend,       bnx_suspend),
235         DEVMETHOD(device_resume,        bnx_resume),
236
237         /* bus interface */
238         DEVMETHOD(bus_print_child,      bus_generic_print_child),
239         DEVMETHOD(bus_driver_added,     bus_generic_driver_added),
240
241         /* MII interface */
242         DEVMETHOD(miibus_readreg,       bnx_miibus_readreg),
243         DEVMETHOD(miibus_writereg,      bnx_miibus_writereg),
244         DEVMETHOD(miibus_statchg,       bnx_miibus_statchg),
245
246         { 0, 0 }
247 };
248
249 static DEFINE_CLASS_0(bnx, bnx_driver, bnx_methods, sizeof(struct bnx_softc));
250 static devclass_t bnx_devclass;
251
252 DECLARE_DUMMY_MODULE(if_bnx);
253 DRIVER_MODULE(if_bnx, pci, bnx_driver, bnx_devclass, NULL, NULL);
254 DRIVER_MODULE(miibus, bnx, miibus_driver, miibus_devclass, NULL, NULL);
255
256 static uint32_t
257 bnx_readmem_ind(struct bnx_softc *sc, uint32_t off)
258 {
259         device_t dev = sc->bnx_dev;
260         uint32_t val;
261
262         if (sc->bnx_asicrev == BGE_ASICREV_BCM5906 &&
263             off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4)
264                 return 0;
265
266         pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
267         val = pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4);
268         pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
269         return (val);
270 }
271
272 static void
273 bnx_writemem_ind(struct bnx_softc *sc, uint32_t off, uint32_t val)
274 {
275         device_t dev = sc->bnx_dev;
276
277         if (sc->bnx_asicrev == BGE_ASICREV_BCM5906 &&
278             off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4)
279                 return;
280
281         pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
282         pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
283         pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
284 }
285
286 #ifdef notdef
287 static uint32_t
288 bnx_readreg_ind(struct bnx_softc *sc, uint32_t off)
289 {
290         device_t dev = sc->bnx_dev;
291
292         pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
293         return(pci_read_config(dev, BGE_PCI_REG_DATA, 4));
294 }
295 #endif
296
297 static void
298 bnx_writereg_ind(struct bnx_softc *sc, uint32_t off, uint32_t val)
299 {
300         device_t dev = sc->bnx_dev;
301
302         pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
303         pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);
304 }
305
306 static void
307 bnx_writemem_direct(struct bnx_softc *sc, uint32_t off, uint32_t val)
308 {
309         CSR_WRITE_4(sc, off, val);
310 }
311
312 static void
313 bnx_writembx(struct bnx_softc *sc, int off, int val)
314 {
315         if (sc->bnx_asicrev == BGE_ASICREV_BCM5906)
316                 off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI;
317
318         CSR_WRITE_4(sc, off, val);
319 }
320
321 static uint8_t
322 bnx_nvram_getbyte(struct bnx_softc *sc, int addr, uint8_t *dest)
323 {
324         uint32_t access, byte = 0;
325         int i;
326
327         /* Lock. */
328         CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
329         for (i = 0; i < 8000; i++) {
330                 if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1)
331                         break;
332                 DELAY(20);
333         }
334         if (i == 8000)
335                 return (1);
336
337         /* Enable access. */
338         access = CSR_READ_4(sc, BGE_NVRAM_ACCESS);
339         CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access | BGE_NVRAMACC_ENABLE);
340
341         CSR_WRITE_4(sc, BGE_NVRAM_ADDR, addr & 0xfffffffc);
342         CSR_WRITE_4(sc, BGE_NVRAM_CMD, BGE_NVRAM_READCMD);
343         for (i = 0; i < BNX_TIMEOUT * 10; i++) {
344                 DELAY(10);
345                 if (CSR_READ_4(sc, BGE_NVRAM_CMD) & BGE_NVRAMCMD_DONE) {
346                         DELAY(10);
347                         break;
348                 }
349         }
350
351         if (i == BNX_TIMEOUT * 10) {
352                 if_printf(&sc->arpcom.ac_if, "nvram read timed out\n");
353                 return (1);
354         }
355
356         /* Get result. */
357         byte = CSR_READ_4(sc, BGE_NVRAM_RDDATA);
358
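        /*
         * The read-data register holds a big-endian 32-bit word;
         * byte-swap it and shift to extract the addressed byte.
         */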
359         *dest = (bswap32(byte) >> ((addr % 4) * 8)) & 0xFF;
360
361         /* Disable access. */
362         CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access);
363
364         /* Unlock. */
365         CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_CLR1);
366         CSR_READ_4(sc, BGE_NVRAM_SWARB);
367
368         return (0);
369 }
370
371 /*
372  * Read a sequence of bytes from NVRAM.
373  */
374 static int
375 bnx_read_nvram(struct bnx_softc *sc, caddr_t dest, int off, int cnt)
376 {
377         int err = 0, i;
378         uint8_t byte = 0;
379
380         if (sc->bnx_asicrev != BGE_ASICREV_BCM5906)
381                 return (1);
382
383         for (i = 0; i < cnt; i++) {
384                 err = bnx_nvram_getbyte(sc, off + i, &byte);
385                 if (err)
386                         break;
387                 *(dest + i) = byte;
388         }
389
390         return (err ? 1 : 0);
391 }
392
393 /*
394  * Read a byte of data stored in the EEPROM at address 'addr.' The
395  * BCM570x supports both the traditional bitbang interface and an
396  * auto access interface for reading the EEPROM. We use the auto
397  * access method.
398  */
399 static uint8_t
400 bnx_eeprom_getbyte(struct bnx_softc *sc, uint32_t addr, uint8_t *dest)
401 {
402         int i;
403         uint32_t byte = 0;
404
405         /*
406          * Enable use of auto EEPROM access so we can avoid
407          * having to use the bitbang method.
408          */
409         BNX_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);
410
411         /* Reset the EEPROM, load the clock period. */
412         CSR_WRITE_4(sc, BGE_EE_ADDR,
413             BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
414         DELAY(20);
415
416         /* Issue the read EEPROM command. */
417         CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);
418
419         /* Wait for completion */
420         for(i = 0; i < BNX_TIMEOUT * 10; i++) {
421                 DELAY(10);
422                 if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
423                         break;
424         }
425
426         if (i == BNX_TIMEOUT * 10) {
427                 if_printf(&sc->arpcom.ac_if, "eeprom read timed out\n");
428                 return(1);
429         }
430
431         /* Get result. */
432         byte = CSR_READ_4(sc, BGE_EE_DATA);
433
434         *dest = (byte >> ((addr % 4) * 8)) & 0xFF;
435
436         return(0);
437 }
438
439 /*
440  * Read a sequence of bytes from the EEPROM.
441  */
442 static int
443 bnx_read_eeprom(struct bnx_softc *sc, caddr_t dest, uint32_t off, size_t len)
444 {
445         size_t i;
446         int err;
447         uint8_t byte;
448
449         for (byte = 0, err = 0, i = 0; i < len; i++) {
450                 err = bnx_eeprom_getbyte(sc, off + i, &byte);
451                 if (err)
452                         break;
453                 *(dest + i) = byte;
454         }
455
456         return(err ? 1 : 0);
457 }
458
459 static int
460 bnx_miibus_readreg(device_t dev, int phy, int reg)
461 {
462         struct bnx_softc *sc = device_get_softc(dev);
463         uint32_t val;
464         int i;
465
466         KASSERT(phy == sc->bnx_phyno,
467             ("invalid phyno %d, should be %d", phy, sc->bnx_phyno));
468
469         /* Clear the autopoll bit if set, otherwise may trigger PCI errors. */
470         if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) {
471                 CSR_WRITE_4(sc, BGE_MI_MODE,
472                     sc->bnx_mi_mode & ~BGE_MIMODE_AUTOPOLL);
473                 DELAY(80);
474         }
475
476         CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ | BGE_MICOMM_BUSY |
477             BGE_MIPHY(phy) | BGE_MIREG(reg));
478
479         /* Poll for the PHY register access to complete. */
480         for (i = 0; i < BNX_TIMEOUT; i++) {
481                 DELAY(10);
482                 val = CSR_READ_4(sc, BGE_MI_COMM);
483                 if ((val & BGE_MICOMM_BUSY) == 0) {
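                        /* BUSY has cleared; pause briefly and re-read to pick up the settled value. */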
484                         DELAY(5);
485                         val = CSR_READ_4(sc, BGE_MI_COMM);
486                         break;
487                 }
488         }
489         if (i == BNX_TIMEOUT) {
490                 if_printf(&sc->arpcom.ac_if, "PHY read timed out "
491                     "(phy %d, reg %d, val 0x%08x)\n", phy, reg, val);
492                 val = 0;
493         }
494
495         /* Restore the autopoll bit if necessary. */
496         if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) {
497                 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bnx_mi_mode);
498                 DELAY(80);
499         }
500
501         if (val & BGE_MICOMM_READFAIL)
502                 return 0;
503
504         return (val & 0xFFFF);
505 }
506
507 static int
508 bnx_miibus_writereg(device_t dev, int phy, int reg, int val)
509 {
510         struct bnx_softc *sc = device_get_softc(dev);
511         int i;
512
513         KASSERT(phy == sc->bnx_phyno,
514             ("invalid phyno %d, should be %d", phy, sc->bnx_phyno));
515
516         if (sc->bnx_asicrev == BGE_ASICREV_BCM5906 &&
517             (reg == BRGPHY_MII_1000CTL || reg == BRGPHY_MII_AUXCTL))
518                return 0;
519
520         /* Clear the autopoll bit if set, otherwise may trigger PCI errors. */
521         if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) {
522                 CSR_WRITE_4(sc, BGE_MI_MODE,
523                     sc->bnx_mi_mode & ~BGE_MIMODE_AUTOPOLL);
524                 DELAY(80);
525         }
526
527         CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE | BGE_MICOMM_BUSY |
528             BGE_MIPHY(phy) | BGE_MIREG(reg) | val);
529
530         for (i = 0; i < BNX_TIMEOUT; i++) {
531                 DELAY(10);
532                 if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) {
533                         DELAY(5);
534                         CSR_READ_4(sc, BGE_MI_COMM); /* dummy read */
535                         break;
536                 }
537         }
538         if (i == BNX_TIMEOUT) {
539                 if_printf(&sc->arpcom.ac_if, "PHY write timed out "
540                     "(phy %d, reg %d, val %d)\n", phy, reg, val);
541         }
542
543         /* Restore the autopoll bit if necessary. */
544         if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) {
545                 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bnx_mi_mode);
546                 DELAY(80);
547         }
548
549         return 0;
550 }
551
552 static void
553 bnx_miibus_statchg(device_t dev)
554 {
555         struct bnx_softc *sc;
556         struct mii_data *mii;
557
558         sc = device_get_softc(dev);
559         mii = device_get_softc(sc->bnx_miibus);
560
561         if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
562             (IFM_ACTIVE | IFM_AVALID)) {
563                 switch (IFM_SUBTYPE(mii->mii_media_active)) {
564                 case IFM_10_T:
565                 case IFM_100_TX:
566                         sc->bnx_link = 1;
567                         break;
568                 case IFM_1000_T:
569                 case IFM_1000_SX:
570                 case IFM_2500_SX:
571                         if (sc->bnx_asicrev != BGE_ASICREV_BCM5906)
572                                 sc->bnx_link = 1;
573                         else
574                                 sc->bnx_link = 0;
575                         break;
576                 default:
577                         sc->bnx_link = 0;
578                         break;
579                 }
580         } else {
581                 sc->bnx_link = 0;
582         }
583         if (sc->bnx_link == 0)
584                 return;
585
586         BNX_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
587         if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
588             IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) {
589                 BNX_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
590         } else {
591                 BNX_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
592         }
593
594         if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
595                 BNX_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
596         } else {
597                 BNX_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
598         }
599 }
600
601 /*
602  * Memory management for jumbo frames.
603  */
604 static int
605 bnx_alloc_jumbo_mem(struct bnx_softc *sc)
606 {
607         struct ifnet *ifp = &sc->arpcom.ac_if;
608         struct bnx_jslot *entry;
609         uint8_t *ptr;
610         bus_addr_t paddr;
611         int i, error;
612
613         /*
614          * Create tag for jumbo mbufs.
615          * This is really a bit of a kludge. We allocate a special
616          * jumbo buffer pool which (thanks to the way our DMA
617          * memory allocation works) will consist of contiguous
618          * pages. This means that even though a jumbo buffer might
619          * be larger than a page size, we don't really need to
620          * map it into more than one DMA segment. However, the
621          * default mbuf tag will result in multi-segment mappings,
622          * so we have to create a special jumbo mbuf tag that
623          * lets us get away with mapping the jumbo buffers as
624          * a single segment. I think eventually the driver should
625          * be changed so that it uses ordinary mbufs and cluster
626          * buffers, i.e. jumbo frames can span multiple DMA
627          * descriptors. But that's a project for another day.
628          */
629
630         /*
631          * Create DMA stuffs for jumbo RX ring.
632          */
633         error = bnx_dma_block_alloc(sc, BGE_JUMBO_RX_RING_SZ,
634                                     &sc->bnx_cdata.bnx_rx_jumbo_ring_tag,
635                                     &sc->bnx_cdata.bnx_rx_jumbo_ring_map,
636                                     (void *)&sc->bnx_ldata.bnx_rx_jumbo_ring,
637                                     &sc->bnx_ldata.bnx_rx_jumbo_ring_paddr);
638         if (error) {
639                 if_printf(ifp, "could not create jumbo RX ring\n");
640                 return error;
641         }
642
643         /*
644          * Create DMA stuffs for jumbo buffer block.
645          */
646         error = bnx_dma_block_alloc(sc, BNX_JMEM,
647                                     &sc->bnx_cdata.bnx_jumbo_tag,
648                                     &sc->bnx_cdata.bnx_jumbo_map,
649                                     (void **)&sc->bnx_ldata.bnx_jumbo_buf,
650                                     &paddr);
651         if (error) {
652                 if_printf(ifp, "could not create jumbo buffer\n");
653                 return error;
654         }
655
656         SLIST_INIT(&sc->bnx_jfree_listhead);
657
658         /*
659          * Now divide it up into 9K pieces and save the addresses
660          * in an array. Note that we play an evil trick here by using
661  * the first few bytes in the buffer to hold the address
662          * of the softc structure for this interface. This is because
663          * bnx_jfree() needs it, but it is called by the mbuf management
664          * code which will not pass it to us explicitly.
665          */
666         for (i = 0, ptr = sc->bnx_ldata.bnx_jumbo_buf; i < BNX_JSLOTS; i++) {
667                 entry = &sc->bnx_cdata.bnx_jslots[i];
668                 entry->bnx_sc = sc;
669                 entry->bnx_buf = ptr;
670                 entry->bnx_paddr = paddr;
671                 entry->bnx_inuse = 0;
672                 entry->bnx_slot = i;
673                 SLIST_INSERT_HEAD(&sc->bnx_jfree_listhead, entry, jslot_link);
674
675                 ptr += BNX_JLEN;
676                 paddr += BNX_JLEN;
677         }
678         return 0;
679 }
680
681 static void
682 bnx_free_jumbo_mem(struct bnx_softc *sc)
683 {
684         /* Destroy jumbo RX ring. */
685         bnx_dma_block_free(sc->bnx_cdata.bnx_rx_jumbo_ring_tag,
686                            sc->bnx_cdata.bnx_rx_jumbo_ring_map,
687                            sc->bnx_ldata.bnx_rx_jumbo_ring);
688
689         /* Destroy jumbo buffer block. */
690         bnx_dma_block_free(sc->bnx_cdata.bnx_jumbo_tag,
691                            sc->bnx_cdata.bnx_jumbo_map,
692                            sc->bnx_ldata.bnx_jumbo_buf);
693 }
694
695 /*
696  * Allocate a jumbo buffer.
697  */
698 static struct bnx_jslot *
699 bnx_jalloc(struct bnx_softc *sc)
700 {
701         struct bnx_jslot *entry;
702
703         lwkt_serialize_enter(&sc->bnx_jslot_serializer);
704         entry = SLIST_FIRST(&sc->bnx_jfree_listhead);
705         if (entry) {
706                 SLIST_REMOVE_HEAD(&sc->bnx_jfree_listhead, jslot_link);
707                 entry->bnx_inuse = 1;
708         } else {
709                 if_printf(&sc->arpcom.ac_if, "no free jumbo buffers\n");
710         }
711         lwkt_serialize_exit(&sc->bnx_jslot_serializer);
712         return(entry);
713 }
714
715 /*
716  * Adjust usage count on a jumbo buffer.
717  */
718 static void
719 bnx_jref(void *arg)
720 {
721         struct bnx_jslot *entry = (struct bnx_jslot *)arg;
722         struct bnx_softc *sc = entry->bnx_sc;
723
724         if (sc == NULL)
725                 panic("bnx_jref: can't find softc pointer!");
726
727         if (&sc->bnx_cdata.bnx_jslots[entry->bnx_slot] != entry) {
728                 panic("bnx_jref: asked to reference buffer "
729                     "that we don't manage!");
730         } else if (entry->bnx_inuse == 0) {
731                 panic("bnx_jref: buffer already free!");
732         } else {
733                 atomic_add_int(&entry->bnx_inuse, 1);
734         }
735 }
736
737 /*
738  * Release a jumbo buffer.
739  */
740 static void
741 bnx_jfree(void *arg)
742 {
743         struct bnx_jslot *entry = (struct bnx_jslot *)arg;
744         struct bnx_softc *sc = entry->bnx_sc;
745
746         if (sc == NULL)
747                 panic("bnx_jfree: can't find softc pointer!");
748
749         if (&sc->bnx_cdata.bnx_jslots[entry->bnx_slot] != entry) {
750                 panic("bnx_jfree: asked to free buffer that we don't manage!");
751         } else if (entry->bnx_inuse == 0) {
752                 panic("bnx_jfree: buffer already free!");
753         } else {
754                 /*
755                  * Possible MP race to 0, use the serializer.  The atomic insn
756                  * is still needed for races against bnx_jref().
757                  */
758                 lwkt_serialize_enter(&sc->bnx_jslot_serializer);
759                 atomic_subtract_int(&entry->bnx_inuse, 1);
760                 if (entry->bnx_inuse == 0) {
761                         SLIST_INSERT_HEAD(&sc->bnx_jfree_listhead, 
762                                           entry, jslot_link);
763                 }
764                 lwkt_serialize_exit(&sc->bnx_jslot_serializer);
765         }
766 }
767
768
769 /*
770  * Initialize a standard receive ring descriptor.
771  */
772 static int
773 bnx_newbuf_std(struct bnx_softc *sc, int i, int init)
774 {
775         struct mbuf *m_new = NULL;
776         bus_dma_segment_t seg;
777         bus_dmamap_t map;
778         int error, nsegs;
779
780         m_new = m_getcl(init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
781         if (m_new == NULL)
782                 return ENOBUFS;
783         m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
784         m_adj(m_new, ETHER_ALIGN);
785
786         error = bus_dmamap_load_mbuf_segment(sc->bnx_cdata.bnx_rx_mtag,
787                         sc->bnx_cdata.bnx_rx_tmpmap, m_new,
788                         &seg, 1, &nsegs, BUS_DMA_NOWAIT);
789         if (error) {
790                 m_freem(m_new);
791                 return error;
792         }
793
794         if (!init) {
795                 bus_dmamap_sync(sc->bnx_cdata.bnx_rx_mtag,
796                                 sc->bnx_cdata.bnx_rx_std_dmamap[i],
797                                 BUS_DMASYNC_POSTREAD);
798                 bus_dmamap_unload(sc->bnx_cdata.bnx_rx_mtag,
799                         sc->bnx_cdata.bnx_rx_std_dmamap[i]);
800         }
801
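        /*
         * Swap the just-loaded spare map into this ring slot and keep the
         * slot's old map as the spare for the next replenish.
         */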
802         map = sc->bnx_cdata.bnx_rx_tmpmap;
803         sc->bnx_cdata.bnx_rx_tmpmap = sc->bnx_cdata.bnx_rx_std_dmamap[i];
804         sc->bnx_cdata.bnx_rx_std_dmamap[i] = map;
805
806         sc->bnx_cdata.bnx_rx_std_chain[i].bnx_mbuf = m_new;
807         sc->bnx_cdata.bnx_rx_std_chain[i].bnx_paddr = seg.ds_addr;
808
809         bnx_setup_rxdesc_std(sc, i);
810         return 0;
811 }
812
813 static void
814 bnx_setup_rxdesc_std(struct bnx_softc *sc, int i)
815 {
816         struct bnx_rxchain *rc;
817         struct bge_rx_bd *r;
818
819         rc = &sc->bnx_cdata.bnx_rx_std_chain[i];
820         r = &sc->bnx_ldata.bnx_rx_std_ring[i];
821
822         r->bge_addr.bge_addr_lo = BGE_ADDR_LO(rc->bnx_paddr);
823         r->bge_addr.bge_addr_hi = BGE_ADDR_HI(rc->bnx_paddr);
824         r->bge_len = rc->bnx_mbuf->m_len;
825         r->bge_idx = i;
826         r->bge_flags = BGE_RXBDFLAG_END;
827 }
828
829 /*
830  * Initialize a jumbo receive ring descriptor. This allocates
831  * a jumbo buffer from the pool managed internally by the driver.
832  */
833 static int
834 bnx_newbuf_jumbo(struct bnx_softc *sc, int i, int init)
835 {
836         struct mbuf *m_new = NULL;
837         struct bnx_jslot *buf;
838         bus_addr_t paddr;
839
840         /* Allocate the mbuf. */
841         MGETHDR(m_new, init ? MB_WAIT : MB_DONTWAIT, MT_DATA);
842         if (m_new == NULL)
843                 return ENOBUFS;
844
845         /* Allocate the jumbo buffer */
846         buf = bnx_jalloc(sc);
847         if (buf == NULL) {
848                 m_freem(m_new);
849                 return ENOBUFS;
850         }
851
852         /* Attach the buffer to the mbuf. */
853         m_new->m_ext.ext_arg = buf;
854         m_new->m_ext.ext_buf = buf->bnx_buf;
855         m_new->m_ext.ext_free = bnx_jfree;
856         m_new->m_ext.ext_ref = bnx_jref;
857         m_new->m_ext.ext_size = BNX_JUMBO_FRAMELEN;
858
859         m_new->m_flags |= M_EXT;
860
861         m_new->m_data = m_new->m_ext.ext_buf;
862         m_new->m_len = m_new->m_pkthdr.len = m_new->m_ext.ext_size;
863
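        /*
         * m_adj() below shifts the payload by ETHER_ALIGN, so advance the
         * physical address by the same amount to keep them in sync.
         */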
864         paddr = buf->bnx_paddr;
865         m_adj(m_new, ETHER_ALIGN);
866         paddr += ETHER_ALIGN;
867
868         /* Save necessary information */
869         sc->bnx_cdata.bnx_rx_jumbo_chain[i].bnx_mbuf = m_new;
870         sc->bnx_cdata.bnx_rx_jumbo_chain[i].bnx_paddr = paddr;
871
872         /* Set up the descriptor. */
873         bnx_setup_rxdesc_jumbo(sc, i);
874         return 0;
875 }
876
877 static void
878 bnx_setup_rxdesc_jumbo(struct bnx_softc *sc, int i)
879 {
880         struct bge_rx_bd *r;
881         struct bnx_rxchain *rc;
882
883         r = &sc->bnx_ldata.bnx_rx_jumbo_ring[i];
884         rc = &sc->bnx_cdata.bnx_rx_jumbo_chain[i];
885
886         r->bge_addr.bge_addr_lo = BGE_ADDR_LO(rc->bnx_paddr);
887         r->bge_addr.bge_addr_hi = BGE_ADDR_HI(rc->bnx_paddr);
888         r->bge_len = rc->bnx_mbuf->m_len;
889         r->bge_idx = i;
890         r->bge_flags = BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING;
891 }
892
893 static int
894 bnx_init_rx_ring_std(struct bnx_softc *sc)
895 {
896         int i, error;
897
898         for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
899                 error = bnx_newbuf_std(sc, i, 1);
900                 if (error)
901                         return error;
902         }
903
904         sc->bnx_std = BGE_STD_RX_RING_CNT - 1;
905         bnx_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bnx_std);
906
907         return(0);
908 }
909
910 static void
911 bnx_free_rx_ring_std(struct bnx_softc *sc)
912 {
913         int i;
914
915         for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
916                 struct bnx_rxchain *rc = &sc->bnx_cdata.bnx_rx_std_chain[i];
917
918                 if (rc->bnx_mbuf != NULL) {
919                         bus_dmamap_unload(sc->bnx_cdata.bnx_rx_mtag,
920                                           sc->bnx_cdata.bnx_rx_std_dmamap[i]);
921                         m_freem(rc->bnx_mbuf);
922                         rc->bnx_mbuf = NULL;
923                 }
924                 bzero(&sc->bnx_ldata.bnx_rx_std_ring[i],
925                     sizeof(struct bge_rx_bd));
926         }
927 }
928
929 static int
930 bnx_init_rx_ring_jumbo(struct bnx_softc *sc)
931 {
932         struct bge_rcb *rcb;
933         int i, error;
934
935         for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
936                 error = bnx_newbuf_jumbo(sc, i, 1);
937                 if (error)
938                         return error;
939         }
940
941         sc->bnx_jumbo = BGE_JUMBO_RX_RING_CNT - 1;
942
943         rcb = &sc->bnx_ldata.bnx_info.bnx_jumbo_rx_rcb;
944         rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 0);
945         CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
946
947         bnx_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bnx_jumbo);
948
949         return(0);
950 }
951
952 static void
953 bnx_free_rx_ring_jumbo(struct bnx_softc *sc)
954 {
955         int i;
956
957         for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
958                 struct bnx_rxchain *rc = &sc->bnx_cdata.bnx_rx_jumbo_chain[i];
959
960                 if (rc->bnx_mbuf != NULL) {
961                         m_freem(rc->bnx_mbuf);
962                         rc->bnx_mbuf = NULL;
963                 }
964                 bzero(&sc->bnx_ldata.bnx_rx_jumbo_ring[i],
965                     sizeof(struct bge_rx_bd));
966         }
967 }
968
969 static void
970 bnx_free_tx_ring(struct bnx_softc *sc)
971 {
972         int i;
973
974         for (i = 0; i < BGE_TX_RING_CNT; i++) {
975                 if (sc->bnx_cdata.bnx_tx_chain[i] != NULL) {
976                         bus_dmamap_unload(sc->bnx_cdata.bnx_tx_mtag,
977                                           sc->bnx_cdata.bnx_tx_dmamap[i]);
978                         m_freem(sc->bnx_cdata.bnx_tx_chain[i]);
979                         sc->bnx_cdata.bnx_tx_chain[i] = NULL;
980                 }
981                 bzero(&sc->bnx_ldata.bnx_tx_ring[i],
982                     sizeof(struct bge_tx_bd));
983         }
984 }
985
986 static int
987 bnx_init_tx_ring(struct bnx_softc *sc)
988 {
989         sc->bnx_txcnt = 0;
990         sc->bnx_tx_saved_considx = 0;
991         sc->bnx_tx_prodidx = 0;
992
993         /* Initialize transmit producer index for host-memory send ring. */
994         bnx_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bnx_tx_prodidx);
995         bnx_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
996
997         return(0);
998 }
999
1000 static void
1001 bnx_setmulti(struct bnx_softc *sc)
1002 {
1003         struct ifnet *ifp;
1004         struct ifmultiaddr *ifma;
1005         uint32_t hashes[4] = { 0, 0, 0, 0 };
1006         int h, i;
1007
1008         ifp = &sc->arpcom.ac_if;
1009
1010         if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
1011                 for (i = 0; i < 4; i++)
1012                         CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
1013                 return;
1014         }
1015
1016         /* First, zot all the existing filters. */
1017         for (i = 0; i < 4; i++)
1018                 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);
1019
1020         /* Now program new ones. */
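        /*
         * Hash each multicast address with CRC32 and use the low 7 bits to
         * set one bit of the 128-bit filter spread across the four MAR registers.
         */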
1021         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1022                 if (ifma->ifma_addr->sa_family != AF_LINK)
1023                         continue;
1024                 h = ether_crc32_le(
1025                     LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
1026                     ETHER_ADDR_LEN) & 0x7f;
1027                 hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
1028         }
1029
1030         for (i = 0; i < 4; i++)
1031                 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
1032 }
1033
1034 /*
1035  * Do endian, PCI and DMA initialization. Also check the on-board ROM
1036  * self-test results.
1037  */
1038 static int
1039 bnx_chipinit(struct bnx_softc *sc)
1040 {
1041         uint32_t dma_rw_ctl, mode_ctl;
1042         int i;
1043
1044         /* Set endian type before we access any non-PCI registers. */
1045         pci_write_config(sc->bnx_dev, BGE_PCI_MISC_CTL,
1046             BGE_INIT | BGE_PCIMISCCTL_TAGGED_STATUS, 4);
1047
1048         /* Clear the MAC control register */
1049         CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
1050
1051         /*
1052          * Clear the MAC statistics block in the NIC's
1053          * internal memory.
1054          */
1055         for (i = BGE_STATS_BLOCK;
1056             i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t))
1057                 BNX_MEMWIN_WRITE(sc, i, 0);
1058
1059         for (i = BGE_STATUS_BLOCK;
1060             i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t))
1061                 BNX_MEMWIN_WRITE(sc, i, 0);
1062
1063         if (BNX_IS_57765_FAMILY(sc)) {
1064                 uint32_t val;
1065
1066                 if (sc->bnx_chipid == BGE_CHIPID_BCM57765_A0) {
1067                         mode_ctl = CSR_READ_4(sc, BGE_MODE_CTL);
1068                         val = mode_ctl & ~BGE_MODECTL_PCIE_PORTS;
1069
1070                         /* Access the lower 1K of PL PCI-E block registers. */
1071                         CSR_WRITE_4(sc, BGE_MODE_CTL,
1072                             val | BGE_MODECTL_PCIE_PL_SEL);
1073
1074                         val = CSR_READ_4(sc, BGE_PCIE_PL_LO_PHYCTL5);
1075                         val |= BGE_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ;
1076                         CSR_WRITE_4(sc, BGE_PCIE_PL_LO_PHYCTL5, val);
1077
1078                         CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl);
1079                 }
1080                 if (sc->bnx_chiprev != BGE_CHIPREV_57765_AX) {
1081                         mode_ctl = CSR_READ_4(sc, BGE_MODE_CTL);
1082                         val = mode_ctl & ~BGE_MODECTL_PCIE_PORTS;
1083
1084                         /* Access the lower 1K of DL PCI-E block registers. */
1085                         CSR_WRITE_4(sc, BGE_MODE_CTL,
1086                             val | BGE_MODECTL_PCIE_DL_SEL);
1087
1088                         val = CSR_READ_4(sc, BGE_PCIE_DL_LO_FTSMAX);
1089                         val &= ~BGE_PCIE_DL_LO_FTSMAX_MASK;
1090                         val |= BGE_PCIE_DL_LO_FTSMAX_VAL;
1091                         CSR_WRITE_4(sc, BGE_PCIE_DL_LO_FTSMAX, val);
1092
1093                         CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl);
1094                 }
1095
1096                 val = CSR_READ_4(sc, BGE_CPMU_LSPD_10MB_CLK);
1097                 val &= ~BGE_CPMU_LSPD_10MB_MACCLK_MASK;
1098                 val |= BGE_CPMU_LSPD_10MB_MACCLK_6_25;
1099                 CSR_WRITE_4(sc, BGE_CPMU_LSPD_10MB_CLK, val);
1100         }
1101
1102         /*
1103          * Set up the PCI DMA control register.
1104          */
1105         dma_rw_ctl = pci_read_config(sc->bnx_dev, BGE_PCI_DMA_RW_CTL, 4);
1106         /*
1107          * Disable 32bytes cache alignment for DMA write to host memory
1108          *
1109          * NOTE:
1110          * 64bytes cache alignment for DMA write to host memory is still
1111          * enabled.
1112          */
1113         dma_rw_ctl |= BGE_PCIDMARWCTL_DIS_CACHE_ALIGNMENT;
1114         if (sc->bnx_chipid == BGE_CHIPID_BCM57765_A0)
1115                 dma_rw_ctl &= ~BGE_PCIDMARWCTL_CRDRDR_RDMA_MRRS_MSK;
1116         /*
1117          * Enable HW workaround for controllers that misinterpret
1118          * a status tag update and leave interrupts permanently
1119          * disabled.
1120          */
1121         if (sc->bnx_asicrev != BGE_ASICREV_BCM5717 &&
1122             !BNX_IS_57765_FAMILY(sc))
1123                 dma_rw_ctl |= BGE_PCIDMARWCTL_TAGGED_STATUS_WA;
1124         if (bootverbose) {
1125                 if_printf(&sc->arpcom.ac_if, "DMA read/write %#x\n",
1126                     dma_rw_ctl);
1127         }
1128         pci_write_config(sc->bnx_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);
1129
1130         /*
1131          * Set up general mode register.
1132          */
1133         mode_ctl = bnx_dma_swap_options(sc) | BGE_MODECTL_MAC_ATTN_INTR |
1134             BGE_MODECTL_HOST_SEND_BDS | BGE_MODECTL_TX_NO_PHDR_CSUM;
1135         CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl);
1136
1137         /*
1138          * Disable memory write invalidate.  Apparently it is not supported
1139          * properly by these devices.  Also ensure that INTx isn't disabled,
1140          * as these chips need it even when using MSI.
1141          */
1142         PCI_CLRBIT(sc->bnx_dev, BGE_PCI_CMD,
1143             (PCIM_CMD_MWRICEN | PCIM_CMD_INTxDIS), 4);
1144
1145         /* Set the timer prescaler (always 66MHz) */
1146         CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/);
1147
1148         if (sc->bnx_asicrev == BGE_ASICREV_BCM5906) {
1149                 DELAY(40);      /* XXX */
1150
1151                 /* Put PHY into ready state */
1152                 BNX_CLRBIT(sc, BGE_MISC_CFG, BGE_MISCCFG_EPHY_IDDQ);
1153                 CSR_READ_4(sc, BGE_MISC_CFG); /* Flush */
1154                 DELAY(40);
1155         }
1156
1157         return(0);
1158 }
1159
1160 static int
1161 bnx_blockinit(struct bnx_softc *sc)
1162 {
1163         struct bge_rcb *rcb;
1164         bus_size_t vrcb;
1165         bge_hostaddr taddr;
1166         uint32_t val;
1167         int i, limit;
1168
1169         /*
1170          * Initialize the memory window pointer register so that
1171          * we can access the first 32K of internal NIC RAM. This will
1172          * allow us to set up the TX send ring RCBs and the RX return
1173          * ring RCBs, plus other things which live in NIC memory.
1174          */
1175         CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);
1176
1177         /* Configure mbuf pool watermarks */
1178         if (BNX_IS_57765_PLUS(sc)) {
1179                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1180                 if (sc->arpcom.ac_if.if_mtu > ETHERMTU) {
1181                         CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x7e);
1182                         CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xea);
1183                 } else {
1184                         CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x2a);
1185                         CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xa0);
1186                 }
1187         } else if (sc->bnx_asicrev == BGE_ASICREV_BCM5906) {
1188                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1189                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x04);
1190                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x10);
1191         } else {
1192                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1193                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
1194                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1195         }
1196
1197         /* Configure DMA resource watermarks */
1198         CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
1199         CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);
1200
1201         /* Enable buffer manager */
1202         val = BGE_BMANMODE_ENABLE | BGE_BMANMODE_LOMBUF_ATTN;
1203         /*
1204          * Change the arbitration algorithm of TXMBUF read request to
1205          * round-robin instead of priority based for BCM5719.  When
1206          * TXFIFO is almost empty, RDMA will hold its request until
1207          * TXFIFO is not almost empty.
1208          */
1209         if (sc->bnx_asicrev == BGE_ASICREV_BCM5719)
1210                 val |= BGE_BMANMODE_NO_TX_UNDERRUN;
1211         if (sc->bnx_asicrev == BGE_ASICREV_BCM5717 ||
1212             sc->bnx_chipid == BGE_CHIPID_BCM5719_A0 ||
1213             sc->bnx_chipid == BGE_CHIPID_BCM5720_A0)
1214                 val |= BGE_BMANMODE_LOMBUF_ATTN;
1215         CSR_WRITE_4(sc, BGE_BMAN_MODE, val);
1216
1217         /* Poll for buffer manager start indication */
1218         for (i = 0; i < BNX_TIMEOUT; i++) {
1219                 if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
1220                         break;
1221                 DELAY(10);
1222         }
1223
1224         if (i == BNX_TIMEOUT) {
1225                 if_printf(&sc->arpcom.ac_if,
1226                           "buffer manager failed to start\n");
1227                 return(ENXIO);
1228         }
1229
1230         /* Enable flow-through queues */
1231         CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
1232         CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
1233
1234         /* Wait until queue initialization is complete */
1235         for (i = 0; i < BNX_TIMEOUT; i++) {
1236                 if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
1237                         break;
1238                 DELAY(10);
1239         }
1240
1241         if (i == BNX_TIMEOUT) {
1242                 if_printf(&sc->arpcom.ac_if,
1243                           "flow-through queue init failed\n");
1244                 return(ENXIO);
1245         }
1246
1247         /*
1248          * Summary of rings supported by the controller:
1249          *
1250          * Standard Receive Producer Ring
1251          * - This ring is used to feed receive buffers for "standard"
1252          *   sized frames (typically 1536 bytes) to the controller.
1253          *
1254          * Jumbo Receive Producer Ring
1255          * - This ring is used to feed receive buffers for jumbo sized
1256          *   frames (i.e. anything bigger than the "standard" frames)
1257          *   to the controller.
1258          *
1259          * Mini Receive Producer Ring
1260          * - This ring is used to feed receive buffers for "mini"
1261          *   sized frames to the controller.
1262          * - This feature required external memory for the controller
1263          *   but was never used in a production system.  Should always
1264          *   be disabled.
1265          *
1266          * Receive Return Ring
1267          * - After the controller has placed an incoming frame into a
1268          *   receive buffer, that buffer is moved into a receive return
1269          *   ring.  The driver is then responsible for passing the
1270          *   buffer up to the stack.  Many versions of the controller
1271          *   support multiple RR rings.
1272          *
1273          * Send Ring
1274          * - This ring is used for outgoing frames.  Many versions of
1275          *   the controller support multiple send rings.
1276          */
1277
1278         /* Initialize the standard receive producer ring control block. */
1279         rcb = &sc->bnx_ldata.bnx_info.bnx_std_rx_rcb;
1280         rcb->bge_hostaddr.bge_addr_lo =
1281             BGE_ADDR_LO(sc->bnx_ldata.bnx_rx_std_ring_paddr);
1282         rcb->bge_hostaddr.bge_addr_hi =
1283             BGE_ADDR_HI(sc->bnx_ldata.bnx_rx_std_ring_paddr);
1284         if (BNX_IS_57765_PLUS(sc)) {
1285                 /*
1286                  * Bits 31-16: Programmable ring size (2048, 1024, 512, .., 32)
1287                  * Bits 15-2 : Maximum RX frame size
1288          * Bit 1     : 1 = Ring Disabled, 0 = Ring Enabled
1289                  * Bit 0     : Reserved
1290                  */
1291                 rcb->bge_maxlen_flags =
1292                     BGE_RCB_MAXLEN_FLAGS(512, BNX_MAX_FRAMELEN << 2);
1293         } else {
1294                 /*
1295                  * Bits 31-16: Programmable ring size (512, 256, 128, 64, 32)
1296                  * Bits 15-2 : Reserved (should be 0)
1297                  * Bit 1     : 1 = Ring Disabled, 0 = Ring Enabled
1298                  * Bit 0     : Reserved
1299                  */
1300                 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
1301         }
1302         if (sc->bnx_asicrev == BGE_ASICREV_BCM5717 ||
1303             sc->bnx_asicrev == BGE_ASICREV_BCM5719 ||
1304             sc->bnx_asicrev == BGE_ASICREV_BCM5720)
1305                 rcb->bge_nicaddr = BGE_STD_RX_RINGS_5717;
1306         else
1307                 rcb->bge_nicaddr = BGE_STD_RX_RINGS;
1308         /* Write the standard receive producer ring control block. */
1309         CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
1310         CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
1311         CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1312         CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
1313         /* Reset the standard receive producer ring producer index. */
1314         bnx_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0);
1315
1316         /*
1317          * Initialize the jumbo RX producer ring control
1318          * block.  We set the 'ring disabled' bit in the
1319          * flags field until we're actually ready to start
1320          * using this ring (i.e. once we set the MTU
1321          * high enough to require it).
1322          */
1323         if (BNX_IS_JUMBO_CAPABLE(sc)) {
1324                 rcb = &sc->bnx_ldata.bnx_info.bnx_jumbo_rx_rcb;
1325                 /* Get the jumbo receive producer ring RCB parameters. */
1326                 rcb->bge_hostaddr.bge_addr_lo =
1327                     BGE_ADDR_LO(sc->bnx_ldata.bnx_rx_jumbo_ring_paddr);
1328                 rcb->bge_hostaddr.bge_addr_hi =
1329                     BGE_ADDR_HI(sc->bnx_ldata.bnx_rx_jumbo_ring_paddr);
1330                 rcb->bge_maxlen_flags =
1331                     BGE_RCB_MAXLEN_FLAGS(BNX_MAX_FRAMELEN,
1332                     BGE_RCB_FLAG_RING_DISABLED);
1333                 if (sc->bnx_asicrev == BGE_ASICREV_BCM5717 ||
1334                     sc->bnx_asicrev == BGE_ASICREV_BCM5719 ||
1335                     sc->bnx_asicrev == BGE_ASICREV_BCM5720)
1336                         rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS_5717;
1337                 else
1338                         rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
1339                 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
1340                     rcb->bge_hostaddr.bge_addr_hi);
1341                 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
1342                     rcb->bge_hostaddr.bge_addr_lo);
1343                 /* Program the jumbo receive producer ring RCB parameters. */
1344                 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
1345                     rcb->bge_maxlen_flags);
1346                 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
1347                 /* Reset the jumbo receive producer ring producer index. */
1348                 bnx_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
1349         }
1350
1351         /* Choose de-pipeline mode for BCM5906 A0, A1 and A2. */
1352         if (sc->bnx_asicrev == BGE_ASICREV_BCM5906 &&
1353             (sc->bnx_chipid == BGE_CHIPID_BCM5906_A0 ||
1354              sc->bnx_chipid == BGE_CHIPID_BCM5906_A1 ||
1355              sc->bnx_chipid == BGE_CHIPID_BCM5906_A2)) {
1356                 CSR_WRITE_4(sc, BGE_ISO_PKT_TX,
1357                     (CSR_READ_4(sc, BGE_ISO_PKT_TX) & ~3) | 2);
1358         }
1359
1360         /*
1361          * The BD ring replenish thresholds control how often the
1362          * hardware fetches new BD's from the producer rings in host
1363          * memory.  Setting the value too low on a busy system can
1364          * starve the hardware and reduce the throughput.
1365          *
1366          * Set the BD ring replenish thresholds. The recommended
1367          * values are 1/8th the number of descriptors allocated to
1368          * each ring.
1369          */
1370         val = 8;
1371         CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, val);
1372         if (BNX_IS_JUMBO_CAPABLE(sc)) {
1373                 CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH,
1374                     BGE_JUMBO_RX_RING_CNT/8);
1375         }
1376         if (BNX_IS_57765_PLUS(sc)) {
1377                 CSR_WRITE_4(sc, BGE_STD_REPLENISH_LWM, 32);
1378                 CSR_WRITE_4(sc, BGE_JMB_REPLENISH_LWM, 16);
1379         }
1380
1381         /*
1382          * Disable all send rings by setting the 'ring disabled' bit
1383          * in the flags field of all the TX send ring control blocks,
1384          * located in NIC memory.
1385          */
1386         if (BNX_IS_5717_PLUS(sc))
1387                 limit = 4;
1388         else if (BNX_IS_57765_FAMILY(sc))
1389                 limit = 2;
1390         else
1391                 limit = 1;
1392         vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1393         for (i = 0; i < limit; i++) {
1394                 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1395                     BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
1396                 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1397                 vrcb += sizeof(struct bge_rcb);
1398         }
1399
1400         /* Configure send ring RCB 0 (we use only the first ring) */
1401         vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1402         BGE_HOSTADDR(taddr, sc->bnx_ldata.bnx_tx_ring_paddr);
1403         RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1404         RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1405         if (sc->bnx_asicrev == BGE_ASICREV_BCM5717 ||
1406             sc->bnx_asicrev == BGE_ASICREV_BCM5719 ||
1407             sc->bnx_asicrev == BGE_ASICREV_BCM5720) {
1408                 RCB_WRITE_4(sc, vrcb, bge_nicaddr, BGE_SEND_RING_5717);
1409         } else {
1410                 RCB_WRITE_4(sc, vrcb, bge_nicaddr,
1411                     BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
1412         }
1413         RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1414             BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
1415
1416         /*
1417          * Disable all receive return rings by setting the
1418          * 'ring disabled' bit in the flags field of all the receive
1419          * return ring control blocks, located in NIC memory.
1420          */
1421         if (BNX_IS_5717_PLUS(sc)) {
1422                 /* Should be 17, use 16 until we get an SRAM map. */
1423                 limit = 16;
1424         } else if (BNX_IS_57765_FAMILY(sc)) {
1425                 limit = 4;
1426         } else {
1427                 limit = 1;
1428         }
1429         /* Disable all receive return rings. */
1430         vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1431         for (i = 0; i < limit; i++) {
1432                 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0);
1433                 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0);
1434                 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1435                     BGE_RCB_FLAG_RING_DISABLED);
1436                 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1437                 bnx_writembx(sc, BGE_MBX_RX_CONS0_LO +
1438                     (i * (sizeof(uint64_t))), 0);
1439                 vrcb += sizeof(struct bge_rcb);
1440         }
1441
1442         /*
1443          * Set up receive return ring 0.  Note that the NIC address
1444          * for RX return rings is 0x0.  The return rings live entirely
1445          * within the host, so the nicaddr field in the RCB isn't used.
1446          */
1447         vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1448         BGE_HOSTADDR(taddr, sc->bnx_ldata.bnx_rx_return_ring_paddr);
1449         RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1450         RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1451         RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1452         RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1453             BGE_RCB_MAXLEN_FLAGS(sc->bnx_return_ring_cnt, 0));
1454
1455         /* Set random backoff seed for TX */
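        /*
         * NOTE: The seed is derived from the station address, so NICs
         * sharing a half-duplex segment are unlikely to pick identical
         * collision backoff sequences.
         */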
1456         CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
1457             sc->arpcom.ac_enaddr[0] + sc->arpcom.ac_enaddr[1] +
1458             sc->arpcom.ac_enaddr[2] + sc->arpcom.ac_enaddr[3] +
1459             sc->arpcom.ac_enaddr[4] + sc->arpcom.ac_enaddr[5] +
1460             BGE_TX_BACKOFF_SEED_MASK);
1461
1462         /* Set inter-packet gap */
1463         val = 0x2620;
1464         if (sc->bnx_asicrev == BGE_ASICREV_BCM5720) {
1465                 val |= CSR_READ_4(sc, BGE_TX_LENGTHS) &
1466                     (BGE_TXLEN_JMB_FRM_LEN_MSK | BGE_TXLEN_CNT_DN_VAL_MSK);
1467         }
1468         CSR_WRITE_4(sc, BGE_TX_LENGTHS, val);
1469
1470         /*
1471          * Specify which ring to use for packets that don't match
1472          * any RX rules.
1473          */
1474         CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
1475
1476         /*
1477          * Configure number of RX lists. One interrupt distribution
1478          * list, sixteen active lists, one bad frames class.
1479          */
1480         CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
1481
1482         /* Initialize RX list placement stats mask. */
1483         CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
1484         CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
1485
1486         /* Disable host coalescing until we get it set up */
1487         CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
1488
1489         /* Poll to make sure it's shut down. */
1490         for (i = 0; i < BNX_TIMEOUT; i++) {
1491                 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
1492                         break;
1493                 DELAY(10);
1494         }
1495
1496         if (i == BNX_TIMEOUT) {
1497                 if_printf(&sc->arpcom.ac_if,
1498                           "host coalescing engine failed to idle\n");
1499                 return(ENXIO);
1500         }
1501
1502         /* Set up host coalescing defaults */
1503         CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bnx_rx_coal_ticks);
1504         CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bnx_tx_coal_ticks);
1505         CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bnx_rx_coal_bds);
1506         CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bnx_tx_coal_bds);
1507         CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, sc->bnx_rx_coal_bds_int);
1508         CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, sc->bnx_tx_coal_bds_int);
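        /*
         * NOTE: The *_coal_ticks values defer the status block update
         * (and interrupt) until roughly that many microseconds have
         * passed, while the *_coal_bds values force an update once that
         * many buffer descriptors have been coalesced; whichever limit
         * is reached first wins.  The defaults programmed above can be
         * tuned at runtime through the sysctl nodes created in
         * bnx_attach().
         */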
1509
1510         /* Set up address of status block */
1511         bzero(sc->bnx_ldata.bnx_status_block, BGE_STATUS_BLK_SZ);
1512         CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI,
1513             BGE_ADDR_HI(sc->bnx_ldata.bnx_status_block_paddr));
1514         CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
1515             BGE_ADDR_LO(sc->bnx_ldata.bnx_status_block_paddr));
1516
1517         /* Set up status block partial update size. */
1518         val = BGE_STATBLKSZ_32BYTE;
1519 #if 0
1520         /*
1521          * Does not seem to have visible effect in both
1522          * bulk data (1472B UDP datagram) and tiny data
1523          * (18B UDP datagram) TX tests.
1524          */
1525         val |= BGE_HCCMODE_CLRTICK_TX;
1526 #endif
1527         /* Turn on host coalescing state machine */
1528         CSR_WRITE_4(sc, BGE_HCC_MODE, val | BGE_HCCMODE_ENABLE);
1529
1530         /* Turn on RX BD completion state machine and enable attentions */
1531         CSR_WRITE_4(sc, BGE_RBDC_MODE,
1532             BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);
1533
1534         /* Turn on RX list placement state machine */
1535         CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
1536
1537         val = BGE_MACMODE_TXDMA_ENB | BGE_MACMODE_RXDMA_ENB |
1538             BGE_MACMODE_RX_STATS_CLEAR | BGE_MACMODE_TX_STATS_CLEAR |
1539             BGE_MACMODE_RX_STATS_ENB | BGE_MACMODE_TX_STATS_ENB |
1540             BGE_MACMODE_FRMHDR_DMA_ENB;
1541
1542         if (sc->bnx_flags & BNX_FLAG_TBI)
1543                 val |= BGE_PORTMODE_TBI;
1544         else if (sc->bnx_flags & BNX_FLAG_MII_SERDES)
1545                 val |= BGE_PORTMODE_GMII;
1546         else
1547                 val |= BGE_PORTMODE_MII;
1548
1549         /* Turn on DMA, clear stats */
1550         CSR_WRITE_4(sc, BGE_MAC_MODE, val);
1551
1552         /* Set misc. local control, enable interrupts on attentions */
1553         CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);
1554
1555 #ifdef notdef
1556         /* Assert GPIO pins for PHY reset */
1557         BNX_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
1558             BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
1559         BNX_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
1560             BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
1561 #endif
1562
1563         /* Turn on write DMA state machine */
1564         val = BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS;
1565         /* Enable host coalescing bug fix. */
1566         val |= BGE_WDMAMODE_STATUS_TAG_FIX;
1567         if (sc->bnx_asicrev == BGE_ASICREV_BCM5785) {
1568                 /* Request larger DMA burst size to get better performance. */
1569                 val |= BGE_WDMAMODE_BURST_ALL_DATA;
1570         }
1571         CSR_WRITE_4(sc, BGE_WDMA_MODE, val);
1572         DELAY(40);
1573
1574         if (BNX_IS_57765_PLUS(sc)) {
1575                 uint32_t dmactl;
1576
1577                 dmactl = CSR_READ_4(sc, BGE_RDMA_RSRVCTRL);
1578                 /*
1579                  * Adjust tx margin to prevent TX data corruption and
1580                  * fix internal FIFO overflow.
1581                  */
1582                 if (sc->bnx_asicrev == BGE_ASICREV_BCM5719 ||
1583                     sc->bnx_asicrev == BGE_ASICREV_BCM5720) {
1584                         dmactl &= ~(BGE_RDMA_RSRVCTRL_FIFO_LWM_MASK |
1585                             BGE_RDMA_RSRVCTRL_FIFO_HWM_MASK |
1586                             BGE_RDMA_RSRVCTRL_TXMRGN_MASK);
1587                         dmactl |= BGE_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
1588                             BGE_RDMA_RSRVCTRL_FIFO_HWM_1_5K |
1589                             BGE_RDMA_RSRVCTRL_TXMRGN_320B;
1590                 }
1591                 /*
1592                  * Enable fix for read DMA FIFO overruns.
1593                  * The fix is to limit the number of RX BDs
1594                  * the hardware would fetch at a time.
1595                  */
1596                 CSR_WRITE_4(sc, BGE_RDMA_RSRVCTRL,
1597                     dmactl | BGE_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
1598         }
1599
1600         if (sc->bnx_asicrev == BGE_ASICREV_BCM5719) {
1601                 CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL,
1602                     CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) |
1603                     BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_4K |
1604                     BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K);
1605         } else if (sc->bnx_asicrev == BGE_ASICREV_BCM5720) {
1606                 /*
1607                  * Allow 4KB burst length reads for non-LSO frames.
1608                  * Enable 512B burst length reads for buffer descriptors.
1609                  */
1610                 CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL,
1611                     CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) |
1612                     BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_512 |
1613                     BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K);
1614         }
1615
1616         /* Turn on read DMA state machine */
1617         val = BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS;
1618         if (sc->bnx_asicrev == BGE_ASICREV_BCM5717)
1619                 val |= BGE_RDMAMODE_MULT_DMA_RD_DIS;
1620         if (sc->bnx_asicrev == BGE_ASICREV_BCM5784 ||
1621             sc->bnx_asicrev == BGE_ASICREV_BCM5785 ||
1622             sc->bnx_asicrev == BGE_ASICREV_BCM57780) {
1623                 val |= BGE_RDMAMODE_BD_SBD_CRPT_ATTN |
1624                     BGE_RDMAMODE_MBUF_RBD_CRPT_ATTN |
1625                     BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN;
1626         }
1627         if (sc->bnx_asicrev == BGE_ASICREV_BCM5720) {
1628                 val |= CSR_READ_4(sc, BGE_RDMA_MODE) &
1629                     BGE_RDMAMODE_H2BNC_VLAN_DET;
1630                 /*
1631                  * Allow multiple outstanding read requests from
1632                  * non-LSO read DMA engine.
1633                  */
1634                 val &= ~BGE_RDMAMODE_MULT_DMA_RD_DIS;
1635         }
1636         val |= BGE_RDMAMODE_FIFO_LONG_BURST;
1637         CSR_WRITE_4(sc, BGE_RDMA_MODE, val);
1638         DELAY(40);
1639
1640         /* Turn on RX data completion state machine */
1641         CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
1642
1643         /* Turn on RX BD initiator state machine */
1644         CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
1645
1646         /* Turn on RX data and RX BD initiator state machine */
1647         CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
1648
1649         /* Turn on send BD completion state machine */
1650         CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
1651
1652         /* Turn on send data completion state machine */
1653         val = BGE_SDCMODE_ENABLE;
1654         if (sc->bnx_asicrev == BGE_ASICREV_BCM5761)
1655                 val |= BGE_SDCMODE_CDELAY; 
1656         CSR_WRITE_4(sc, BGE_SDC_MODE, val);
1657
1658         /* Turn on send data initiator state machine */
1659         CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
1660
1661         /* Turn on send BD initiator state machine */
1662         CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
1663
1664         /* Turn on send BD selector state machine */
1665         CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
1666
1667         CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
1668         CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
1669             BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);
1670
1671         /* ack/clear link change events */
1672         CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
1673             BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
1674             BGE_MACSTAT_LINK_CHANGED);
1675         CSR_WRITE_4(sc, BGE_MI_STS, 0);
1676
1677         /*
1678          * Enable attention when the link has changed state for
1679          * devices that use auto polling.
1680          */
1681         if (sc->bnx_flags & BNX_FLAG_TBI) {
1682                 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
1683         } else {
1684                 if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) {
1685                         CSR_WRITE_4(sc, BGE_MI_MODE, sc->bnx_mi_mode);
1686                         DELAY(80);
1687                 }
1688         }
1689
1690         /*
1691          * Clear any pending link state attention.
1692          * Otherwise some link state change events may be lost until attention
1693          * is cleared by bnx_intr() -> bnx_softc.bnx_link_upd() sequence.
1694          * It's not necessary on newer BCM chips - perhaps enabling link
1695          * state change attentions implies clearing pending attention.
1696          */
1697         CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
1698             BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
1699             BGE_MACSTAT_LINK_CHANGED);
1700
1701         /* Enable link state change attentions. */
1702         BNX_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
1703
1704         return(0);
1705 }
1706
1707 /*
1708  * Probe for a Broadcom chip. Check the PCI vendor and device IDs
1709  * against our list and return its name if we find a match. Note
1710  * that although the Broadcom controller contains VPD support and
1711  * the device name string could be read from the controller itself,
1712  * this driver simply announces the matching compiled-in product
1713  * name.
1714  */
1715 static int
1716 bnx_probe(device_t dev)
1717 {
1718         const struct bnx_type *t;
1719         uint16_t product, vendor;
1720
1721         if (!pci_is_pcie(dev))
1722                 return ENXIO;
1723
1724         product = pci_get_device(dev);
1725         vendor = pci_get_vendor(dev);
1726
1727         for (t = bnx_devs; t->bnx_name != NULL; t++) {
1728                 if (vendor == t->bnx_vid && product == t->bnx_did)
1729                         break;
1730         }
1731         if (t->bnx_name == NULL)
1732                 return ENXIO;
1733
1734         device_set_desc(dev, t->bnx_name);
1735         return 0;
1736 }
1737
1738 static int
1739 bnx_attach(device_t dev)
1740 {
1741         struct ifnet *ifp;
1742         struct bnx_softc *sc;
1743         uint32_t hwcfg = 0, misccfg;
1744         int error = 0, rid, capmask;
1745         uint8_t ether_addr[ETHER_ADDR_LEN];
1746         uint16_t product, vendor;
1747         driver_intr_t *intr_func;
1748         uintptr_t mii_priv = 0;
1749         u_int intr_flags;
1750
1751         sc = device_get_softc(dev);
1752         sc->bnx_dev = dev;
1753         callout_init_mp(&sc->bnx_stat_timer);
1754         lwkt_serialize_init(&sc->bnx_jslot_serializer);
1755
1756         product = pci_get_device(dev);
1757         vendor = pci_get_vendor(dev);
1758
1759 #ifndef BURN_BRIDGES
1760         if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
1761                 uint32_t irq, mem;
1762
1763                 irq = pci_read_config(dev, PCIR_INTLINE, 4);
1764                 mem = pci_read_config(dev, BGE_PCI_BAR0, 4);
1765
1766                 device_printf(dev, "chip is in D%d power mode "
1767                     "-- setting to D0\n", pci_get_powerstate(dev));
1768
1769                 pci_set_powerstate(dev, PCI_POWERSTATE_D0);
1770
1771                 pci_write_config(dev, PCIR_INTLINE, irq, 4);
1772                 pci_write_config(dev, BGE_PCI_BAR0, mem, 4);
1773         }
1774 #endif  /* !BURN_BRIDGES */
1775
1776         /*
1777          * Map control/status registers.
1778          */
1779         pci_enable_busmaster(dev);
1780
1781         rid = BGE_PCI_BAR0;
1782         sc->bnx_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
1783             RF_ACTIVE);
1784
1785         if (sc->bnx_res == NULL) {
1786                 device_printf(dev, "couldn't map memory\n");
1787                 return ENXIO;
1788         }
1789
1790         sc->bnx_btag = rman_get_bustag(sc->bnx_res);
1791         sc->bnx_bhandle = rman_get_bushandle(sc->bnx_res);
1792
1793         /* Save various chip information */
1794         sc->bnx_chipid =
1795             pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >>
1796             BGE_PCIMISCCTL_ASICREV_SHIFT;
1797         if (BGE_ASICREV(sc->bnx_chipid) == BGE_ASICREV_USE_PRODID_REG) {
1798                 /* All chips having dedicated ASICREV register have CPMU */
1799                 sc->bnx_flags |= BNX_FLAG_CPMU;
1800
1801                 switch (product) {
1802                 case PCI_PRODUCT_BROADCOM_BCM5717:
1803                 case PCI_PRODUCT_BROADCOM_BCM5718:
1804                 case PCI_PRODUCT_BROADCOM_BCM5719:
1805                 case PCI_PRODUCT_BROADCOM_BCM5720_ALT:
1806                         sc->bnx_chipid = pci_read_config(dev,
1807                             BGE_PCI_GEN2_PRODID_ASICREV, 4);
1808                         break;
1809
1810                 case PCI_PRODUCT_BROADCOM_BCM57761:
1811                 case PCI_PRODUCT_BROADCOM_BCM57762:
1812                 case PCI_PRODUCT_BROADCOM_BCM57765:
1813                 case PCI_PRODUCT_BROADCOM_BCM57766:
1814                 case PCI_PRODUCT_BROADCOM_BCM57781:
1815                 case PCI_PRODUCT_BROADCOM_BCM57782:
1816                 case PCI_PRODUCT_BROADCOM_BCM57785:
1817                 case PCI_PRODUCT_BROADCOM_BCM57786:
1818                 case PCI_PRODUCT_BROADCOM_BCM57791:
1819                 case PCI_PRODUCT_BROADCOM_BCM57795:
1820                         sc->bnx_chipid = pci_read_config(dev,
1821                             BGE_PCI_GEN15_PRODID_ASICREV, 4);
1822                         break;
1823
1824                 default:
1825                         sc->bnx_chipid = pci_read_config(dev,
1826                             BGE_PCI_PRODID_ASICREV, 4);
1827                         break;
1828                 }
1829         }
1830         sc->bnx_asicrev = BGE_ASICREV(sc->bnx_chipid);
1831         sc->bnx_chiprev = BGE_CHIPREV(sc->bnx_chipid);
1832
1833         switch (sc->bnx_asicrev) {
1834         case BGE_ASICREV_BCM5717:
1835         case BGE_ASICREV_BCM5719:
1836         case BGE_ASICREV_BCM5720:
1837                 sc->bnx_flags |= BNX_FLAG_5717_PLUS | BNX_FLAG_57765_PLUS;
1838                 break;
1839
1840         case BGE_ASICREV_BCM57765:
1841         case BGE_ASICREV_BCM57766:
1842                 sc->bnx_flags |= BNX_FLAG_57765_FAMILY | BNX_FLAG_57765_PLUS;
1843                 break;
1844         }
1845         sc->bnx_flags |= BNX_FLAG_SHORTDMA;
1846
1847         misccfg = CSR_READ_4(sc, BGE_MISC_CFG) & BGE_MISCCFG_BOARD_ID_MASK;
1848
1849         sc->bnx_pciecap = pci_get_pciecap_ptr(sc->bnx_dev);
1850         if (sc->bnx_asicrev == BGE_ASICREV_BCM5719 ||
1851             sc->bnx_asicrev == BGE_ASICREV_BCM5720)
1852                 pcie_set_max_readrq(dev, PCIEM_DEVCTL_MAX_READRQ_2048);
1853         else
1854                 pcie_set_max_readrq(dev, PCIEM_DEVCTL_MAX_READRQ_4096);
1855         device_printf(dev, "CHIP ID 0x%08x; "
1856                       "ASIC REV 0x%02x; CHIP REV 0x%02x\n",
1857                       sc->bnx_chipid, sc->bnx_asicrev, sc->bnx_chiprev);
1858
1859         /*
1860          * Set various PHY quirk flags.
1861          */
1862
1863         capmask = MII_CAPMASK_DEFAULT;
1864         if (product == PCI_PRODUCT_BROADCOM_BCM57791 ||
1865             product == PCI_PRODUCT_BROADCOM_BCM57795) {
1866                 /* 10/100 only */
1867                 capmask &= ~BMSR_EXTSTAT;
1868         }
1869
1870         mii_priv |= BRGPHY_FLAG_WIRESPEED;
1871
1872         /*
1873          * Allocate interrupt
1874          */
1875         sc->bnx_irq_type = pci_alloc_1intr(dev, bnx_msi_enable, &sc->bnx_irq_rid,
1876             &intr_flags);
1877
1878         sc->bnx_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->bnx_irq_rid,
1879             intr_flags);
1880         if (sc->bnx_irq == NULL) {
1881                 device_printf(dev, "couldn't map interrupt\n");
1882                 error = ENXIO;
1883                 goto fail;
1884         }
1885
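        /*
         * When MSI is used, prefer one-shot MSI: the chip is expected to
         * mask further messages by itself once an MSI fires, so the
         * handler (bnx_msi_oneshot) can skip the explicit mask write that
         * the plain MSI handler performs.
         */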
1886         if (sc->bnx_irq_type == PCI_INTR_TYPE_MSI) {
1887                 sc->bnx_flags |= BNX_FLAG_ONESHOT_MSI;
1888                 bnx_enable_msi(sc);
1889         }
1890
1891         /* Initialize if_name earlier, so if_printf could be used */
1892         ifp = &sc->arpcom.ac_if;
1893         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1894
1895         /* Try to reset the chip. */
1896         bnx_reset(sc);
1897
1898         if (bnx_chipinit(sc)) {
1899                 device_printf(dev, "chip initialization failed\n");
1900                 error = ENXIO;
1901                 goto fail;
1902         }
1903
1904         /*
1905          * Get station address
1906          */
1907         error = bnx_get_eaddr(sc, ether_addr);
1908         if (error) {
1909                 device_printf(dev, "failed to read station address\n");
1910                 goto fail;
1911         }
1912
1913         if (BNX_IS_57765_PLUS(sc)) {
1914                 sc->bnx_return_ring_cnt = BGE_RETURN_RING_CNT;
1915         } else {
1916                 /* 5705/5750 limits RX return ring to 512 entries. */
1917                 sc->bnx_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
1918         }
1919
1920         error = bnx_dma_alloc(sc);
1921         if (error)
1922                 goto fail;
1923
1924         /* Set default tuneable values. */
1925         sc->bnx_rx_coal_ticks = BNX_RX_COAL_TICKS_DEF;
1926         sc->bnx_tx_coal_ticks = BNX_TX_COAL_TICKS_DEF;
1927         sc->bnx_rx_coal_bds = BNX_RX_COAL_BDS_DEF;
1928         sc->bnx_tx_coal_bds = BNX_TX_COAL_BDS_DEF;
1929         sc->bnx_rx_coal_bds_int = BNX_RX_COAL_BDS_DEF;
1930         sc->bnx_tx_coal_bds_int = BNX_TX_COAL_BDS_DEF;
1931
1932         /* Set up ifnet structure */
1933         ifp->if_softc = sc;
1934         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1935         ifp->if_ioctl = bnx_ioctl;
1936         ifp->if_start = bnx_start;
1937 #ifdef DEVICE_POLLING
1938         ifp->if_poll = bnx_poll;
1939 #endif
1940         ifp->if_watchdog = bnx_watchdog;
1941         ifp->if_init = bnx_init;
1942         ifp->if_mtu = ETHERMTU;
1943         ifp->if_capabilities = IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
1944         ifq_set_maxlen(&ifp->if_snd, BGE_TX_RING_CNT - 1);
1945         ifq_set_ready(&ifp->if_snd);
1946
1947         ifp->if_capabilities |= IFCAP_HWCSUM;
1948         ifp->if_hwassist = BNX_CSUM_FEATURES;
1949         ifp->if_capenable = ifp->if_capabilities;
1950
1951         /*
1952          * Figure out what sort of media we have by checking the
1953          * hardware config word in the first 32k of NIC internal memory,
1954          * or fall back to examining the EEPROM if necessary.
1955          * Note: on some BCM5700 cards, this value appears to be unset.
1956          * If that's the case, we have to rely on identifying the NIC
1957          * by its PCI subsystem ID, as we do below for the SysKonnect
1958          * SK-9D41.
1959          */
1960         if (bnx_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER) {
1961                 hwcfg = bnx_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
1962         } else {
1963                 if (bnx_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
1964                                     sizeof(hwcfg))) {
1965                         device_printf(dev, "failed to read EEPROM\n");
1966                         error = ENXIO;
1967                         goto fail;
1968                 }
1969                 hwcfg = ntohl(hwcfg);
1970         }
1971
1972         /* The SysKonnect SK-9D41 is a 1000baseSX card. */
1973         if (pci_get_subvendor(dev) == PCI_PRODUCT_SCHNEIDERKOCH_SK_9D41 ||
1974             (hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER)
1975                 sc->bnx_flags |= BNX_FLAG_TBI;
1976
1977         /* Setup MI MODE */
1978         if (sc->bnx_flags & BNX_FLAG_CPMU)
1979                 sc->bnx_mi_mode = BGE_MIMODE_500KHZ_CONST;
1980         else
1981                 sc->bnx_mi_mode = BGE_MIMODE_BASE;
1982
1983         /* Setup link status update stuffs */
1984         if (sc->bnx_flags & BNX_FLAG_TBI) {
1985                 sc->bnx_link_upd = bnx_tbi_link_upd;
1986                 sc->bnx_link_chg = BGE_MACSTAT_LINK_CHANGED;
1987         } else if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) {
1988                 sc->bnx_link_upd = bnx_autopoll_link_upd;
1989                 sc->bnx_link_chg = BGE_MACSTAT_LINK_CHANGED;
1990         } else {
1991                 sc->bnx_link_upd = bnx_copper_link_upd;
1992                 sc->bnx_link_chg = BGE_MACSTAT_LINK_CHANGED;
1993         }
1994
1995         /* Set default PHY address */
1996         sc->bnx_phyno = 1;
1997
1998         /*
1999          * PHY address mapping for various devices.
2000          *
2001          *          | F0 Cu | F0 Sr | F1 Cu | F1 Sr |
2002          * ---------+-------+-------+-------+-------+
2003          * BCM57XX  |   1   |   X   |   X   |   X   |
2004          * BCM5704  |   1   |   X   |   1   |   X   |
2005          * BCM5717  |   1   |   8   |   2   |   9   |
2006          * BCM5719  |   1   |   8   |   2   |   9   |
2007          * BCM5720  |   1   |   8   |   2   |   9   |
2008          *
2009          * Other addresses may respond but they are not
2010          * IEEE compliant PHYs and should be ignored.
2011          */
2012         if (BNX_IS_5717_PLUS(sc)) {
2013                 int f;
2014
2015                 f = pci_get_function(dev);
2016                 if (sc->bnx_chipid == BGE_CHIPID_BCM5717_A0) {
2017                         if (CSR_READ_4(sc, BGE_SGDIG_STS) &
2018                             BGE_SGDIGSTS_IS_SERDES)
2019                                 sc->bnx_phyno = f + 8;
2020                         else
2021                                 sc->bnx_phyno = f + 1;
2022                 } else {
2023                         if (CSR_READ_4(sc, BGE_CPMU_PHY_STRAP) &
2024                             BGE_CPMU_PHY_STRAP_IS_SERDES)
2025                                 sc->bnx_phyno = f + 8;
2026                         else
2027                                 sc->bnx_phyno = f + 1;
2028                 }
2029         }
2030
2031         if (sc->bnx_flags & BNX_FLAG_TBI) {
2032                 ifmedia_init(&sc->bnx_ifmedia, IFM_IMASK,
2033                     bnx_ifmedia_upd, bnx_ifmedia_sts);
2034                 ifmedia_add(&sc->bnx_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
2035                 ifmedia_add(&sc->bnx_ifmedia,
2036                     IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL);
2037                 ifmedia_add(&sc->bnx_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
2038                 ifmedia_set(&sc->bnx_ifmedia, IFM_ETHER|IFM_AUTO);
2039                 sc->bnx_ifmedia.ifm_media = sc->bnx_ifmedia.ifm_cur->ifm_media;
2040         } else {
2041                 struct mii_probe_args mii_args;
2042
2043                 mii_probe_args_init(&mii_args, bnx_ifmedia_upd, bnx_ifmedia_sts);
2044                 mii_args.mii_probemask = 1 << sc->bnx_phyno;
2045                 mii_args.mii_capmask = capmask;
2046                 mii_args.mii_privtag = MII_PRIVTAG_BRGPHY;
2047                 mii_args.mii_priv = mii_priv;
2048
2049                 error = mii_probe(dev, &sc->bnx_miibus, &mii_args);
2050                 if (error) {
2051                         device_printf(dev, "MII without any PHY!\n");
2052                         goto fail;
2053                 }
2054         }
2055
2056         /*
2057          * Create sysctl nodes.
2058          */
2059         sysctl_ctx_init(&sc->bnx_sysctl_ctx);
2060         sc->bnx_sysctl_tree = SYSCTL_ADD_NODE(&sc->bnx_sysctl_ctx,
2061                                               SYSCTL_STATIC_CHILDREN(_hw),
2062                                               OID_AUTO,
2063                                               device_get_nameunit(dev),
2064                                               CTLFLAG_RD, 0, "");
2065         if (sc->bnx_sysctl_tree == NULL) {
2066                 device_printf(dev, "can't add sysctl node\n");
2067                 error = ENXIO;
2068                 goto fail;
2069         }
2070
2071         SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
2072                         SYSCTL_CHILDREN(sc->bnx_sysctl_tree),
2073                         OID_AUTO, "rx_coal_ticks",
2074                         CTLTYPE_INT | CTLFLAG_RW,
2075                         sc, 0, bnx_sysctl_rx_coal_ticks, "I",
2076                         "Receive coalescing ticks (usec).");
2077         SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
2078                         SYSCTL_CHILDREN(sc->bnx_sysctl_tree),
2079                         OID_AUTO, "tx_coal_ticks",
2080                         CTLTYPE_INT | CTLFLAG_RW,
2081                         sc, 0, bnx_sysctl_tx_coal_ticks, "I",
2082                         "Transmit coalescing ticks (usec).");
2083         SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
2084                         SYSCTL_CHILDREN(sc->bnx_sysctl_tree),
2085                         OID_AUTO, "rx_coal_bds",
2086                         CTLTYPE_INT | CTLFLAG_RW,
2087                         sc, 0, bnx_sysctl_rx_coal_bds, "I",
2088                         "Receive max coalesced BD count.");
2089         SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
2090                         SYSCTL_CHILDREN(sc->bnx_sysctl_tree),
2091                         OID_AUTO, "tx_coal_bds",
2092                         CTLTYPE_INT | CTLFLAG_RW,
2093                         sc, 0, bnx_sysctl_tx_coal_bds, "I",
2094                         "Transmit max coalesced BD count.");
2095         /*
2096          * A common design characteristic for many Broadcom
2097          * client controllers is that they only support a
2098          * single outstanding DMA read operation on the PCIe
2099          * bus. This means that it will take twice as long to
2100          * fetch a TX frame that is split into header and
2101          * payload buffers as it does to fetch a single,
2102          * contiguous TX frame (2 reads vs. 1 read). For these
2103          * controllers, coalescing buffers to reduce the number
2104          * of memory reads is an effective way to get maximum
2105          * performance (about 940Mbps).  Without collapsing TX
2106          * buffers the maximum TCP bulk transfer performance
2107          * is about 850Mbps.  However, forcibly coalescing mbufs
2108          * consumes a lot of CPU cycles, so leave it off by
2109          * default.
2110          */
2111         SYSCTL_ADD_INT(&sc->bnx_sysctl_ctx,
2112             SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
2113             "force_defrag", CTLFLAG_RW, &sc->bnx_force_defrag, 0,
2114             "Force defragment on TX path");
2115
2116         SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
2117             SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
2118             "rx_coal_bds_int", CTLTYPE_INT | CTLFLAG_RW,
2119             sc, 0, bnx_sysctl_rx_coal_bds_int, "I",
2120             "Receive max coalesced BD count during interrupt.");
2121         SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx,
2122             SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO,
2123             "tx_coal_bds_int", CTLTYPE_INT | CTLFLAG_RW,
2124             sc, 0, bnx_sysctl_tx_coal_bds_int, "I",
2125             "Transmit max coalesced BD count during interrupt.");
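        /*
         * The nodes above live under hw.<nameunit>; e.g. for the first
         * unit, "sysctl hw.bnx0.rx_coal_ticks=..." adjusts the RX
         * coalescing timer at runtime.
         */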
2126
2127         /*
2128          * Call MI attach routine.
2129          */
2130         ether_ifattach(ifp, ether_addr, NULL);
2131
2132         if (sc->bnx_irq_type == PCI_INTR_TYPE_MSI) {
2133                 if (sc->bnx_flags & BNX_FLAG_ONESHOT_MSI) {
2134                         intr_func = bnx_msi_oneshot;
2135                         if (bootverbose)
2136                                 device_printf(dev, "oneshot MSI\n");
2137                 } else {
2138                         intr_func = bnx_msi;
2139                 }
2140         } else {
2141                 intr_func = bnx_intr_legacy;
2142         }
2143         error = bus_setup_intr(dev, sc->bnx_irq, INTR_MPSAFE, intr_func, sc,
2144             &sc->bnx_intrhand, ifp->if_serializer);
2145         if (error) {
2146                 ether_ifdetach(ifp);
2147                 device_printf(dev, "couldn't set up irq\n");
2148                 goto fail;
2149         }
2150
2151         ifp->if_cpuid = rman_get_cpuid(sc->bnx_irq);
2152         KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus);
2153
2154         return(0);
2155 fail:
2156         bnx_detach(dev);
2157         return(error);
2158 }
2159
2160 static int
2161 bnx_detach(device_t dev)
2162 {
2163         struct bnx_softc *sc = device_get_softc(dev);
2164
2165         if (device_is_attached(dev)) {
2166                 struct ifnet *ifp = &sc->arpcom.ac_if;
2167
2168                 lwkt_serialize_enter(ifp->if_serializer);
2169                 bnx_stop(sc);
2170                 bnx_reset(sc);
2171                 bus_teardown_intr(dev, sc->bnx_irq, sc->bnx_intrhand);
2172                 lwkt_serialize_exit(ifp->if_serializer);
2173
2174                 ether_ifdetach(ifp);
2175         }
2176
2177         if (sc->bnx_flags & BNX_FLAG_TBI)
2178                 ifmedia_removeall(&sc->bnx_ifmedia);
2179         if (sc->bnx_miibus)
2180                 device_delete_child(dev, sc->bnx_miibus);
2181         bus_generic_detach(dev);
2182
2183         if (sc->bnx_irq != NULL) {
2184                 bus_release_resource(dev, SYS_RES_IRQ, sc->bnx_irq_rid,
2185                     sc->bnx_irq);
2186         }
2187         if (sc->bnx_irq_type == PCI_INTR_TYPE_MSI)
2188                 pci_release_msi(dev);
2189
2190         if (sc->bnx_res != NULL) {
2191                 bus_release_resource(dev, SYS_RES_MEMORY,
2192                     BGE_PCI_BAR0, sc->bnx_res);
2193         }
2194
2195         if (sc->bnx_sysctl_tree != NULL)
2196                 sysctl_ctx_free(&sc->bnx_sysctl_ctx);
2197
2198         bnx_dma_free(sc);
2199
2200         return 0;
2201 }
2202
2203 static void
2204 bnx_reset(struct bnx_softc *sc)
2205 {
2206         device_t dev;
2207         uint32_t cachesize, command, pcistate, reset;
2208         void (*write_op)(struct bnx_softc *, uint32_t, uint32_t);
2209         int i, val = 0;
2210         uint16_t devctl;
2211
2212         dev = sc->bnx_dev;
2213
2214         if (sc->bnx_asicrev != BGE_ASICREV_BCM5906)
2215                 write_op = bnx_writemem_direct;
2216         else
2217                 write_op = bnx_writereg_ind;
2218
2219         /* Save some important PCI state. */
2220         cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
2221         command = pci_read_config(dev, BGE_PCI_CMD, 4);
2222         pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);
2223
2224         pci_write_config(dev, BGE_PCI_MISC_CTL,
2225             BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2226             BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW|
2227             BGE_PCIMISCCTL_TAGGED_STATUS, 4);
2228
2229         /* Disable fastboot on controllers that support it. */
2230         if (bootverbose)
2231                 if_printf(&sc->arpcom.ac_if, "Disabling fastboot\n");
2232         CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0x0);
2233
2234         /*
2235          * Write the magic number to SRAM at offset 0xB50.
2236          * When firmware finishes its initialization it will
2237          * write ~BGE_MAGIC_NUMBER to the same location.
2238          */
2239         bnx_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
2240
2241         reset = BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1);
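        /*
         * (65 << 1) appears to program the 32-bit timer prescaler field
         * of the misc config register for a 66MHz core clock; the same
         * value is restored after the reset below.
         */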
2242
2243         /* XXX: Broadcom Linux driver. */
2244         /* Force PCI-E 1.0a mode */
2245         if (!BNX_IS_57765_PLUS(sc) &&
2246             CSR_READ_4(sc, BGE_PCIE_PHY_TSTCTL) ==
2247             (BGE_PCIE_PHY_TSTCTL_PSCRAM |
2248              BGE_PCIE_PHY_TSTCTL_PCIE10)) {
2249                 CSR_WRITE_4(sc, BGE_PCIE_PHY_TSTCTL,
2250                     BGE_PCIE_PHY_TSTCTL_PSCRAM);
2251         }
2252         if (sc->bnx_chipid != BGE_CHIPID_BCM5750_A0) {
2253                 /* Prevent PCIE link training during global reset */
2254                 CSR_WRITE_4(sc, BGE_MISC_CFG, (1<<29));
2255                 reset |= (1<<29);
2256         }
2257
2258         /* 
2259          * Set GPHY Power Down Override to leave GPHY
2260          * powered up in D0 uninitialized.
2261          */
2262         if ((sc->bnx_flags & BNX_FLAG_CPMU) == 0)
2263                 reset |= BGE_MISCCFG_GPHY_PD_OVERRIDE;
2264
2265         /* Issue global reset */
2266         write_op(sc, BGE_MISC_CFG, reset);
2267
2268         if (sc->bnx_asicrev == BGE_ASICREV_BCM5906) {
2269                 uint32_t status, ctrl;
2270
2271                 status = CSR_READ_4(sc, BGE_VCPU_STATUS);
2272                 CSR_WRITE_4(sc, BGE_VCPU_STATUS,
2273                     status | BGE_VCPU_STATUS_DRV_RESET);
2274                 ctrl = CSR_READ_4(sc, BGE_VCPU_EXT_CTRL);
2275                 CSR_WRITE_4(sc, BGE_VCPU_EXT_CTRL,
2276                     ctrl & ~BGE_VCPU_EXT_CTRL_HALT_CPU);
2277         }
2278
2279         DELAY(1000);
2280
2281         /* XXX: Broadcom Linux driver. */
2282         if (sc->bnx_chipid == BGE_CHIPID_BCM5750_A0) {
2283                 uint32_t v;
2284
2285                 DELAY(500000); /* wait for link training to complete */
2286                 v = pci_read_config(dev, 0xc4, 4);
2287                 pci_write_config(dev, 0xc4, v | (1<<15), 4);
2288         }
2289
2290         devctl = pci_read_config(dev, sc->bnx_pciecap + PCIER_DEVCTRL, 2);
2291
2292         /* Disable no snoop and disable relaxed ordering. */
2293         devctl &= ~(PCIEM_DEVCTL_RELAX_ORDER | PCIEM_DEVCTL_NOSNOOP);
2294
2295         /* Old PCI-E chips only support 128 bytes Max PayLoad Size. */
2296         if ((sc->bnx_flags & BNX_FLAG_CPMU) == 0) {
2297                 devctl &= ~PCIEM_DEVCTL_MAX_PAYLOAD_MASK;
2298                 devctl |= PCIEM_DEVCTL_MAX_PAYLOAD_128;
2299         }
2300
2301         pci_write_config(dev, sc->bnx_pciecap + PCIER_DEVCTRL,
2302             devctl, 2);
2303
2304         /* Clear error status. */
2305         pci_write_config(dev, sc->bnx_pciecap + PCIER_DEVSTS,
2306             PCIEM_DEVSTS_CORR_ERR |
2307             PCIEM_DEVSTS_NFATAL_ERR |
2308             PCIEM_DEVSTS_FATAL_ERR |
2309             PCIEM_DEVSTS_UNSUPP_REQ, 2);
2310
2311         /* Reset some of the PCI state that got zapped by reset */
2312         pci_write_config(dev, BGE_PCI_MISC_CTL,
2313             BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2314             BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW|
2315             BGE_PCIMISCCTL_TAGGED_STATUS, 4);
2316         pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
2317         pci_write_config(dev, BGE_PCI_CMD, command, 4);
2318         write_op(sc, BGE_MISC_CFG, (65 << 1));
2319
2320         /* Enable memory arbiter */
2321         CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
2322
2323         if (sc->bnx_asicrev == BGE_ASICREV_BCM5906) {
2324                 for (i = 0; i < BNX_TIMEOUT; i++) {
2325                         val = CSR_READ_4(sc, BGE_VCPU_STATUS);
2326                         if (val & BGE_VCPU_STATUS_INIT_DONE)
2327                                 break;
2328                         DELAY(100);
2329                 }
2330                 if (i == BNX_TIMEOUT) {
2331                         if_printf(&sc->arpcom.ac_if, "reset timed out\n");
2332                         return;
2333                 }
2334         } else {
2335                 /*
2336                  * Poll until we see the 1's complement of the magic number.
2337                  * This indicates that the firmware initialization
2338                  * is complete.
2339                  */
2340                 for (i = 0; i < BNX_FIRMWARE_TIMEOUT; i++) {
2341                         val = bnx_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
2342                         if (val == ~BGE_MAGIC_NUMBER)
2343                                 break;
2344                         DELAY(10);
2345                 }
2346                 if (i == BNX_FIRMWARE_TIMEOUT) {
2347                         if_printf(&sc->arpcom.ac_if, "firmware handshake "
2348                                   "timed out, found 0x%08x\n", val);
2349                 }
2350
2351                 /* BCM57765 A0 needs additional time before accessing. */
2352                 if (sc->bnx_chipid == BGE_CHIPID_BCM57765_A0)
2353                         DELAY(10 * 1000);
2354         }
2355
2356         /*
2357          * XXX Wait for the value of the PCISTATE register to
2358          * return to its original pre-reset state. This is a
2359          * fairly good indicator of reset completion. If we don't
2360          * wait for the reset to fully complete, trying to read
2361          * from the device's non-PCI registers may yield garbage
2362          * results.
2363          */
2364         for (i = 0; i < BNX_TIMEOUT; i++) {
2365                 if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate)
2366                         break;
2367                 DELAY(10);
2368         }
2369
2370         /* Fix up byte swapping */
2371         CSR_WRITE_4(sc, BGE_MODE_CTL, bnx_dma_swap_options(sc));
2372
2373         CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
2374
2375         /*
2376          * The 5704 in TBI mode apparently needs some special
2377          * adjustment to ensure the SERDES drive level is set
2378          * to 1.2V.
2379          */
2380         if (sc->bnx_asicrev == BGE_ASICREV_BCM5704 &&
2381             (sc->bnx_flags & BNX_FLAG_TBI)) {
2382                 uint32_t serdescfg;
2383
2384                 serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG);
2385                 serdescfg = (serdescfg & ~0xFFF) | 0x880;
2386                 CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg);
2387         }
2388
2389         /* XXX: Broadcom Linux driver. */
2390         if (!BNX_IS_57765_PLUS(sc)) {
2391                 uint32_t v;
2392
2393                 /* Enable Data FIFO protection. */
2394                 v = CSR_READ_4(sc, BGE_PCIE_TLDLPL_PORT);
2395                 CSR_WRITE_4(sc, BGE_PCIE_TLDLPL_PORT, v | (1 << 25));
2396         }
2397
2398         DELAY(10000);
2399
2400         if (sc->bnx_asicrev == BGE_ASICREV_BCM5720) {
2401                 BNX_CLRBIT(sc, BGE_CPMU_CLCK_ORIDE,
2402                     CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
2403         }
2404 }
2405
2406 /*
2407  * Frame reception handling. This is called if there's a frame
2408  * on the receive return list.
2409  *
2410  * Note: we have to be able to handle two possibilities here:
2411  * 1) the frame is from the jumbo receive ring
2412  * 2) the frame is from the standard receive ring
2413  */
2414
2415 static void
2416 bnx_rxeof(struct bnx_softc *sc, uint16_t rx_prod)
2417 {
2418         struct ifnet *ifp;
2419         int stdcnt = 0, jumbocnt = 0;
2420
2421         ifp = &sc->arpcom.ac_if;
2422
2423         while (sc->bnx_rx_saved_considx != rx_prod) {
2424                 struct bge_rx_bd        *cur_rx;
2425                 uint32_t                rxidx;
2426                 struct mbuf             *m = NULL;
2427                 uint16_t                vlan_tag = 0;
2428                 int                     have_tag = 0;
2429
2430                 cur_rx =
2431             &sc->bnx_ldata.bnx_rx_return_ring[sc->bnx_rx_saved_considx];
2432
2433                 rxidx = cur_rx->bge_idx;
2434                 BNX_INC(sc->bnx_rx_saved_considx, sc->bnx_return_ring_cnt);
2435
2436                 if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
2437                         have_tag = 1;
2438                         vlan_tag = cur_rx->bge_vlan_tag;
2439                 }
2440
2441                 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
2442                         BNX_INC(sc->bnx_jumbo, BGE_JUMBO_RX_RING_CNT);
2443                         jumbocnt++;
2444
2445                         if (rxidx != sc->bnx_jumbo) {
2446                                 ifp->if_ierrors++;
2447                                 if_printf(ifp, "sw jumbo index(%d) "
2448                                     "and hw jumbo index(%d) mismatch, drop!\n",
2449                                     sc->bnx_jumbo, rxidx);
2450                                 bnx_setup_rxdesc_jumbo(sc, rxidx);
2451                                 continue;
2452                         }
2453
2454                         m = sc->bnx_cdata.bnx_rx_jumbo_chain[rxidx].bnx_mbuf;
2455                         if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2456                                 ifp->if_ierrors++;
2457                                 bnx_setup_rxdesc_jumbo(sc, sc->bnx_jumbo);
2458                                 continue;
2459                         }
2460                         if (bnx_newbuf_jumbo(sc, sc->bnx_jumbo, 0)) {
2461                                 ifp->if_ierrors++;
2462                                 bnx_setup_rxdesc_jumbo(sc, sc->bnx_jumbo);
2463                                 continue;
2464                         }
2465                 } else {
2466                         BNX_INC(sc->bnx_std, BGE_STD_RX_RING_CNT);
2467                         stdcnt++;
2468
2469                         if (rxidx != sc->bnx_std) {
2470                                 ifp->if_ierrors++;
2471                                 if_printf(ifp, "sw std index(%d) "
2472                                     "and hw std index(%d) mismatch, drop!\n",
2473                                     sc->bnx_std, rxidx);
2474                                 bnx_setup_rxdesc_std(sc, rxidx);
2475                                 continue;
2476                         }
2477
2478                         m = sc->bnx_cdata.bnx_rx_std_chain[rxidx].bnx_mbuf;
2479                         if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2480                                 ifp->if_ierrors++;
2481                                 bnx_setup_rxdesc_std(sc, sc->bnx_std);
2482                                 continue;
2483                         }
2484                         if (bnx_newbuf_std(sc, sc->bnx_std, 0)) {
2485                                 ifp->if_ierrors++;
2486                                 bnx_setup_rxdesc_std(sc, sc->bnx_std);
2487                                 continue;
2488                         }
2489                 }
2490
2491                 ifp->if_ipackets++;
2492                 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
2493                 m->m_pkthdr.rcvif = ifp;
2494
2495                 if ((ifp->if_capenable & IFCAP_RXCSUM) &&
2496                     (cur_rx->bge_flags & BGE_RXBDFLAG_IPV6) == 0) {
2497                         if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
2498                                 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2499                                 if ((cur_rx->bge_error_flag &
2500                                     BGE_RXERRFLAG_IP_CSUM_NOK) == 0)
2501                                         m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2502                         }
2503                         if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) {
2504                                 m->m_pkthdr.csum_data =
2505                                     cur_rx->bge_tcp_udp_csum;
2506                                 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
2507                                     CSUM_PSEUDO_HDR;
2508                         }
2509                 }
2510
2511                 /*
2512                  * If we received a packet with a vlan tag, pass it
2513                  * to vlan_input() instead of ether_input().
2514                  */
2515                 if (have_tag) {
2516                         m->m_flags |= M_VLANTAG;
2517                         m->m_pkthdr.ether_vlantag = vlan_tag;
2518                         have_tag = vlan_tag = 0;
2519                 }
2520                 ifp->if_input(ifp, m);
2521         }
2522
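        /*
         * Tell the chip how far the return ring has been consumed and,
         * if any buffers were replenished, advance the standard/jumbo
         * producer indices so the hardware can refill from those slots.
         */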
2523         bnx_writembx(sc, BGE_MBX_RX_CONS0_LO, sc->bnx_rx_saved_considx);
2524         if (stdcnt)
2525                 bnx_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bnx_std);
2526         if (jumbocnt)
2527                 bnx_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bnx_jumbo);
2528 }
2529
2530 static void
2531 bnx_txeof(struct bnx_softc *sc, uint16_t tx_cons)
2532 {
2533         struct bge_tx_bd *cur_tx = NULL;
2534         struct ifnet *ifp;
2535
2536         ifp = &sc->arpcom.ac_if;
2537
2538         /*
2539          * Go through our tx ring and free mbufs for those
2540          * frames that have been sent.
2541          */
2542         while (sc->bnx_tx_saved_considx != tx_cons) {
2543                 uint32_t idx = 0;
2544
2545                 idx = sc->bnx_tx_saved_considx;
2546                 cur_tx = &sc->bnx_ldata.bnx_tx_ring[idx];
2547                 if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
2548                         ifp->if_opackets++;
2549                 if (sc->bnx_cdata.bnx_tx_chain[idx] != NULL) {
2550                         bus_dmamap_unload(sc->bnx_cdata.bnx_tx_mtag,
2551                             sc->bnx_cdata.bnx_tx_dmamap[idx]);
2552                         m_freem(sc->bnx_cdata.bnx_tx_chain[idx]);
2553                         sc->bnx_cdata.bnx_tx_chain[idx] = NULL;
2554                 }
2555                 sc->bnx_txcnt--;
2556                 BNX_INC(sc->bnx_tx_saved_considx, BGE_TX_RING_CNT);
2557         }
2558
2559         if (cur_tx != NULL &&
2560             (BGE_TX_RING_CNT - sc->bnx_txcnt) >=
2561             (BNX_NSEG_RSVD + BNX_NSEG_SPARE))
2562                 ifp->if_flags &= ~IFF_OACTIVE;
2563
2564         if (sc->bnx_txcnt == 0)
2565                 ifp->if_timer = 0;
2566
2567         if (!ifq_is_empty(&ifp->if_snd))
2568                 if_devstart(ifp);
2569 }
2570
2571 #ifdef DEVICE_POLLING
2572
2573 static void
2574 bnx_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
2575 {
2576         struct bnx_softc *sc = ifp->if_softc;
2577         struct bge_status_block *sblk = sc->bnx_ldata.bnx_status_block;
2578         uint16_t rx_prod, tx_cons;
2579
2580         switch(cmd) {
2581         case POLL_REGISTER:
2582                 bnx_disable_intr(sc);
2583                 break;
2584         case POLL_DEREGISTER:
2585                 bnx_enable_intr(sc);
2586                 break;
2587         case POLL_AND_CHECK_STATUS:
2588                 /*
2589                  * Process link state changes.
2590                  */
2591                 bnx_link_poll(sc);
2592                 /* Fall through */
2593         case POLL_ONLY:
2594                 sc->bnx_status_tag = sblk->bge_status_tag;
2595                 /*
2596                  * Use a load fence to ensure that status_tag
2597                  * is saved before rx_prod and tx_cons.
2598                  */
2599                 cpu_lfence();
2600
2601                 rx_prod = sblk->bge_idx[0].bge_rx_prod_idx;
2602                 tx_cons = sblk->bge_idx[0].bge_tx_cons_idx;
2603                 if (ifp->if_flags & IFF_RUNNING) {
2604                         rx_prod = sblk->bge_idx[0].bge_rx_prod_idx;
2605                         if (sc->bnx_rx_saved_considx != rx_prod)
2606                                 bnx_rxeof(sc, rx_prod);
2607
2608                         tx_cons = sblk->bge_idx[0].bge_tx_cons_idx;
2609                         if (sc->bnx_tx_saved_considx != tx_cons)
2610                                 bnx_txeof(sc, tx_cons);
2611                 }
2612                 break;
2613         }
2614 }
2615
2616 #endif
2617
2618 static void
2619 bnx_intr_legacy(void *xsc)
2620 {
2621         struct bnx_softc *sc = xsc;
2622         struct bge_status_block *sblk = sc->bnx_ldata.bnx_status_block;
2623
2624         if (sc->bnx_status_tag == sblk->bge_status_tag) {
2625                 uint32_t val;
2626
2627                 val = pci_read_config(sc->bnx_dev, BGE_PCI_PCISTATE, 4);
2628                 if (val & BGE_PCISTAT_INTR_NOTACT)
2629                         return;
2630         }
2631
2632         /*
2633          * NOTE:
2634          * Interrupt will have to be disabled if tagged status
2635          * is used, else interrupt will always be asserted on
2636          * certain chips (at least on BCM5750 AX/BX).
2637          */
2638         bnx_writembx(sc, BGE_MBX_IRQ0_LO, 1);
2639
2640         bnx_intr(sc);
2641 }
2642
2643 static void
2644 bnx_msi(void *xsc)
2645 {
2646         struct bnx_softc *sc = xsc;
2647
2648         /* Disable interrupt first */
2649         bnx_writembx(sc, BGE_MBX_IRQ0_LO, 1);
2650         bnx_intr(sc);
2651 }
2652
2653 static void
2654 bnx_msi_oneshot(void *xsc)
2655 {
2656         bnx_intr(xsc);
2657 }
2658
2659 static void
2660 bnx_intr(struct bnx_softc *sc)
2661 {
2662         struct ifnet *ifp = &sc->arpcom.ac_if;
2663         struct bge_status_block *sblk = sc->bnx_ldata.bnx_status_block;
2664         uint16_t rx_prod, tx_cons;
2665         uint32_t status;
2666
2667         sc->bnx_status_tag = sblk->bge_status_tag;
2668         /*
2669          * Use a load fence to ensure that status_tag is saved 
2670          * before rx_prod, tx_cons and status.
2671          */
2672         cpu_lfence();
2673
2674         rx_prod = sblk->bge_idx[0].bge_rx_prod_idx;
2675         tx_cons = sblk->bge_idx[0].bge_tx_cons_idx;
2676         status = sblk->bge_status;
2677
2678         if ((status & BGE_STATFLAG_LINKSTATE_CHANGED) || sc->bnx_link_evt)
2679                 bnx_link_poll(sc);
2680
2681         if (ifp->if_flags & IFF_RUNNING) {
2682                 if (sc->bnx_rx_saved_considx != rx_prod)
2683                         bnx_rxeof(sc, rx_prod);
2684
2685                 if (sc->bnx_tx_saved_considx != tx_cons)
2686                         bnx_txeof(sc, tx_cons);
2687         }
2688
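        /*
         * Writing the saved status tag (in bits 31:24 of the mailbox)
         * acknowledges this status block update and re-enables the
         * interrupt that was masked when the handler was entered.
         */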
2689         bnx_writembx(sc, BGE_MBX_IRQ0_LO, sc->bnx_status_tag << 24);
2690
2691         if (sc->bnx_coal_chg)
2692                 bnx_coal_change(sc);
2693 }
2694
2695 static void
2696 bnx_tick(void *xsc)
2697 {
2698         struct bnx_softc *sc = xsc;
2699         struct ifnet *ifp = &sc->arpcom.ac_if;
2700
2701         lwkt_serialize_enter(ifp->if_serializer);
2702
2703         bnx_stats_update_regs(sc);
2704
2705         if (sc->bnx_flags & BNX_FLAG_TBI) {
2706                 /*
2707                  * Auto-polling can't be used in TBI mode, so poll the link
2708                  * status manually.  Here we register a pending link event
2709                  * and trigger an interrupt.
2710                  */
2711                 sc->bnx_link_evt++;
2712                 BNX_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
2713         } else if (!sc->bnx_link) {
2714                 mii_tick(device_get_softc(sc->bnx_miibus));
2715         }
2716
2717         callout_reset(&sc->bnx_stat_timer, hz, bnx_tick, sc);
2718
2719         lwkt_serialize_exit(ifp->if_serializer);
2720 }
2721
2722 static void
2723 bnx_stats_update_regs(struct bnx_softc *sc)
2724 {
2725         struct ifnet *ifp = &sc->arpcom.ac_if;
2726         struct bge_mac_stats_regs stats;
2727         uint32_t *s;
2728         int i;
2729
2730         s = (uint32_t *)&stats;
2731         for (i = 0; i < sizeof(struct bge_mac_stats_regs); i += 4) {
2732                 *s = CSR_READ_4(sc, BGE_RX_STATS + i);
2733                 s++;
2734         }
2735
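        /*
         * The chip's collision counters are cumulative, so adding the
         * difference below effectively resynchronizes if_collisions with
         * the hardware's running totals.
         */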
2736         ifp->if_collisions +=
2737            (stats.dot3StatsSingleCollisionFrames +
2738            stats.dot3StatsMultipleCollisionFrames +
2739            stats.dot3StatsExcessiveCollisions +
2740            stats.dot3StatsLateCollisions) -
2741            ifp->if_collisions;
2742 }
2743
2744 /*
2745  * Encapsulate an mbuf chain in the tx ring  by coupling the mbuf data
2746  * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
2747  */
2748 static int
2749 bnx_encap(struct bnx_softc *sc, struct mbuf **m_head0, uint32_t *txidx)
2750 {
2751         struct bge_tx_bd *d = NULL;
2752         uint16_t csum_flags = 0;
2753         bus_dma_segment_t segs[BNX_NSEG_NEW];
2754         bus_dmamap_t map;
2755         int error, maxsegs, nsegs, idx, i;
2756         struct mbuf *m_head = *m_head0, *m_new;
2757
2758         if (m_head->m_pkthdr.csum_flags) {
2759                 if (m_head->m_pkthdr.csum_flags & CSUM_IP)
2760                         csum_flags |= BGE_TXBDFLAG_IP_CSUM;
2761                 if (m_head->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
2762                         csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
2763                 if (m_head->m_flags & M_LASTFRAG)
2764                         csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
2765                 else if (m_head->m_flags & M_FRAG)
2766                         csum_flags |= BGE_TXBDFLAG_IP_FRAG;
2767         }
2768
2769         idx = *txidx;
2770         map = sc->bnx_cdata.bnx_tx_dmamap[idx];
2771
2772         maxsegs = (BGE_TX_RING_CNT - sc->bnx_txcnt) - BNX_NSEG_RSVD;
2773         KASSERT(maxsegs >= BNX_NSEG_SPARE,
2774                 ("not enough segments %d", maxsegs));
2775
2776         if (maxsegs > BNX_NSEG_NEW)
2777                 maxsegs = BNX_NSEG_NEW;
2778
2779         /*
2780          * Pad outbound frame to BNX_MIN_FRAMELEN for an unusual reason.
2781          * The bge hardware will pad out Tx runts to BGE_MIN_FRAMELEN,
2782          * but when such padded frames employ the bge IP/TCP checksum
2783          * offload, the hardware checksum assist gives incorrect results
2784          * (possibly from incorporating its own padding into the UDP/TCP
2785          * checksum; who knows).  If we pad such runts with zeros, the
2786          * onboard checksum comes out correct.
2787          */
2788         if ((csum_flags & BGE_TXBDFLAG_TCP_UDP_CSUM) &&
2789             m_head->m_pkthdr.len < BNX_MIN_FRAMELEN) {
2790                 error = m_devpad(m_head, BNX_MIN_FRAMELEN);
2791                 if (error)
2792                         goto back;
2793         }
2794
2795         if ((sc->bnx_flags & BNX_FLAG_SHORTDMA) && m_head->m_next != NULL) {
2796                 m_new = bnx_defrag_shortdma(m_head);
2797                 if (m_new == NULL) {
2798                         error = ENOBUFS;
2799                         goto back;
2800                 }
2801                 *m_head0 = m_head = m_new;
2802         }
2803         if (sc->bnx_force_defrag && m_head->m_next != NULL) {
2804                 /*
2805                  * Forcefully defragment the mbuf chain to overcome a
2806                  * hardware limitation which only supports a single
2807                  * outstanding DMA read operation.  If it fails, keep
2808                  * moving on using the original mbuf chain.
2809                  */
2810                 m_new = m_defrag(m_head, MB_DONTWAIT);
2811                 if (m_new != NULL)
2812                         *m_head0 = m_head = m_new;
2813         }
2814
2815         error = bus_dmamap_load_mbuf_defrag(sc->bnx_cdata.bnx_tx_mtag, map,
2816                         m_head0, segs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
2817         if (error)
2818                 goto back;
2819
2820         m_head = *m_head0;
2821         bus_dmamap_sync(sc->bnx_cdata.bnx_tx_mtag, map, BUS_DMASYNC_PREWRITE);
2822
2823         for (i = 0; ; i++) {
2824                 d = &sc->bnx_ldata.bnx_tx_ring[idx];
2825
2826                 d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr);
2827                 d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr);
2828                 d->bge_len = segs[i].ds_len;
2829                 d->bge_flags = csum_flags;
2830
2831                 if (i == nsegs - 1)
2832                         break;
2833                 BNX_INC(idx, BGE_TX_RING_CNT);
2834         }
2835         /* Mark the last segment as end of packet... */
2836         d->bge_flags |= BGE_TXBDFLAG_END;
2837
2838         /* Set vlan tag to the first segment of the packet. */
2839         d = &sc->bnx_ldata.bnx_tx_ring[*txidx];
2840         if (m_head->m_flags & M_VLANTAG) {
2841                 d->bge_flags |= BGE_TXBDFLAG_VLAN_TAG;
2842                 d->bge_vlan_tag = m_head->m_pkthdr.ether_vlantag;
2843         } else {
2844                 d->bge_vlan_tag = 0;
2845         }
2846
2847         /*
2848          * Ensure that the map for this transmission is placed at
2849          * the array index of the last descriptor in this chain.
2850          */
2851         sc->bnx_cdata.bnx_tx_dmamap[*txidx] = sc->bnx_cdata.bnx_tx_dmamap[idx];
2852         sc->bnx_cdata.bnx_tx_dmamap[idx] = map;
2853         sc->bnx_cdata.bnx_tx_chain[idx] = m_head;
2854         sc->bnx_txcnt += nsegs;
2855
2856         BNX_INC(idx, BGE_TX_RING_CNT);
2857         *txidx = idx;
2858 back:
2859         if (error) {
2860                 m_freem(*m_head0);
2861                 *m_head0 = NULL;
2862         }
2863         return error;
2864 }
2865
2866 /*
2867  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
2868  * to the mbuf data regions directly in the transmit descriptors.
2869  */
2870 static void
2871 bnx_start(struct ifnet *ifp)
2872 {
2873         struct bnx_softc *sc = ifp->if_softc;
2874         struct mbuf *m_head = NULL;
2875         uint32_t prodidx;
2876         int need_trans;
2877
2878         if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
2879                 return;
2880
2881         prodidx = sc->bnx_tx_prodidx;
2882
2883         need_trans = 0;
2884         while (sc->bnx_cdata.bnx_tx_chain[prodidx] == NULL) {
2885                 m_head = ifq_dequeue(&ifp->if_snd, NULL);
2886                 if (m_head == NULL)
2887                         break;
2888
2889                 /*
2890                  * XXX
2891                  * The code inside the if() block is never reached since we
2892                  * must mark CSUM_IP_FRAGS in our if_hwassist to start getting
2893                  * requests to checksum TCP/UDP in a fragmented packet.
2894                  * 
2895                  * XXX
2896                  * safety overkill.  If this is a fragmented packet chain
2897                  * with delayed TCP/UDP checksums, then only encapsulate
2898                  * it if we have enough descriptors to handle the entire
2899                  * chain at once.
2900                  * (paranoia -- may not actually be needed)
2901                  */
2902                 if ((m_head->m_flags & M_FIRSTFRAG) &&
2903                     (m_head->m_pkthdr.csum_flags & CSUM_DELAY_DATA)) {
2904                         if ((BGE_TX_RING_CNT - sc->bnx_txcnt) <
2905                             m_head->m_pkthdr.csum_data + BNX_NSEG_RSVD) {
2906                                 ifp->if_flags |= IFF_OACTIVE;
2907                                 ifq_prepend(&ifp->if_snd, m_head);
2908                                 break;
2909                         }
2910                 }
2911
2912                 /*
2913                  * Sanity check: avoid coming within BNX_NSEG_RSVD
2914                  * descriptors of the end of the ring.  Also make
2915                  * sure there are BNX_NSEG_SPARE descriptors for
2916                  * jumbo buffers' defragmentation.
2917                  */
2918                 if ((BGE_TX_RING_CNT - sc->bnx_txcnt) <
2919                     (BNX_NSEG_RSVD + BNX_NSEG_SPARE)) {
2920                         ifp->if_flags |= IFF_OACTIVE;
2921                         ifq_prepend(&ifp->if_snd, m_head);
2922                         break;
2923                 }
2924
2925                 /*
2926                  * Pack the data into the transmit ring. If we
2927                  * don't have room, set the OACTIVE flag and wait
2928                  * for the NIC to drain the ring.
2929                  */
2930                 if (bnx_encap(sc, &m_head, &prodidx)) {
2931                         ifp->if_flags |= IFF_OACTIVE;
2932                         ifp->if_oerrors++;
2933                         break;
2934                 }
2935                 need_trans = 1;
2936
2937                 ETHER_BPF_MTAP(ifp, m_head);
2938         }
2939
2940         if (!need_trans)
2941                 return;
2942
2943         /* Transmit */
2944         bnx_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
2945
2946         sc->bnx_tx_prodidx = prodidx;
2947
2948         /*
2949          * Set a timeout in case the chip goes out to lunch.
2950          */
2951         ifp->if_timer = 5;
2952 }
2953
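     /*
      * Bring the interface up: stop and reset the chip, reprogram the
      * block state machines, MTU, MAC address and RX filters, set up
      * the RX/TX rings, enable the transmitter and receiver, enable
      * interrupts (unless polling is active) and start the stat timer.
      */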
2954 static void
2955 bnx_init(void *xsc)
2956 {
2957         struct bnx_softc *sc = xsc;
2958         struct ifnet *ifp = &sc->arpcom.ac_if;
2959         uint16_t *m;
2960         uint32_t mode;
2961
2962         ASSERT_SERIALIZED(ifp->if_serializer);
2963
2964         /* Cancel pending I/O and flush buffers. */
2965         bnx_stop(sc);
2966         bnx_reset(sc);
2967         bnx_chipinit(sc);
2968
2969         /*
2970          * Init the various state machines, ring
2971          * control blocks and firmware.
2972          */
2973         if (bnx_blockinit(sc)) {
2974                 if_printf(ifp, "initialization failure\n");
2975                 bnx_stop(sc);
2976                 return;
2977         }
2978
2979         /* Specify MTU. */
2980         CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
2981             ETHER_HDR_LEN + ETHER_CRC_LEN + EVL_ENCAPLEN);
2982
2983         /* Load our MAC address. */
2984         m = (uint16_t *)&sc->arpcom.ac_enaddr[0];
2985         CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
2986         CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
2987
2988         /* Enable or disable promiscuous mode as needed. */
2989         bnx_setpromisc(sc);
2990
2991         /* Program multicast filter. */
2992         bnx_setmulti(sc);
2993
2994         /* Init RX ring. */
2995         if (bnx_init_rx_ring_std(sc)) {
2996                 if_printf(ifp, "RX ring initialization failed\n");
2997                 bnx_stop(sc);
2998                 return;
2999         }
3000
3001         /* Init jumbo RX ring. */
3002         if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN)) {
3003                 if (bnx_init_rx_ring_jumbo(sc)) {
3004                         if_printf(ifp, "Jumbo RX ring initialization failed\n");
3005                         bnx_stop(sc);
3006                         return;
3007                 }
3008         }
3009
3010         /* Init our RX return ring index */
3011         sc->bnx_rx_saved_considx = 0;
3012
3013         /* Init TX ring. */
3014         bnx_init_tx_ring(sc);
3015
3016         /* Enable TX MAC state machine lockup fix. */
3017         mode = CSR_READ_4(sc, BGE_TX_MODE);
3018         mode |= BGE_TXMODE_MBUF_LOCKUP_FIX;
3019         if (sc->bnx_asicrev == BGE_ASICREV_BCM5720) {
3020                 mode &= ~(BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE);
3021                 mode |= CSR_READ_4(sc, BGE_TX_MODE) &
3022                     (BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE);
3023         }
3024         /* Turn on transmitter */
3025         CSR_WRITE_4(sc, BGE_TX_MODE, mode | BGE_TXMODE_ENABLE);
3026
3027         /* Turn on receiver */
3028         BNX_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
3029
3030         /*
3031          * Set the number of good frames to receive after RX MBUF
3032          * Low Watermark has been reached.  After the RX MAC receives
3033          * this number of frames, it will drop subsequent incoming
3034          * frames until the MBUF High Watermark is reached.
3035          */
3036         if (BNX_IS_57765_FAMILY(sc))
3037                 CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 1);
3038         else
3039                 CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 2);
3040
3041         if (sc->bnx_irq_type == PCI_INTR_TYPE_MSI) {
3042                 if (bootverbose) {
3043                         if_printf(ifp, "MSI_MODE: %#x\n",
3044                             CSR_READ_4(sc, BGE_MSI_MODE));
3045                 }
3046         }
3047
3048         /* Tell firmware we're alive. */
3049         BNX_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3050
3051         /* Enable host interrupts if polling(4) is not enabled. */
3052         PCI_SETBIT(sc->bnx_dev, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA, 4);
3053 #ifdef DEVICE_POLLING
3054         if (ifp->if_flags & IFF_POLLING)
3055                 bnx_disable_intr(sc);
3056         else
3057 #endif
3058         bnx_enable_intr(sc);
3059
3060         bnx_ifmedia_upd(ifp);
3061
3062         ifp->if_flags |= IFF_RUNNING;
3063         ifp->if_flags &= ~IFF_OACTIVE;
3064
3065         callout_reset(&sc->bnx_stat_timer, hz, bnx_tick, sc);
3066 }
3067
3068 /*
3069  * Set media options.
3070  */
3071 static int
3072 bnx_ifmedia_upd(struct ifnet *ifp)
3073 {
3074         struct bnx_softc *sc = ifp->if_softc;
3075
3076         /* If this is a 1000baseX NIC, enable the TBI port. */
3077         if (sc->bnx_flags & BNX_FLAG_TBI) {
3078                 struct ifmedia *ifm = &sc->bnx_ifmedia;
3079
3080                 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
3081                         return(EINVAL);
3082
3083                 switch(IFM_SUBTYPE(ifm->ifm_media)) {
3084                 case IFM_AUTO:
3085                         break;
3086
3087                 case IFM_1000_SX:
3088                         if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
3089                                 BNX_CLRBIT(sc, BGE_MAC_MODE,
3090                                     BGE_MACMODE_HALF_DUPLEX);
3091                         } else {
3092                                 BNX_SETBIT(sc, BGE_MAC_MODE,
3093                                     BGE_MACMODE_HALF_DUPLEX);
3094                         }
3095                         break;
3096                 default:
3097                         return(EINVAL);
3098                 }
3099         } else {
3100                 struct mii_data *mii = device_get_softc(sc->bnx_miibus);
3101
3102                 sc->bnx_link_evt++;
3103                 sc->bnx_link = 0;
3104                 if (mii->mii_instance) {
3105                         struct mii_softc *miisc;
3106
3107                         LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
3108                                 mii_phy_reset(miisc);
3109                 }
3110                 mii_mediachg(mii);
3111
3112                 /*
3113                  * Force an interrupt so that we will call bnx_link_upd
3114                  * if needed and clear any pending link state attention.
3115                  * Without this we would not get any further interrupts
3116                  * for link state changes, would never bring the link UP
3117                  * and would not be able to send in bnx_start.  The only
3118                  * way to get things working was to receive a packet and
3119                  * take an RX interrupt.
3120                  *
3121                  * bnx_tick should help for fiber cards, and we might not
3122                  * need to do this here if BNX_FLAG_TBI is set, but since
3123                  * we poll for fiber anyway it should not do any harm.
3124                  */
3125                 BNX_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
3126         }
3127         return(0);
3128 }
3129
3130 /*
3131  * Report current media status.
3132  */
3133 static void
3134 bnx_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
3135 {
3136         struct bnx_softc *sc = ifp->if_softc;
3137
3138         if (sc->bnx_flags & BNX_FLAG_TBI) {
3139                 ifmr->ifm_status = IFM_AVALID;
3140                 ifmr->ifm_active = IFM_ETHER;
3141                 if (CSR_READ_4(sc, BGE_MAC_STS) &
3142                     BGE_MACSTAT_TBI_PCS_SYNCHED) {
3143                         ifmr->ifm_status |= IFM_ACTIVE;
3144                 } else {
3145                         ifmr->ifm_active |= IFM_NONE;
3146                         return;
3147                 }
3148
3149                 ifmr->ifm_active |= IFM_1000_SX;
3150                 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
3151                         ifmr->ifm_active |= IFM_HDX;    
3152                 else
3153                         ifmr->ifm_active |= IFM_FDX;
3154         } else {
3155                 struct mii_data *mii = device_get_softc(sc->bnx_miibus);
3156
3157                 mii_pollstat(mii);
3158                 ifmr->ifm_active = mii->mii_media_active;
3159                 ifmr->ifm_status = mii->mii_media_status;
3160         }
3161 }
3162
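     /*
      * Interface ioctl handler: MTU changes, interface flag changes
      * (promiscuous/allmulti without a full re-init when possible),
      * multicast list updates, media control and checksum offload
      * capability changes.  Runs with the serializer held.
      */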
3163 static int
3164 bnx_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
3165 {
3166         struct bnx_softc *sc = ifp->if_softc;
3167         struct ifreq *ifr = (struct ifreq *)data;
3168         int mask, error = 0;
3169
3170         ASSERT_SERIALIZED(ifp->if_serializer);
3171
3172         switch (command) {
3173         case SIOCSIFMTU:
3174                 if ((!BNX_IS_JUMBO_CAPABLE(sc) && ifr->ifr_mtu > ETHERMTU) ||
3175                     (BNX_IS_JUMBO_CAPABLE(sc) &&
3176                      ifr->ifr_mtu > BNX_JUMBO_MTU)) {
3177                         error = EINVAL;
3178                 } else if (ifp->if_mtu != ifr->ifr_mtu) {
3179                         ifp->if_mtu = ifr->ifr_mtu;
3180                         if (ifp->if_flags & IFF_RUNNING)
3181                                 bnx_init(sc);
3182                 }
3183                 break;
3184         case SIOCSIFFLAGS:
3185                 if (ifp->if_flags & IFF_UP) {
3186                         if (ifp->if_flags & IFF_RUNNING) {
3187                                 mask = ifp->if_flags ^ sc->bnx_if_flags;
3188
3189                                 /*
3190                                  * If only the state of the PROMISC flag
3191                                  * changed, then just use the 'set promisc
3192                                  * mode' command instead of reinitializing
3193                                  * the entire NIC. Doing a full re-init
3194                                  * means reloading the firmware and waiting
3195                                  * for it to start up, which may take a
3196                                  * second or two.  Similarly for ALLMULTI.
3197                                  */
3198                                 if (mask & IFF_PROMISC)
3199                                         bnx_setpromisc(sc);
3200                                 if (mask & IFF_ALLMULTI)
3201                                         bnx_setmulti(sc);
3202                         } else {
3203                                 bnx_init(sc);
3204                         }
3205                 } else if (ifp->if_flags & IFF_RUNNING) {
3206                         bnx_stop(sc);
3207                 }
3208                 sc->bnx_if_flags = ifp->if_flags;
3209                 break;
3210         case SIOCADDMULTI:
3211         case SIOCDELMULTI:
3212                 if (ifp->if_flags & IFF_RUNNING)
3213                         bnx_setmulti(sc);
3214                 break;
3215         case SIOCSIFMEDIA:
3216         case SIOCGIFMEDIA:
3217                 if (sc->bnx_flags & BNX_FLAG_TBI) {
3218                         error = ifmedia_ioctl(ifp, ifr,
3219                             &sc->bnx_ifmedia, command);
3220                 } else {
3221                         struct mii_data *mii;
3222
3223                         mii = device_get_softc(sc->bnx_miibus);
3224                         error = ifmedia_ioctl(ifp, ifr,
3225                                               &mii->mii_media, command);
3226                 }
3227                 break;
3228         case SIOCSIFCAP:
3229                 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
3230                 if (mask & IFCAP_HWCSUM) {
3231                         ifp->if_capenable ^= (mask & IFCAP_HWCSUM);
3232                         if (IFCAP_HWCSUM & ifp->if_capenable)
3233                                 ifp->if_hwassist = BNX_CSUM_FEATURES;
3234                         else
3235                                 ifp->if_hwassist = 0;
3236                 }
3237                 break;
3238         default:
3239                 error = ether_ioctl(ifp, command, data);
3240                 break;
3241         }
3242         return error;
3243 }
3244
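     /*
      * Transmit watchdog.  Invoked when the TX timeout armed in
      * bnx_start() expires; reinitialize the chip and restart the
      * send queue.
      */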
3245 static void
3246 bnx_watchdog(struct ifnet *ifp)
3247 {
3248         struct bnx_softc *sc = ifp->if_softc;
3249
3250         if_printf(ifp, "watchdog timeout -- resetting\n");
3251
3252         bnx_init(sc);
3253
3254         ifp->if_oerrors++;
3255
3256         if (!ifq_is_empty(&ifp->if_snd))
3257                 if_devstart(ifp);
3258 }
3259
3260 /*
3261  * Stop the adapter and free any mbufs allocated to the
3262  * RX and TX lists.
3263  */
3264 static void
3265 bnx_stop(struct bnx_softc *sc)
3266 {
3267         struct ifnet *ifp = &sc->arpcom.ac_if;
3268
3269         ASSERT_SERIALIZED(ifp->if_serializer);
3270
3271         callout_stop(&sc->bnx_stat_timer);
3272
3273         /*
3274          * Disable all of the receiver blocks
3275          */
3276         bnx_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
3277         bnx_stop_block(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
3278         bnx_stop_block(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
3279         bnx_stop_block(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
3280         bnx_stop_block(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
3281         bnx_stop_block(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
3282
3283         /*
3284          * Disable all of the transmit blocks
3285          */
3286         bnx_stop_block(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
3287         bnx_stop_block(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
3288         bnx_stop_block(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
3289         bnx_stop_block(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
3290         bnx_stop_block(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
3291         bnx_stop_block(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
3292
3293         /*
3294          * Shut down all of the memory managers and related
3295          * state machines.
3296          */
3297         bnx_stop_block(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
3298         bnx_stop_block(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
3299         CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
3300         CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
3301
3302         /* Disable host interrupts. */
3303         bnx_disable_intr(sc);
3304
3305         /*
3306          * Tell firmware we're shutting down.
3307          */
3308         BNX_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3309
3310         /* Free the RX lists. */
3311         bnx_free_rx_ring_std(sc);
3312
3313         /* Free jumbo RX list. */
3314         if (BNX_IS_JUMBO_CAPABLE(sc))
3315                 bnx_free_rx_ring_jumbo(sc);
3316
3317         /* Free TX buffers. */
3318         bnx_free_tx_ring(sc);
3319
3320         sc->bnx_status_tag = 0;
3321         sc->bnx_link = 0;
3322         sc->bnx_coal_chg = 0;
3323
3324         sc->bnx_tx_saved_considx = BNX_TXCONS_UNSET;
3325
3326         ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
3327         ifp->if_timer = 0;
3328 }
3329
3330 /*
3331  * Stop all chip I/O so that the kernel's probe routines don't
3332  * get confused by errant DMAs when rebooting.
3333  */
3334 static void
3335 bnx_shutdown(device_t dev)
3336 {
3337         struct bnx_softc *sc = device_get_softc(dev);
3338         struct ifnet *ifp = &sc->arpcom.ac_if;
3339
3340         lwkt_serialize_enter(ifp->if_serializer);
3341         bnx_stop(sc);
3342         bnx_reset(sc);
3343         lwkt_serialize_exit(ifp->if_serializer);
3344 }
3345
3346 static int
3347 bnx_suspend(device_t dev)
3348 {
3349         struct bnx_softc *sc = device_get_softc(dev);
3350         struct ifnet *ifp = &sc->arpcom.ac_if;
3351
3352         lwkt_serialize_enter(ifp->if_serializer);
3353         bnx_stop(sc);
3354         lwkt_serialize_exit(ifp->if_serializer);
3355
3356         return 0;
3357 }
3358
3359 static int
3360 bnx_resume(device_t dev)
3361 {
3362         struct bnx_softc *sc = device_get_softc(dev);
3363         struct ifnet *ifp = &sc->arpcom.ac_if;
3364
3365         lwkt_serialize_enter(ifp->if_serializer);
3366
3367         if (ifp->if_flags & IFF_UP) {
3368                 bnx_init(sc);
3369
3370                 if (!ifq_is_empty(&ifp->if_snd))
3371                         if_devstart(ifp);
3372         }
3373
3374         lwkt_serialize_exit(ifp->if_serializer);
3375
3376         return 0;
3377 }
3378
3379 static void
3380 bnx_setpromisc(struct bnx_softc *sc)
3381 {
3382         struct ifnet *ifp = &sc->arpcom.ac_if;
3383
3384         if (ifp->if_flags & IFF_PROMISC)
3385                 BNX_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
3386         else
3387                 BNX_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
3388 }
3389
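     /*
      * Release all DMA resources created by bnx_dma_alloc(): the RX/TX
      * mbuf tags and maps, the standard RX, jumbo, RX return and TX
      * ring blocks, the status block and finally the parent tag.
      */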
3390 static void
3391 bnx_dma_free(struct bnx_softc *sc)
3392 {
3393         int i;
3394
3395         /* Destroy RX mbuf DMA stuffs. */
3396         if (sc->bnx_cdata.bnx_rx_mtag != NULL) {
3397                 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
3398                         bus_dmamap_destroy(sc->bnx_cdata.bnx_rx_mtag,
3399                             sc->bnx_cdata.bnx_rx_std_dmamap[i]);
3400                 }
3401                 bus_dmamap_destroy(sc->bnx_cdata.bnx_rx_mtag,
3402                                    sc->bnx_cdata.bnx_rx_tmpmap);
3403                 bus_dma_tag_destroy(sc->bnx_cdata.bnx_rx_mtag);
3404         }
3405
3406         /* Destroy TX mbuf DMA stuffs. */
3407         if (sc->bnx_cdata.bnx_tx_mtag != NULL) {
3408                 for (i = 0; i < BGE_TX_RING_CNT; i++) {
3409                         bus_dmamap_destroy(sc->bnx_cdata.bnx_tx_mtag,
3410                             sc->bnx_cdata.bnx_tx_dmamap[i]);
3411                 }
3412                 bus_dma_tag_destroy(sc->bnx_cdata.bnx_tx_mtag);
3413         }
3414
3415         /* Destroy standard RX ring */
3416         bnx_dma_block_free(sc->bnx_cdata.bnx_rx_std_ring_tag,
3417                            sc->bnx_cdata.bnx_rx_std_ring_map,
3418                            sc->bnx_ldata.bnx_rx_std_ring);
3419
3420         if (BNX_IS_JUMBO_CAPABLE(sc))
3421                 bnx_free_jumbo_mem(sc);
3422
3423         /* Destroy RX return ring */
3424         bnx_dma_block_free(sc->bnx_cdata.bnx_rx_return_ring_tag,
3425                            sc->bnx_cdata.bnx_rx_return_ring_map,
3426                            sc->bnx_ldata.bnx_rx_return_ring);
3427
3428         /* Destroy TX ring */
3429         bnx_dma_block_free(sc->bnx_cdata.bnx_tx_ring_tag,
3430                            sc->bnx_cdata.bnx_tx_ring_map,
3431                            sc->bnx_ldata.bnx_tx_ring);
3432
3433         /* Destroy status block */
3434         bnx_dma_block_free(sc->bnx_cdata.bnx_status_tag,
3435                            sc->bnx_cdata.bnx_status_map,
3436                            sc->bnx_ldata.bnx_status_block);
3437
3438         /* Destroy the parent tag */
3439         if (sc->bnx_cdata.bnx_parent_tag != NULL)
3440                 bus_dma_tag_destroy(sc->bnx_cdata.bnx_parent_tag);
3441 }
3442
3443 static int
3444 bnx_dma_alloc(struct bnx_softc *sc)
3445 {
3446         struct ifnet *ifp = &sc->arpcom.ac_if;
3447         int i, error;
3448
3449         /*
3450          * Allocate the parent bus DMA tag appropriate for PCI.
3451          *
3452          * All of the NetExtreme/NetLink controllers have the 4GB
3453          * boundary DMA bug: whenever an address crosses a multiple of
3454          * the 4GB boundary (4GB, 8GB, 12GB, etc.) and makes the
3455          * transition from 0xX_FFFF_FFFF to 0x(X+1)_0000_0000, an
3456          * internal DMA state machine will lock up and cause the
3457          * device to hang.
3458          */
3459         error = bus_dma_tag_create(NULL, 1, BGE_DMA_BOUNDARY_4G,
3460                                    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3461                                    NULL, NULL,
3462                                    BUS_SPACE_MAXSIZE_32BIT, 0,
3463                                    BUS_SPACE_MAXSIZE_32BIT,
3464                                    0, &sc->bnx_cdata.bnx_parent_tag);
3465         if (error) {
3466                 if_printf(ifp, "could not allocate parent dma tag\n");
3467                 return error;
3468         }
3469
3470         /*
3471          * Create DMA tag and maps for RX mbufs.
3472          */
3473         error = bus_dma_tag_create(sc->bnx_cdata.bnx_parent_tag, 1, 0,
3474                                    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3475                                    NULL, NULL, MCLBYTES, 1, MCLBYTES,
3476                                    BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK,
3477                                    &sc->bnx_cdata.bnx_rx_mtag);
3478         if (error) {
3479                 if_printf(ifp, "could not allocate RX mbuf dma tag\n");
3480                 return error;
3481         }
3482
3483         error = bus_dmamap_create(sc->bnx_cdata.bnx_rx_mtag,
3484                                   BUS_DMA_WAITOK, &sc->bnx_cdata.bnx_rx_tmpmap);
3485         if (error) {
3486                 bus_dma_tag_destroy(sc->bnx_cdata.bnx_rx_mtag);
3487                 sc->bnx_cdata.bnx_rx_mtag = NULL;
3488                 return error;
3489         }
3490
3491         for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
3492                 error = bus_dmamap_create(sc->bnx_cdata.bnx_rx_mtag,
3493                                           BUS_DMA_WAITOK,
3494                                           &sc->bnx_cdata.bnx_rx_std_dmamap[i]);
3495                 if (error) {
3496                         int j;
3497
3498                         for (j = 0; j < i; ++j) {
3499                                 bus_dmamap_destroy(sc->bnx_cdata.bnx_rx_mtag,
3500                                         sc->bnx_cdata.bnx_rx_std_dmamap[j]);
3501                         }
3502                         bus_dma_tag_destroy(sc->bnx_cdata.bnx_rx_mtag);
3503                         sc->bnx_cdata.bnx_rx_mtag = NULL;
3504
3505                         if_printf(ifp, "could not create DMA map for RX\n");
3506                         return error;
3507                 }
3508         }
3509
3510         /*
3511          * Create DMA tag and maps for TX mbufs.
3512          */
3513         error = bus_dma_tag_create(sc->bnx_cdata.bnx_parent_tag, 1, 0,
3514                                    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3515                                    NULL, NULL,
3516                                    BNX_JUMBO_FRAMELEN, BNX_NSEG_NEW, MCLBYTES,
3517                                    BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK |
3518                                    BUS_DMA_ONEBPAGE,
3519                                    &sc->bnx_cdata.bnx_tx_mtag);
3520         if (error) {
3521                 if_printf(ifp, "could not allocate TX mbuf dma tag\n");
3522                 return error;
3523         }
3524
3525         for (i = 0; i < BGE_TX_RING_CNT; i++) {
3526                 error = bus_dmamap_create(sc->bnx_cdata.bnx_tx_mtag,
3527                                           BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
3528                                           &sc->bnx_cdata.bnx_tx_dmamap[i]);
3529                 if (error) {
3530                         int j;
3531
3532                         for (j = 0; j < i; ++j) {
3533                                 bus_dmamap_destroy(sc->bnx_cdata.bnx_tx_mtag,
3534                                         sc->bnx_cdata.bnx_tx_dmamap[j]);
3535                         }
3536                         bus_dma_tag_destroy(sc->bnx_cdata.bnx_tx_mtag);
3537                         sc->bnx_cdata.bnx_tx_mtag = NULL;
3538
3539                         if_printf(ifp, "could not create DMA map for TX\n");
3540                         return error;
3541                 }
3542         }
3543
3544         /*
3545          * Create DMA stuffs for standard RX ring.
3546          */
3547         error = bnx_dma_block_alloc(sc, BGE_STD_RX_RING_SZ,
3548                                     &sc->bnx_cdata.bnx_rx_std_ring_tag,
3549                                     &sc->bnx_cdata.bnx_rx_std_ring_map,
3550                                     (void *)&sc->bnx_ldata.bnx_rx_std_ring,
3551                                     &sc->bnx_ldata.bnx_rx_std_ring_paddr);
3552         if (error) {
3553                 if_printf(ifp, "could not create std RX ring\n");
3554                 return error;
3555         }
3556
3557         /*
3558          * Create jumbo buffer pool.
3559          */
3560         if (BNX_IS_JUMBO_CAPABLE(sc)) {
3561                 error = bnx_alloc_jumbo_mem(sc);
3562                 if (error) {
3563                         if_printf(ifp, "could not create jumbo buffer pool\n");
3564                         return error;
3565                 }
3566         }
3567
3568         /*
3569          * Create DMA stuffs for RX return ring.
3570          */
3571         error = bnx_dma_block_alloc(sc,
3572             BGE_RX_RTN_RING_SZ(sc->bnx_return_ring_cnt),
3573             &sc->bnx_cdata.bnx_rx_return_ring_tag,
3574             &sc->bnx_cdata.bnx_rx_return_ring_map,
3575             (void *)&sc->bnx_ldata.bnx_rx_return_ring,
3576             &sc->bnx_ldata.bnx_rx_return_ring_paddr);
3577         if (error) {
3578                 if_printf(ifp, "could not create RX ret ring\n");
3579                 return error;
3580         }
3581
3582         /*
3583          * Create DMA stuffs for TX ring.
3584          */
3585         error = bnx_dma_block_alloc(sc, BGE_TX_RING_SZ,
3586                                     &sc->bnx_cdata.bnx_tx_ring_tag,
3587                                     &sc->bnx_cdata.bnx_tx_ring_map,
3588                                     (void *)&sc->bnx_ldata.bnx_tx_ring,
3589                                     &sc->bnx_ldata.bnx_tx_ring_paddr);
3590         if (error) {
3591                 if_printf(ifp, "could not create TX ring\n");
3592                 return error;
3593         }
3594
3595         /*
3596          * Create DMA stuffs for status block.
3597          */
3598         error = bnx_dma_block_alloc(sc, BGE_STATUS_BLK_SZ,
3599                                     &sc->bnx_cdata.bnx_status_tag,
3600                                     &sc->bnx_cdata.bnx_status_map,
3601                                     (void *)&sc->bnx_ldata.bnx_status_block,
3602                                     &sc->bnx_ldata.bnx_status_block_paddr);
3603         if (error) {
3604                 if_printf(ifp, "could not create status block\n");
3605                 return error;
3606         }
3607
3608         return 0;
3609 }
3610
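     /*
      * Allocate a page-aligned, zeroed, coherent DMA memory block for
      * a ring or status block and return its tag, map, kernel virtual
      * address and bus address.
      */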
3611 static int
3612 bnx_dma_block_alloc(struct bnx_softc *sc, bus_size_t size, bus_dma_tag_t *tag,
3613                     bus_dmamap_t *map, void **addr, bus_addr_t *paddr)
3614 {
3615         bus_dmamem_t dmem;
3616         int error;
3617
3618         error = bus_dmamem_coherent(sc->bnx_cdata.bnx_parent_tag, PAGE_SIZE, 0,
3619                                     BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3620                                     size, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
3621         if (error)
3622                 return error;
3623
3624         *tag = dmem.dmem_tag;
3625         *map = dmem.dmem_map;
3626         *addr = dmem.dmem_addr;
3627         *paddr = dmem.dmem_busaddr;
3628
3629         return 0;
3630 }
3631
3632 static void
3633 bnx_dma_block_free(bus_dma_tag_t tag, bus_dmamap_t map, void *addr)
3634 {
3635         if (tag != NULL) {
3636                 bus_dmamap_unload(tag, map);
3637                 bus_dmamem_free(tag, addr, map);
3638                 bus_dma_tag_destroy(tag);
3639         }
3640 }
3641
3642 static void
3643 bnx_tbi_link_upd(struct bnx_softc *sc, uint32_t status)
3644 {
3645         struct ifnet *ifp = &sc->arpcom.ac_if;
3646
3647 #define PCS_ENCODE_ERR  (BGE_MACSTAT_PORT_DECODE_ERROR|BGE_MACSTAT_MI_COMPLETE)
3648
3649         /*
3650          * Sometimes PCS encoding errors are detected in
3651          * TBI mode (on fiber NICs), and for some reason
3652          * the chip will signal them as link changes.
3653          * If we get a link change event, but the 'PCS
3654          * encoding error' bit in the MAC status register
3655          * is set, don't bother doing a link check.
3656          * This avoids spurious "gigabit link up" messages
3657          * that sometimes appear on fiber NICs during
3658          * periods of heavy traffic.
3659          */
3660         if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
3661                 if (!sc->bnx_link) {
3662                         sc->bnx_link++;
3663                         if (sc->bnx_asicrev == BGE_ASICREV_BCM5704) {
3664                                 BNX_CLRBIT(sc, BGE_MAC_MODE,
3665                                     BGE_MACMODE_TBI_SEND_CFGS);
3666                         }
3667                         CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
3668
3669                         if (bootverbose)
3670                                 if_printf(ifp, "link UP\n");
3671
3672                         ifp->if_link_state = LINK_STATE_UP;
3673                         if_link_state_change(ifp);
3674                 }
3675         } else if ((status & PCS_ENCODE_ERR) != PCS_ENCODE_ERR) {
3676                 if (sc->bnx_link) {
3677                         sc->bnx_link = 0;
3678
3679                         if (bootverbose)
3680                                 if_printf(ifp, "link DOWN\n");
3681
3682                         ifp->if_link_state = LINK_STATE_DOWN;
3683                         if_link_state_change(ifp);
3684                 }
3685         }
3686
3687 #undef PCS_ENCODE_ERR
3688
3689         /* Clear the attention. */
3690         CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
3691             BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
3692             BGE_MACSTAT_LINK_CHANGED);
3693 }
3694
3695 static void
3696 bnx_copper_link_upd(struct bnx_softc *sc, uint32_t status __unused)
3697 {
3698         struct ifnet *ifp = &sc->arpcom.ac_if;
3699         struct mii_data *mii = device_get_softc(sc->bnx_miibus);
3700
3701         mii_pollstat(mii);
3702         bnx_miibus_statchg(sc->bnx_dev);
3703
3704         if (bootverbose) {
3705                 if (sc->bnx_link)
3706                         if_printf(ifp, "link UP\n");
3707                 else
3708                         if_printf(ifp, "link DOWN\n");
3709         }
3710
3711         /* Clear the attention. */
3712         CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
3713             BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
3714             BGE_MACSTAT_LINK_CHANGED);
3715 }
3716
3717 static void
3718 bnx_autopoll_link_upd(struct bnx_softc *sc, uint32_t status __unused)
3719 {
3720         struct ifnet *ifp = &sc->arpcom.ac_if;
3721         struct mii_data *mii = device_get_softc(sc->bnx_miibus);
3722
3723         mii_pollstat(mii);
3724
3725         if (!sc->bnx_link &&
3726             (mii->mii_media_status & IFM_ACTIVE) &&
3727             IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
3728                 sc->bnx_link++;
3729                 if (bootverbose)
3730                         if_printf(ifp, "link UP\n");
3731         } else if (sc->bnx_link &&
3732             (!(mii->mii_media_status & IFM_ACTIVE) ||
3733             IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
3734                 sc->bnx_link = 0;
3735                 if (bootverbose)
3736                         if_printf(ifp, "link DOWN\n");
3737         }
3738
3739         /* Clear the attention. */
3740         CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
3741             BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
3742             BGE_MACSTAT_LINK_CHANGED);
3743 }
3744
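     /*
      * Sysctl handlers for the interrupt coalescing parameters.  Each
      * handler wraps bnx_sysctl_coal_chg() with that parameter's valid
      * range and change mask; the new value is programmed into the
      * chip by bnx_coal_change() from the interrupt path.
      */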
3745 static int
3746 bnx_sysctl_rx_coal_ticks(SYSCTL_HANDLER_ARGS)
3747 {
3748         struct bnx_softc *sc = arg1;
3749
3750         return bnx_sysctl_coal_chg(oidp, arg1, arg2, req,
3751             &sc->bnx_rx_coal_ticks,
3752             BNX_RX_COAL_TICKS_MIN, BNX_RX_COAL_TICKS_MAX,
3753             BNX_RX_COAL_TICKS_CHG);
3754 }
3755
3756 static int
3757 bnx_sysctl_tx_coal_ticks(SYSCTL_HANDLER_ARGS)
3758 {
3759         struct bnx_softc *sc = arg1;
3760
3761         return bnx_sysctl_coal_chg(oidp, arg1, arg2, req,
3762             &sc->bnx_tx_coal_ticks,
3763             BNX_TX_COAL_TICKS_MIN, BNX_TX_COAL_TICKS_MAX,
3764             BNX_TX_COAL_TICKS_CHG);
3765 }
3766
3767 static int
3768 bnx_sysctl_rx_coal_bds(SYSCTL_HANDLER_ARGS)
3769 {
3770         struct bnx_softc *sc = arg1;
3771
3772         return bnx_sysctl_coal_chg(oidp, arg1, arg2, req,
3773             &sc->bnx_rx_coal_bds,
3774             BNX_RX_COAL_BDS_MIN, BNX_RX_COAL_BDS_MAX,
3775             BNX_RX_COAL_BDS_CHG);
3776 }
3777
3778 static int
3779 bnx_sysctl_tx_coal_bds(SYSCTL_HANDLER_ARGS)
3780 {
3781         struct bnx_softc *sc = arg1;
3782
3783         return bnx_sysctl_coal_chg(oidp, arg1, arg2, req,
3784             &sc->bnx_tx_coal_bds,
3785             BNX_TX_COAL_BDS_MIN, BNX_TX_COAL_BDS_MAX,
3786             BNX_TX_COAL_BDS_CHG);
3787 }
3788
3789 static int
3790 bnx_sysctl_rx_coal_bds_int(SYSCTL_HANDLER_ARGS)
3791 {
3792         struct bnx_softc *sc = arg1;
3793
3794         return bnx_sysctl_coal_chg(oidp, arg1, arg2, req,
3795             &sc->bnx_rx_coal_bds_int,
3796             BNX_RX_COAL_BDS_MIN, BNX_RX_COAL_BDS_MAX,
3797             BNX_RX_COAL_BDS_INT_CHG);
3798 }
3799
3800 static int
3801 bnx_sysctl_tx_coal_bds_int(SYSCTL_HANDLER_ARGS)
3802 {
3803         struct bnx_softc *sc = arg1;
3804
3805         return bnx_sysctl_coal_chg(oidp, arg1, arg2, req,
3806             &sc->bnx_tx_coal_bds_int,
3807             BNX_TX_COAL_BDS_MIN, BNX_TX_COAL_BDS_MAX,
3808             BNX_TX_COAL_BDS_INT_CHG);
3809 }
3810
3811 static int
3812 bnx_sysctl_coal_chg(SYSCTL_HANDLER_ARGS, uint32_t *coal,
3813     int coal_min, int coal_max, uint32_t coal_chg_mask)
3814 {
3815         struct bnx_softc *sc = arg1;
3816         struct ifnet *ifp = &sc->arpcom.ac_if;
3817         int error = 0, v;
3818
3819         lwkt_serialize_enter(ifp->if_serializer);
3820
3821         v = *coal;
3822         error = sysctl_handle_int(oidp, &v, 0, req);
3823         if (!error && req->newptr != NULL) {
3824                 if (v < coal_min || v > coal_max) {
3825                         error = EINVAL;
3826                 } else {
3827                         *coal = v;
3828                         sc->bnx_coal_chg |= coal_chg_mask;
3829                 }
3830         }
3831
3832         lwkt_serialize_exit(ifp->if_serializer);
3833         return error;
3834 }
3835
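     /*
      * Apply any pending coalescing parameter changes to the host
      * coalescing engine registers and clear the change mask.  Each
      * write is followed by a short delay and a register read-back.
      * Must be called with the serializer held.
      */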
3836 static void
3837 bnx_coal_change(struct bnx_softc *sc)
3838 {
3839         struct ifnet *ifp = &sc->arpcom.ac_if;
3840         uint32_t val;
3841
3842         ASSERT_SERIALIZED(ifp->if_serializer);
3843
3844         if (sc->bnx_coal_chg & BNX_RX_COAL_TICKS_CHG) {
3845                 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS,
3846                             sc->bnx_rx_coal_ticks);
3847                 DELAY(10);
3848                 val = CSR_READ_4(sc, BGE_HCC_RX_COAL_TICKS);
3849
3850                 if (bootverbose) {
3851                         if_printf(ifp, "rx_coal_ticks -> %u\n",
3852                                   sc->bnx_rx_coal_ticks);
3853                 }
3854         }
3855
3856         if (sc->bnx_coal_chg & BNX_TX_COAL_TICKS_CHG) {
3857                 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS,
3858                             sc->bnx_tx_coal_ticks);
3859                 DELAY(10);
3860                 val = CSR_READ_4(sc, BGE_HCC_TX_COAL_TICKS);
3861
3862                 if (bootverbose) {
3863                         if_printf(ifp, "tx_coal_ticks -> %u\n",
3864                                   sc->bnx_tx_coal_ticks);
3865                 }
3866         }
3867
3868         if (sc->bnx_coal_chg & BNX_RX_COAL_BDS_CHG) {
3869                 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS,
3870                             sc->bnx_rx_coal_bds);
3871                 DELAY(10);
3872                 val = CSR_READ_4(sc, BGE_HCC_RX_MAX_COAL_BDS);
3873
3874                 if (bootverbose) {
3875                         if_printf(ifp, "rx_coal_bds -> %u\n",
3876                                   sc->bnx_rx_coal_bds);
3877                 }
3878         }
3879
3880         if (sc->bnx_coal_chg & BNX_TX_COAL_BDS_CHG) {
3881                 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS,
3882                             sc->bnx_tx_coal_bds);
3883                 DELAY(10);
3884                 val = CSR_READ_4(sc, BGE_HCC_TX_MAX_COAL_BDS);
3885
3886                 if (bootverbose) {
3887                         if_printf(ifp, "tx_max_coal_bds -> %u\n",
3888                                   sc->bnx_tx_coal_bds);
3889                 }
3890         }
3891
3892         if (sc->bnx_coal_chg & BNX_RX_COAL_BDS_INT_CHG) {
3893                 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT,
3894                     sc->bnx_rx_coal_bds_int);
3895                 DELAY(10);
3896                 val = CSR_READ_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT);
3897
3898                 if (bootverbose) {
3899                         if_printf(ifp, "rx_coal_bds_int -> %u\n",
3900                             sc->bnx_rx_coal_bds_int);
3901                 }
3902         }
3903
3904         if (sc->bnx_coal_chg & BNX_TX_COAL_BDS_INT_CHG) {
3905                 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT,
3906                     sc->bnx_tx_coal_bds_int);
3907                 DELAY(10);
3908                 val = CSR_READ_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT);
3909
3910                 if (bootverbose) {
3911                         if_printf(ifp, "tx_coal_bds_int -> %u\n",
3912                             sc->bnx_tx_coal_bds_int);
3913                 }
3914         }
3915
3916         sc->bnx_coal_chg = 0;
3917 }
3918
3919 static void
3920 bnx_enable_intr(struct bnx_softc *sc)
3921 {
3922         struct ifnet *ifp = &sc->arpcom.ac_if;
3923
3924         lwkt_serialize_handler_enable(ifp->if_serializer);
3925
3926         /*
3927          * Enable interrupt.
3928          */
3929         bnx_writembx(sc, BGE_MBX_IRQ0_LO, sc->bnx_status_tag << 24);
3930         if (sc->bnx_flags & BNX_FLAG_ONESHOT_MSI) {
3931                 /* XXX Linux driver */
3932                 bnx_writembx(sc, BGE_MBX_IRQ0_LO, sc->bnx_status_tag << 24);
3933         }
3934
3935         /*
3936          * Unmask the interrupt when we stop polling.
3937          */
3938         PCI_CLRBIT(sc->bnx_dev, BGE_PCI_MISC_CTL,
3939             BGE_PCIMISCCTL_MASK_PCI_INTR, 4);
3940
3941         /*
3942          * Trigger another interrupt, since the above write
3943          * to interrupt mailbox 0 may have acknowledged a
3944          * pending interrupt.
3945          */
3946         BNX_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
3947 }
3948
3949 static void
3950 bnx_disable_intr(struct bnx_softc *sc)
3951 {
3952         struct ifnet *ifp = &sc->arpcom.ac_if;
3953
3954         /*
3955          * Mask the interrupt when we start polling.
3956          */
3957         PCI_SETBIT(sc->bnx_dev, BGE_PCI_MISC_CTL,
3958             BGE_PCIMISCCTL_MASK_PCI_INTR, 4);
3959
3960         /*
3961          * Acknowledge possible asserted interrupt.
3962          */
3963         bnx_writembx(sc, BGE_MBX_IRQ0_LO, 1);
3964
3965         lwkt_serialize_handler_disable(ifp->if_serializer);
3966 }
3967
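     /*
      * Try to fetch the station address from NIC memory.  A value of
      * 0x484b (ASCII "HK") in the upper half of the word at 0x0c14
      * indicates that a valid MAC address has been stored there,
      * presumably by the bootcode.
      */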
3968 static int
3969 bnx_get_eaddr_mem(struct bnx_softc *sc, uint8_t ether_addr[])
3970 {
3971         uint32_t mac_addr;
3972         int ret = 1;
3973
3974         mac_addr = bnx_readmem_ind(sc, 0x0c14);
3975         if ((mac_addr >> 16) == 0x484b) {
3976                 ether_addr[0] = (uint8_t)(mac_addr >> 8);
3977                 ether_addr[1] = (uint8_t)mac_addr;
3978                 mac_addr = bnx_readmem_ind(sc, 0x0c18);
3979                 ether_addr[2] = (uint8_t)(mac_addr >> 24);
3980                 ether_addr[3] = (uint8_t)(mac_addr >> 16);
3981                 ether_addr[4] = (uint8_t)(mac_addr >> 8);
3982                 ether_addr[5] = (uint8_t)mac_addr;
3983                 ret = 0;
3984         }
3985         return ret;
3986 }
3987
3988 static int
3989 bnx_get_eaddr_nvram(struct bnx_softc *sc, uint8_t ether_addr[])
3990 {
3991         int mac_offset = BGE_EE_MAC_OFFSET;
3992
3993         if (BNX_IS_5717_PLUS(sc)) {
3994                 int f;
3995
3996                 f = pci_get_function(sc->bnx_dev);
3997                 if (f & 1)
3998                         mac_offset = BGE_EE_MAC_OFFSET_5717;
3999                 if (f > 1)
4000                         mac_offset += BGE_EE_MAC_OFFSET_5717_OFF;
4001         } else if (sc->bnx_asicrev == BGE_ASICREV_BCM5906) {
4002                 mac_offset = BGE_EE_MAC_OFFSET_5906;
4003         }
4004
4005         return bnx_read_nvram(sc, ether_addr, mac_offset + 2, ETHER_ADDR_LEN);
4006 }
4007
4008 static int
4009 bnx_get_eaddr_eeprom(struct bnx_softc *sc, uint8_t ether_addr[])
4010 {
4011         if (sc->bnx_flags & BNX_FLAG_NO_EEPROM)
4012                 return 1;
4013
4014         return bnx_read_eeprom(sc, ether_addr, BGE_EE_MAC_OFFSET + 2,
4015                                ETHER_ADDR_LEN);
4016 }
4017
4018 static int
4019 bnx_get_eaddr(struct bnx_softc *sc, uint8_t eaddr[])
4020 {
4021         static const bnx_eaddr_fcn_t bnx_eaddr_funcs[] = {
4022                 /* NOTE: Order is critical */
4023                 bnx_get_eaddr_mem,
4024                 bnx_get_eaddr_nvram,
4025                 bnx_get_eaddr_eeprom,
4026                 NULL
4027         };
4028         const bnx_eaddr_fcn_t *func;
4029
4030         for (func = bnx_eaddr_funcs; *func != NULL; ++func) {
4031                 if ((*func)(sc, eaddr) == 0)
4032                         break;
4033         }
4034         return (*func == NULL ? ENXIO : 0);
4035 }
4036
4037 /*
4038  * NOTE: 'm' is not freed upon failure
4039  */
4040 struct mbuf *
4041 bnx_defrag_shortdma(struct mbuf *m)
4042 {
4043         struct mbuf *n;
4044         int found;
4045
4046         /*
4047          * If the device receives two back-to-back send BDs with less
4048          * than or equal to 8 total bytes, then the device may hang.
4049          * The two back-to-back send BDs must be in the same frame for
4050          * this failure to occur.  Scan the mbuf chain and see whether
4051          * two such back-to-back send BDs are there.  If so, allocate a
4052          * new mbuf and copy the frame to work around the silicon bug.
4053          */
4054         for (n = m, found = 0; n != NULL; n = n->m_next) {
4055                 if (n->m_len < 8) {
4056                         found++;
4057                         if (found > 1)
4058                                 break;
4059                         continue;
4060                 }
4061                 found = 0;
4062         }
4063
4064         if (found > 1)
4065                 n = m_defrag(m, MB_DONTWAIT);
4066         else
4067                 n = m;
4068         return n;
4069 }
4070
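     /*
      * Clear a block's enable bit and busy-wait, up to BNX_TIMEOUT
      * iterations of 100us each, for the hardware to report that the
      * block has stopped.
      */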
4071 static void
4072 bnx_stop_block(struct bnx_softc *sc, bus_size_t reg, uint32_t bit)
4073 {
4074         int i;
4075
4076         BNX_CLRBIT(sc, reg, bit);
4077         for (i = 0; i < BNX_TIMEOUT; i++) {
4078                 if ((CSR_READ_4(sc, reg) & bit) == 0)
4079                         return;
4080                 DELAY(100);
4081         }
4082 }
4083
4084 static void
4085 bnx_link_poll(struct bnx_softc *sc)
4086 {
4087         uint32_t status;
4088
4089         status = CSR_READ_4(sc, BGE_MAC_STS);
4090         if ((status & sc->bnx_link_chg) || sc->bnx_link_evt) {
4091                 sc->bnx_link_evt = 0;
4092                 sc->bnx_link_upd(sc, status);
4093         }
4094 }
4095
4096 static void
4097 bnx_enable_msi(struct bnx_softc *sc)
4098 {
4099         uint32_t msi_mode;
4100
4101         msi_mode = CSR_READ_4(sc, BGE_MSI_MODE);
4102         msi_mode |= BGE_MSIMODE_ENABLE;
4103         if (sc->bnx_flags & BNX_FLAG_ONESHOT_MSI) {
4104                 /*
4105                  * NOTE:
4106                  * 5718-PG105-R says that "one shot" mode
4107                  * does not work if MSI is used; however,
4108                  * it obviously works.
4109                  */
4110                 msi_mode &= ~BGE_MSIMODE_ONESHOT_DISABLE;
4111         }
4112         CSR_WRITE_4(sc, BGE_MSI_MODE, msi_mode);
4113 }
4114
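     /*
      * Build the MODE_CTL byte/word swap bits matching the host's
      * endianness.  The BCM5720 additionally needs the B2HRX/HTX2B
      * swap and enable bits.
      */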
4115 static uint32_t
4116 bnx_dma_swap_options(struct bnx_softc *sc)
4117 {
4118         uint32_t dma_options;
4119
4120         dma_options = BGE_MODECTL_WORDSWAP_NONFRAME |
4121             BGE_MODECTL_BYTESWAP_DATA | BGE_MODECTL_WORDSWAP_DATA;
4122 #if BYTE_ORDER == BIG_ENDIAN
4123         dma_options |= BGE_MODECTL_BYTESWAP_NONFRAME;
4124 #endif
4125         if (sc->bnx_asicrev == BGE_ASICREV_BCM5720) {
4126                 dma_options |= BGE_MODECTL_BYTESWAP_B2HRX_DATA |
4127                     BGE_MODECTL_WORDSWAP_B2HRX_DATA | BGE_MODECTL_B2HRX_ENABLE |
4128                     BGE_MODECTL_HTX2B_ENABLE;
4129         }
4130         return dma_options;
4131 }