1 /*
2  * Copyright (c) 2001 Wind River Systems
3  * Copyright (c) 1997, 1998, 1999, 2001
4  *      Bill Paul <wpaul@windriver.com>.  All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. All advertising materials mentioning features or use of this software
15  *    must display the following acknowledgement:
16  *      This product includes software developed by Bill Paul.
17  * 4. Neither the name of the author nor the names of any co-contributors
18  *    may be used to endorse or promote products derived from this software
19  *    without specific prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
22  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
25  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31  * THE POSSIBILITY OF SUCH DAMAGE.
32  *
33  * $FreeBSD: src/sys/dev/bge/if_bge.c,v 1.3.2.29 2003/12/01 21:06:59 ambrisko Exp $
34  * $DragonFly: src/sys/dev/netif/bge/if_bge.c,v 1.32 2005/05/21 09:07:52 joerg Exp $
35  *
36  */
37
38 /*
39  * Broadcom BCM570x family gigabit ethernet driver for FreeBSD.
40  * 
41  * Written by Bill Paul <wpaul@windriver.com>
42  * Senior Engineer, Wind River Systems
43  */
44
45 /*
46  * The Broadcom BCM5700 is based on technology originally developed by
47  * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
48  * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
49  * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
50  * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
51  * frames, highly configurable RX filtering, and 16 RX and TX queues
52  * (which, along with RX filter rules, can be used for QOS applications).
53  * Other features, such as TCP segmentation, may be available as part
54  * of value-added firmware updates. Unlike the Tigon I and Tigon II,
55  * firmware images can be stored in hardware and need not be compiled
56  * into the driver.
57  *
58  * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
59  * function in a 32-bit/64-bit, 33/66MHz bus, or a 64-bit/133MHz bus.
60  * 
61  * The BCM5701 is a single-chip solution incorporating both the BCM5700
62  * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
63  * does not support external SSRAM.
64  *
65  * Broadcom also produces a variation of the BCM5700 under the "Altima"
66  * brand name, which is functionally similar but lacks PCI-X support.
67  *
68  * Without external SSRAM, you can only have at most 4 TX rings,
69  * and the use of the mini RX ring is disabled. This seems to imply
70  * that these features are simply not available on the BCM5701. As a
71  * result, this driver does not implement any support for the mini RX
72  * ring.
73  */
74
75 #include <sys/param.h>
76 #include <sys/systm.h>
77 #include <sys/sockio.h>
78 #include <sys/mbuf.h>
79 #include <sys/malloc.h>
80 #include <sys/kernel.h>
81 #include <sys/socket.h>
82 #include <sys/queue.h>
83
84 #include <net/if.h>
85 #include <net/ifq_var.h>
86 #include <net/if_arp.h>
87 #include <net/ethernet.h>
88 #include <net/if_dl.h>
89 #include <net/if_media.h>
90
91 #include <net/bpf.h>
92
93 #include <net/if_types.h>
94 #include <net/vlan/if_vlan_var.h>
95
96 #include <netinet/in_systm.h>
97 #include <netinet/in.h>
98 #include <netinet/ip.h>
99
100 #include <vm/vm.h>              /* for vtophys */
101 #include <vm/pmap.h>            /* for vtophys */
102 #include <machine/resource.h>
103 #include <sys/bus.h>
104 #include <sys/rman.h>
105
106 #include <dev/netif/mii_layer/mii.h>
107 #include <dev/netif/mii_layer/miivar.h>
108 #include <dev/netif/mii_layer/miidevs.h>
109 #include <dev/netif/mii_layer/brgphyreg.h>
110
111 #include <bus/pci/pcidevs.h>
112 #include <bus/pci/pcireg.h>
113 #include <bus/pci/pcivar.h>
114
115 #include "if_bgereg.h"
116
117 #define BGE_CSUM_FEATURES       (CSUM_IP | CSUM_TCP | CSUM_UDP)
118
119 /* "controller miibus0" required.  See GENERIC if you get errors here. */
120 #include "miibus_if.h"
121
122 /*
123  * Various supported device vendors/types and their names. Note: the
124  * spec seems to indicate that the hardware still has Alteon's vendor
125  * ID burned into it, though it will always be overridden by the vendor
126  * ID in the EEPROM. Just to be safe, we cover all possibilities.
127  */
128 #define BGE_DEVDESC_MAX         64      /* Maximum device description length */
129
130 static struct bge_type bge_devs[] = {
131         { PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_BCM5700,
132                 "Broadcom BCM5700 Gigabit Ethernet" },
133         { PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_BCM5701,
134                 "Broadcom BCM5701 Gigabit Ethernet" },
135         { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5700,
136                 "Broadcom BCM5700 Gigabit Ethernet" },
137         { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5701,
138                 "Broadcom BCM5701 Gigabit Ethernet" },
139         { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702X,
140                 "Broadcom BCM5702X Gigabit Ethernet" },
141         { PCI_VENDOR_BROADCOM, BCOM_DEVICEID_BCM5702X,
142                 "Broadcom BCM5702X Gigabit Ethernet" },
143         { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703X,
144                 "Broadcom BCM5703X Gigabit Ethernet" },
145         { PCI_VENDOR_BROADCOM, BCOM_DEVICEID_BCM5703X,
146                 "Broadcom BCM5703X Gigabit Ethernet" },
147         { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704C,
148                 "Broadcom BCM5704C Dual Gigabit Ethernet" },
149         { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704S,
150                 "Broadcom BCM5704S Dual Gigabit Ethernet" },
151         { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705,
152                 "Broadcom BCM5705 Gigabit Ethernet" },
153         { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705M,
154                 "Broadcom BCM5705M Gigabit Ethernet" },
155         { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705_ALT,
156                 "Broadcom BCM5705M Gigabit Ethernet" },
157         { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5782,
158                 "Broadcom BCM5782 Gigabit Ethernet" },
159         { PCI_VENDOR_BROADCOM, BCOM_DEVICEID_BCM5788,
160                 "Broadcom BCM5788 Gigabit Ethernet" },
161         { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5901,
162                 "Broadcom BCM5901 Fast Ethernet" },
163         { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5901A2,
164                 "Broadcom BCM5901A2 Fast Ethernet" },
165         { PCI_VENDOR_SCHNEIDERKOCH, PCI_PRODUCT_SCHNEIDERKOCH_SK_9DX1,
166                 "SysKonnect Gigabit Ethernet" },
167         { PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1000,
168                 "Altima AC1000 Gigabit Ethernet" },
169         { PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1001,
170                 "Altima AC1002 Gigabit Ethernet" },
171         { PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC9100,
172                 "Altima AC9100 Gigabit Ethernet" },
173         { 0, 0, NULL }
174 };
175
176 static int      bge_probe(device_t);
177 static int      bge_attach(device_t);
178 static int      bge_detach(device_t);
179 static void     bge_release_resources(struct bge_softc *);
180 static void     bge_txeof(struct bge_softc *);
181 static void     bge_rxeof(struct bge_softc *);
182
183 static void     bge_tick(void *);
184 static void     bge_stats_update(struct bge_softc *);
185 static void     bge_stats_update_regs(struct bge_softc *);
186 static int      bge_encap(struct bge_softc *, struct mbuf *, uint32_t *);
187
188 static void     bge_intr(void *);
189 static void     bge_start(struct ifnet *);
190 static int      bge_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
191 static void     bge_init(void *);
192 static void     bge_stop(struct bge_softc *);
193 static void     bge_watchdog(struct ifnet *);
194 static void     bge_shutdown(device_t);
195 static int      bge_ifmedia_upd(struct ifnet *);
196 static void     bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
197
198 static uint8_t  bge_eeprom_getbyte(struct bge_softc *, uint32_t, uint8_t *);
199 static int      bge_read_eeprom(struct bge_softc *, caddr_t, uint32_t, size_t);
200
201 static void     bge_setmulti(struct bge_softc *);
202
203 static void     bge_handle_events(struct bge_softc *);
204 static int      bge_alloc_jumbo_mem(struct bge_softc *);
205 static void     bge_free_jumbo_mem(struct bge_softc *);
206 static void     *bge_jalloc(struct bge_softc *);
207 static void     bge_jfree(caddr_t, u_int);
208 static void     bge_jref(caddr_t, u_int);
209 static int      bge_newbuf_std(struct bge_softc *, int, struct mbuf *);
210 static int      bge_newbuf_jumbo(struct bge_softc *, int, struct mbuf *);
211 static int      bge_init_rx_ring_std(struct bge_softc *);
212 static void     bge_free_rx_ring_std(struct bge_softc *);
213 static int      bge_init_rx_ring_jumbo(struct bge_softc *);
214 static void     bge_free_rx_ring_jumbo(struct bge_softc *);
215 static void     bge_free_tx_ring(struct bge_softc *);
216 static int      bge_init_tx_ring(struct bge_softc *);
217
218 static int      bge_chipinit(struct bge_softc *);
219 static int      bge_blockinit(struct bge_softc *);
220
221 #ifdef notdef
222 static uint8_t  bge_vpd_readbyte(struct bge_softc *, uint32_t);
223 static void     bge_vpd_read_res(struct bge_softc *, struct vpd_res *, uint32_t);
224 static void     bge_vpd_read(struct bge_softc *);
225 #endif
226
227 static uint32_t bge_readmem_ind(struct bge_softc *, uint32_t);
228 static void     bge_writemem_ind(struct bge_softc *, uint32_t, uint32_t);
229 #ifdef notdef
230 static uint32_t bge_readreg_ind(struct bge_softc *, uint32_t);
231 #endif
232 static void     bge_writereg_ind(struct bge_softc *, uint32_t, uint32_t);
233
234 static int      bge_miibus_readreg(device_t, int, int);
235 static int      bge_miibus_writereg(device_t, int, int, int);
236 static void     bge_miibus_statchg(device_t);
237
238 static void     bge_reset(struct bge_softc *);
239
240 static device_method_t bge_methods[] = {
241         /* Device interface */
242         DEVMETHOD(device_probe,         bge_probe),
243         DEVMETHOD(device_attach,        bge_attach),
244         DEVMETHOD(device_detach,        bge_detach),
245         DEVMETHOD(device_shutdown,      bge_shutdown),
246
247         /* bus interface */
248         DEVMETHOD(bus_print_child,      bus_generic_print_child),
249         DEVMETHOD(bus_driver_added,     bus_generic_driver_added),
250
251         /* MII interface */
252         DEVMETHOD(miibus_readreg,       bge_miibus_readreg),
253         DEVMETHOD(miibus_writereg,      bge_miibus_writereg),
254         DEVMETHOD(miibus_statchg,       bge_miibus_statchg),
255
256         { 0, 0 }
257 };
258
259 static DEFINE_CLASS_0(bge, bge_driver, bge_methods, sizeof(struct bge_softc));
260 static devclass_t bge_devclass;
261
262 DECLARE_DUMMY_MODULE(if_bge);
263 DRIVER_MODULE(if_bge, pci, bge_driver, bge_devclass, 0, 0);
264 DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0);
265
266 static uint32_t
267 bge_readmem_ind(struct bge_softc *sc, uint32_t off)
268 {
269         device_t dev = sc->bge_dev;
270
271         pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
272         return(pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4));
273 }
274
275 static void
276 bge_writemem_ind(struct bge_softc *sc, uint32_t off, uint32_t val)
277 {
278         device_t dev = sc->bge_dev;
279
280         pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
281         pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
282 }
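/*
 * Usage sketch (illustrative, not part of the driver proper): the
 * memory window lets the host touch NIC-internal RAM one 32-bit word
 * at a time without mapping it, e.g. to clear the statistics block:
 *
 *	uint32_t off;
 *
 *	for (off = BGE_STATS_BLOCK; off < BGE_STATS_BLOCK_END + 1;
 *	    off += sizeof(uint32_t))
 *		bge_writemem_ind(sc, off, 0);
 *
 * bge_chipinit() below performs the same clearing through the
 * BGE_MEMWIN_WRITE() macro.
 */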
283
284 #ifdef notdef
285 static uint32_t
286 bge_readreg_ind(struct bge_softc *sc, uint32_t off)
287 {
288         device_t dev = sc->bge_dev;
289
290         pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
291         return(pci_read_config(dev, BGE_PCI_REG_DATA, 4));
292 }
293 #endif
294
295 static void
296 bge_writereg_ind(struct bge_softc *sc, uint32_t off, uint32_t val)
297 {
298         device_t dev = sc->bge_dev;
299
300         pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
301         pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);
302 }
303
304 #ifdef notdef
305 static uint8_t
306 bge_vpd_readbyte(struct bge_softc *sc, uint32_t addr)
307 {
308         device_t dev = sc->bge_dev;
309         uint32_t val;
310         int i;
311
312         pci_write_config(dev, BGE_PCI_VPD_ADDR, addr, 2);
313         for (i = 0; i < BGE_TIMEOUT * 10; i++) {
314                 DELAY(10);
315                 if (pci_read_config(dev, BGE_PCI_VPD_ADDR, 2) & BGE_VPD_FLAG)
316                         break;
317         }
318
319         if (i == BGE_TIMEOUT * 10) {
320                 device_printf(sc->bge_dev, "VPD read timed out\n");
321                 return(0);
322         }
323
324         val = pci_read_config(dev, BGE_PCI_VPD_DATA, 4);
325
326         return((val >> ((addr % 4) * 8)) & 0xFF);
327 }
328
329 static void
330 bge_vpd_read_res(struct bge_softc *sc, struct vpd_res *res, uint32_t addr)
331 {
332         size_t i;
333         uint8_t *ptr;
334
335         ptr = (uint8_t *)res;
336         for (i = 0; i < sizeof(struct vpd_res); i++)
337                 ptr[i] = bge_vpd_readbyte(sc, i + addr);
338
339         return;
340 }
341
342 static void
343 bge_vpd_read(struct bge_softc *sc)
344 {
345         int pos = 0, i;
346         struct vpd_res res;
347
348         if (sc->bge_vpd_prodname != NULL)
349                 free(sc->bge_vpd_prodname, M_DEVBUF);
350         if (sc->bge_vpd_readonly != NULL)
351                 free(sc->bge_vpd_readonly, M_DEVBUF);
352         sc->bge_vpd_prodname = NULL;
353         sc->bge_vpd_readonly = NULL;
354
355         bge_vpd_read_res(sc, &res, pos);
356
357         if (res.vr_id != VPD_RES_ID) {
358                 device_printf(sc->bge_dev,
359                               "bad VPD resource id: expected %x got %x\n",
360                               VPD_RES_ID, res.vr_id);
361                 return;
362         }
363
364         pos += sizeof(res);
365         sc->bge_vpd_prodname = malloc(res.vr_len + 1, M_DEVBUF, M_INTWAIT);
366         for (i = 0; i < res.vr_len; i++)
367                 sc->bge_vpd_prodname[i] = bge_vpd_readbyte(sc, i + pos);
368         sc->bge_vpd_prodname[i] = '\0';
369         pos += i;
370
371         bge_vpd_read_res(sc, &res, pos);
372
373         if (res.vr_id != VPD_RES_READ) {
374                 device_printf(sc->bge_dev,
375                               "bad VPD resource id: expected %x got %x\n",
376                               VPD_RES_READ, res.vr_id);
377                 return;
378         }
379
380         pos += sizeof(res);
381         sc->bge_vpd_readonly = malloc(res.vr_len, M_DEVBUF, M_INTWAIT);
382         for (i = 0; i < res.vr_len; i++)
383                 sc->bge_vpd_readonly[i] = bge_vpd_readbyte(sc, i + pos);
384 }
385 #endif
386
387 /*
388  * Read a byte of data stored in the EEPROM at address 'addr.' The
389  * BCM570x supports both the traditional bitbang interface and an
390  * auto access interface for reading the EEPROM. We use the auto
391  * access method.
392  */
393 static uint8_t
394 bge_eeprom_getbyte(struct bge_softc *sc, uint32_t addr, uint8_t *dest)
395 {
396         int i;
397         uint32_t byte = 0;
398
399         /*
400          * Enable use of auto EEPROM access so we can avoid
401          * having to use the bitbang method.
402          */
403         BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);
404
405         /* Reset the EEPROM, load the clock period. */
406         CSR_WRITE_4(sc, BGE_EE_ADDR,
407             BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
408         DELAY(20);
409
410         /* Issue the read EEPROM command. */
411         CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);
412
413         /* Wait for completion */
414         for (i = 0; i < BGE_TIMEOUT * 10; i++) {
415                 DELAY(10);
416                 if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
417                         break;
418         }
419
420         if (i == BGE_TIMEOUT * 10) {
421                 if_printf(&sc->arpcom.ac_if, "eeprom read timed out\n");
422                 return(1);
423         }
424
425         /* Get result. */
426         byte = CSR_READ_4(sc, BGE_EE_DATA);
427
428         *dest = (byte >> ((addr % 4) * 8)) & 0xFF;
429
430         return(0);
431 }
432
433 /*
434  * Read a sequence of bytes from the EEPROM.
435  */
436 static int
437 bge_read_eeprom(struct bge_softc *sc, caddr_t dest, uint32_t off, size_t len)
438 {
439         size_t i;
440         int err;
441         uint8_t byte;
442
443         for (byte = 0, err = 0, i = 0; i < len; i++) {
444                 err = bge_eeprom_getbyte(sc, off + i, &byte);
445                 if (err)
446                         break;
447                 *(dest + i) = byte;
448         }
449
450         return(err ? 1 : 0);
451 }
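/*
 * Example (a sketch, assuming the EEPROM layout constants from
 * if_bgereg.h): bge_attach() uses this routine to recover the
 * station address when it cannot be read out of NIC memory,
 * along the lines of:
 *
 *	if (bge_read_eeprom(sc, (caddr_t)&sc->arpcom.ac_enaddr,
 *	    BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN) != 0)
 *		... fail the attach ...
 */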
452
453 static int
454 bge_miibus_readreg(device_t dev, int phy, int reg)
455 {
456         struct bge_softc *sc;
457         struct ifnet *ifp;
458         uint32_t val, autopoll;
459         int i;
460
461         sc = device_get_softc(dev);
462         ifp = &sc->arpcom.ac_if;
463
464         /*
465          * Broadcom's own driver always assumes the internal
466          * PHY is at GMII address 1. On some chips, the PHY responds
467          * to accesses at all addresses, which could cause us to
468  * bogusly attach the PHY 32 times at probe time. Always
469          * restricting the lookup to address 1 is simpler than
470  * trying to figure out which chip revisions should be
471          * special-cased.
472          */
473         if (phy != 1)
474                 return(0);
475
476         /* Reading with autopolling on may trigger PCI errors */
477         autopoll = CSR_READ_4(sc, BGE_MI_MODE);
478         if (autopoll & BGE_MIMODE_AUTOPOLL) {
479                 BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
480                 DELAY(40);
481         }
482
483         CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY|
484             BGE_MIPHY(phy)|BGE_MIREG(reg));
485
486         for (i = 0; i < BGE_TIMEOUT; i++) {
487                 val = CSR_READ_4(sc, BGE_MI_COMM);
488                 if (!(val & BGE_MICOMM_BUSY))
489                         break;
490         }
491
492         if (i == BGE_TIMEOUT) {
493                 if_printf(ifp, "PHY read timed out\n");
494                 val = 0;
495                 goto done;
496         }
497
498         val = CSR_READ_4(sc, BGE_MI_COMM);
499
500 done:
501         if (autopoll & BGE_MIMODE_AUTOPOLL) {
502                 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
503                 DELAY(40);
504         }
505
506         if (val & BGE_MICOMM_READFAIL)
507                 return(0);
508
509         return(val & 0xFFFF);
510 }
511
512 static int
513 bge_miibus_writereg(device_t dev, int phy, int reg, int val)
514 {
515         struct bge_softc *sc;
516         uint32_t autopoll;
517         int i;
518
519         sc = device_get_softc(dev);
520
521         /* Writing with autopolling on may trigger PCI errors */
522         autopoll = CSR_READ_4(sc, BGE_MI_MODE);
523         if (autopoll & BGE_MIMODE_AUTOPOLL) {
524                 BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
525                 DELAY(40);
526         }
527
528         CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY|
529             BGE_MIPHY(phy)|BGE_MIREG(reg)|val);
530
531         for (i = 0; i < BGE_TIMEOUT; i++) {
532                 if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY))
533                         break;
534         }
535
536         if (autopoll & BGE_MIMODE_AUTOPOLL) {
537                 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
538                 DELAY(40);
539         }
540
541         if (i == BGE_TIMEOUT) {
542                 if_printf(&sc->arpcom.ac_if, "PHY write timed out\n");
543                 return(0);
544         }
545
546         return(0);
547 }
548
549 static void
550 bge_miibus_statchg(device_t dev)
551 {
552         struct bge_softc *sc;
553         struct mii_data *mii;
554
555         sc = device_get_softc(dev);
556         mii = device_get_softc(sc->bge_miibus);
557
558         BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
559         if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
560                 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
561         } else {
562                 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
563         }
564
565         if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
566                 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
567         } else {
568                 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
569         }
570 }
571
572 /*
573  * Handle events that have triggered interrupts.
574  */
575 static void
576 bge_handle_events(struct bge_softc *sc)
577 {
578 }
579
580 /*
581  * Memory management for jumbo frames.
582  */
583 static int
584 bge_alloc_jumbo_mem(struct bge_softc *sc)
585 {
586         struct bge_jpool_entry *entry;
587         caddr_t ptr;
588         int i;
589
590         /* Grab a big chunk o' storage. */
591         sc->bge_cdata.bge_jumbo_buf = contigmalloc(BGE_JMEM, M_DEVBUF,
592                 M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);
593
594         if (sc->bge_cdata.bge_jumbo_buf == NULL) {
595                 if_printf(&sc->arpcom.ac_if, "no memory for jumbo buffers!\n");
596                 return(ENOBUFS);
597         }
598
599         SLIST_INIT(&sc->bge_jfree_listhead);
600         SLIST_INIT(&sc->bge_jinuse_listhead);
601
602         /*
603          * Now divide it up into 9K pieces and save the addresses
604          * in an array. Note that we play an evil trick here by using
605  * the first few bytes in the buffer to hold the address
606          * of the softc structure for this interface. This is because
607          * bge_jfree() needs it, but it is called by the mbuf management
608          * code which will not pass it to us explicitly.
609          */
610         ptr = sc->bge_cdata.bge_jumbo_buf;
611         for (i = 0; i < BGE_JSLOTS; i++) {
612                 uint64_t **aptr;
613
614                 aptr = (uint64_t **)ptr;
615                 aptr[0] = (uint64_t *)sc;
616                 ptr += sizeof(uint64_t);
617                 sc->bge_cdata.bge_jslots[i].bge_buf = ptr;
618                 sc->bge_cdata.bge_jslots[i].bge_inuse = 0;
619                 ptr += (BGE_JLEN - sizeof(uint64_t));
620                 entry = malloc(sizeof(struct bge_jpool_entry), 
621                                M_DEVBUF, M_INTWAIT);
622                 entry->slot = i;
623                 SLIST_INSERT_HEAD(&sc->bge_jfree_listhead,
624                     entry, jpool_entries);
625         }
626
627         return(0);
628 }
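/*
 * Resulting layout of one BGE_JLEN-sized slot (sketch): the stashed
 * back-pointer is what allows bge_jref() and bge_jfree() below to
 * recover both the softc and the slot index from nothing but the
 * buffer address handed to the mbuf system:
 *
 *	+--------------------+-------------------------------------+
 *	| softc back-pointer | 9K+ data area (bge_buf points here) |
 *	| (sizeof(uint64_t)) |                                     |
 *	+--------------------+-------------------------------------+
 */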
629
630 static void
631 bge_free_jumbo_mem(struct bge_softc *sc)
632 {
633         struct bge_jpool_entry *entry;
634         int i;
635  
636         for (i = 0; i < BGE_JSLOTS; i++) {
637                 entry = SLIST_FIRST(&sc->bge_jfree_listhead);
638                 SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jpool_entries);
639                 free(entry, M_DEVBUF);
640         }
641
642         contigfree(sc->bge_cdata.bge_jumbo_buf, BGE_JMEM, M_DEVBUF);
643 }
644
645 /*
646  * Allocate a jumbo buffer.
647  */
648 static void *
649 bge_jalloc(struct bge_softc *sc)
650 {
651         struct bge_jpool_entry   *entry;
652
653         entry = SLIST_FIRST(&sc->bge_jfree_listhead);
654
655         if (entry == NULL) {
656                 if_printf(&sc->arpcom.ac_if, "no free jumbo buffers\n");
657                 return(NULL);
658         }
659
660         SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jpool_entries);
661         SLIST_INSERT_HEAD(&sc->bge_jinuse_listhead, entry, jpool_entries);
662         sc->bge_cdata.bge_jslots[entry->slot].bge_inuse = 1;
663         return(sc->bge_cdata.bge_jslots[entry->slot].bge_buf);
664 }
665
666 /*
667  * Adjust usage count on a jumbo buffer.
668  */
669 static void
670 bge_jref(caddr_t buf, u_int size)
671 {
672         struct bge_softc *sc;
673         uint64_t **aptr;
674         int i;
675
676         /* Extract the softc struct pointer. */
677         aptr = (uint64_t **)(buf - sizeof(uint64_t));
678         sc = (struct bge_softc *)(aptr[0]);
679
680         if (sc == NULL)
681                 panic("bge_jref: can't find softc pointer!");
682
683         if (size != BGE_JUMBO_FRAMELEN)
684                 panic("bge_jref: adjusting refcount of buf of wrong size!");
685
686         /* calculate the slot this buffer belongs to */
687
688         i = ((vm_offset_t)aptr 
689              - (vm_offset_t)sc->bge_cdata.bge_jumbo_buf) / BGE_JLEN;
690
691         if ((i < 0) || (i >= BGE_JSLOTS))
692                 panic("bge_jref: asked to reference buffer "
693                     "that we don't manage!");
694         else if (sc->bge_cdata.bge_jslots[i].bge_inuse == 0)
695                 panic("bge_jref: buffer already free!");
696         else
697                 sc->bge_cdata.bge_jslots[i].bge_inuse++;
698 }
699
700 /*
701  * Release a jumbo buffer.
702  */
703 static void
704 bge_jfree(caddr_t buf, u_int size)
705 {
706         struct bge_softc *sc;
707         uint64_t **aptr;
708         struct bge_jpool_entry   *entry;
709         int i;
710
711         /* Extract the softc struct pointer. */
712         aptr = (uint64_t **)(buf - sizeof(uint64_t));
713         sc = (struct bge_softc *)(aptr[0]);
714
715         if (sc == NULL)
716                 panic("bge_jfree: can't find softc pointer!");
717
718         if (size != BGE_JUMBO_FRAMELEN)
719                 panic("bge_jfree: freeing buffer of wrong size!");
720
721         /* calculate the slot this buffer belongs to */
722
723         i = ((vm_offset_t)aptr 
724              - (vm_offset_t)sc->bge_cdata.bge_jumbo_buf) / BGE_JLEN;
725
726         if ((i < 0) || (i >= BGE_JSLOTS))
727                 panic("bge_jfree: asked to free buffer that we don't manage!");
728         else if (sc->bge_cdata.bge_jslots[i].bge_inuse == 0)
729                 panic("bge_jfree: buffer already free!");
730         else {
731                 sc->bge_cdata.bge_jslots[i].bge_inuse--;
732                 if(sc->bge_cdata.bge_jslots[i].bge_inuse == 0) {
733                         entry = SLIST_FIRST(&sc->bge_jinuse_listhead);
734                         if (entry == NULL)
735                                 panic("bge_jfree: buffer not in use!");
736                         entry->slot = i;
737                         SLIST_REMOVE_HEAD(&sc->bge_jinuse_listhead, 
738                                           jpool_entries);
739                         SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, 
740                                           entry, jpool_entries);
741                 }
742         }
743 }
744
745
746 /*
747  * Initialize a standard receive ring descriptor.
748  */
749 static int
750 bge_newbuf_std(struct bge_softc *sc, int i, struct mbuf *m)
751 {
752         struct mbuf *m_new = NULL;
753         struct bge_rx_bd *r;
754
755         if (m == NULL) {
756                 MGETHDR(m_new, MB_DONTWAIT, MT_DATA);
757                 if (m_new == NULL)
758                         return(ENOBUFS);
759
760                 MCLGET(m_new, MB_DONTWAIT);
761                 if (!(m_new->m_flags & M_EXT)) {
762                         m_freem(m_new);
763                         return(ENOBUFS);
764                 }
765                 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
766         } else {
767                 m_new = m;
768                 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
769                 m_new->m_data = m_new->m_ext.ext_buf;
770         }
771
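        /*
         * Offset the payload by two bytes (ETHER_ALIGN) so the IP
         * header behind the 14-byte Ethernet header ends up 32-bit
         * aligned; chips with the RX alignment bug cannot DMA to
         * unaligned addresses, so the adjustment is skipped for them.
         */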
772         if (!sc->bge_rx_alignment_bug)
773                 m_adj(m_new, ETHER_ALIGN);
774         sc->bge_cdata.bge_rx_std_chain[i] = m_new;
775         r = &sc->bge_rdata->bge_rx_std_ring[i];
776         BGE_HOSTADDR(r->bge_addr, vtophys(mtod(m_new, caddr_t)));
777         r->bge_flags = BGE_RXBDFLAG_END;
778         r->bge_len = m_new->m_len;
779         r->bge_idx = i;
780
781         return(0);
782 }
783
784 /*
785  * Initialize a jumbo receive ring descriptor. This allocates
786  * a jumbo buffer from the pool managed internally by the driver.
787  */
788 static int
789 bge_newbuf_jumbo(struct bge_softc *sc, int i, struct mbuf *m)
790 {
791         struct mbuf *m_new = NULL;
792         struct bge_rx_bd *r;
793
794         if (m == NULL) {
795                 caddr_t *buf = NULL;
796
797                 /* Allocate the mbuf. */
798                 MGETHDR(m_new, MB_DONTWAIT, MT_DATA);
799                 if (m_new == NULL)
800                         return(ENOBUFS);
801
802                 /* Allocate the jumbo buffer */
803                 buf = bge_jalloc(sc);
804                 if (buf == NULL) {
805                         m_freem(m_new);
806                         if_printf(&sc->arpcom.ac_if, "jumbo allocation failed "
807                             "-- packet dropped!\n");
808                         return(ENOBUFS);
809                 }
810
811                 /* Attach the buffer to the mbuf. */
812                 m_new->m_data = m_new->m_ext.ext_buf = (void *)buf;
813                 m_new->m_flags |= M_EXT | M_EXT_OLD;
814                 m_new->m_len = m_new->m_pkthdr.len =
815                     m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN;
816                 m_new->m_ext.ext_nfree.old = bge_jfree;
817                 m_new->m_ext.ext_nref.old = bge_jref;
818         } else {
819                 m_new = m;
820                 m_new->m_data = m_new->m_ext.ext_buf;
821                 m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN;
822         }
823
824         if (!sc->bge_rx_alignment_bug)
825                 m_adj(m_new, ETHER_ALIGN);
826         /* Set up the descriptor. */
827         r = &sc->bge_rdata->bge_rx_jumbo_ring[i];
828         sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new;
829         BGE_HOSTADDR(r->bge_addr, vtophys(mtod(m_new, caddr_t)));
830         r->bge_flags = BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING;
831         r->bge_len = m_new->m_len;
832         r->bge_idx = i;
833
834         return(0);
835 }
836
837 /*
838  * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
839  * that's 1MB of memory, which is a lot. For now, we fill only the first
840  * 256 ring entries and hope that our CPU is fast enough to keep up with
841  * the NIC.
842  */
843 static int
844 bge_init_rx_ring_std(struct bge_softc *sc)
845 {
846         int i;
847
848         for (i = 0; i < BGE_SSLOTS; i++) {
849                 if (bge_newbuf_std(sc, i, NULL) == ENOBUFS)
850                         return(ENOBUFS);
851         }
852
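        /*
         * sc->bge_std tracks the last descriptor the host has
         * initialized; writing it to the producer mailbox below is
         * what hands those buffers over to the NIC for receive DMA.
         */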
853         sc->bge_std = i - 1;
854         CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
855
856         return(0);
857 }
858
859 static void
860 bge_free_rx_ring_std(struct bge_softc *sc)
861 {
862         int i;
863
864         for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
865                 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
866                         m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
867                         sc->bge_cdata.bge_rx_std_chain[i] = NULL;
868                 }
869                 bzero(&sc->bge_rdata->bge_rx_std_ring[i],
870                     sizeof(struct bge_rx_bd));
871         }
872 }
873
874 static int
875 bge_init_rx_ring_jumbo(struct bge_softc *sc)
876 {
877         int i;
878         struct bge_rcb *rcb;
879
880         for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
881                 if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
882                         return(ENOBUFS);
883         }
884
885         sc->bge_jumbo = i - 1;
886
887         rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
888         rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 0);
889         CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
890
891         CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
892
893         return(0);
894 }
895
896 static void
897 bge_free_rx_ring_jumbo(struct bge_softc *sc)
898 {
899         int i;
900
901         for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
902                 if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
903                         m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
904                         sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
905                 }
906                 bzero(&sc->bge_rdata->bge_rx_jumbo_ring[i],
907                     sizeof(struct bge_rx_bd));
908         }
909 }
910
911 static void
912 bge_free_tx_ring(struct bge_softc *sc)
913 {
914         int i;
915
916         if (sc->bge_rdata->bge_tx_ring == NULL)
917                 return;
918
919         for (i = 0; i < BGE_TX_RING_CNT; i++) {
920                 if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
921                         m_freem(sc->bge_cdata.bge_tx_chain[i]);
922                         sc->bge_cdata.bge_tx_chain[i] = NULL;
923                 }
924                 bzero(&sc->bge_rdata->bge_tx_ring[i],
925                     sizeof(struct bge_tx_bd));
926         }
927 }
928
929 static int
930 bge_init_tx_ring(struct bge_softc *sc)
931 {
932         sc->bge_txcnt = 0;
933         sc->bge_tx_saved_considx = 0;
934
935         CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0);
936         /* 5700 b2 errata */
937         if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
938                 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0);
939
940         CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
941         /* 5700 b2 errata */
942         if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
943                 CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
944
945         return(0);
946 }
947
948 static void
949 bge_setmulti(struct bge_softc *sc)
950 {
951         struct ifnet *ifp;
952         struct ifmultiaddr *ifma;
953         uint32_t hashes[4] = { 0, 0, 0, 0 };
954         int h, i;
955
956         ifp = &sc->arpcom.ac_if;
957
958         if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
959                 for (i = 0; i < 4; i++)
960                         CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
961                 return;
962         }
963
964         /* First, zot all the existing filters. */
965         for (i = 0; i < 4; i++)
966                 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);
967
968         /* Now program new ones. */
969         LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
970                 if (ifma->ifma_addr->sa_family != AF_LINK)
971                         continue;
972                 h = ether_crc32_le(
973                     LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
974                     ETHER_ADDR_LEN) & 0x7f;
975                 hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
976         }
977
978         for (i = 0; i < 4; i++)
979                 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
980 }
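/*
 * Worked example (illustrative): the low 7 bits of the little-endian
 * CRC select one bit out of the 128-bit hash filter.  For a hash of,
 * say, h = 0x4a: (h & 0x60) >> 5 = 2 picks register BGE_MAR2, and
 * h & 0x1f = 10 picks bit 10 within it, i.e. hashes[2] |= 1 << 10.
 */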
981
982 /*
983  * Do endian, PCI and DMA initialization. Also check the on-board ROM
984  * self-test results.
985  */
986 static int
987 bge_chipinit(struct bge_softc *sc)
988 {
989         int i;
990         uint32_t dma_rw_ctl;
991
992         /* Set endianness before we access any non-PCI registers. */
993 #if BYTE_ORDER == BIG_ENDIAN
994         pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL,
995             BGE_BIGENDIAN_INIT, 4);
996 #else
997         pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL,
998             BGE_LITTLEENDIAN_INIT, 4);
999 #endif
1000
1001         /*
1002          * Check the 'ROM failed' bit on the RX CPU to see if
1003          * self-tests passed.
1004          */
1005         if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL) {
1006                 if_printf(&sc->arpcom.ac_if,
1007                           "RX CPU self-diagnostics failed!\n");
1008                 return(ENODEV);
1009         }
1010
1011         /* Clear the MAC control register */
1012         CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
1013
1014         /*
1015          * Clear the MAC statistics block in the NIC's
1016          * internal memory.
1017          */
1018         for (i = BGE_STATS_BLOCK;
1019             i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t))
1020                 BGE_MEMWIN_WRITE(sc, i, 0);
1021
1022         for (i = BGE_STATUS_BLOCK;
1023             i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t))
1024                 BGE_MEMWIN_WRITE(sc, i, 0);
1025
1026         /* Set up the PCI DMA control register. */
1027         if (pci_read_config(sc->bge_dev, BGE_PCI_PCISTATE, 4) &
1028             BGE_PCISTATE_PCI_BUSMODE) {
1029                 /* Conventional PCI bus */
1030                 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
1031                     (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1032                     (0x7 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
1033                     (0x0F);
1034         } else {
1035                 /* PCI-X bus */
1036                 /*
1037                  * The 5704 uses a different encoding of read/write
1038                  * watermarks.
1039                  */
1040                 if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
1041                         dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
1042                             (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1043                             (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
1044                 else
1045                         dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
1046                             (0x3 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1047                             (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
1048                             (0x0F);
1049
1050                 /*
1051                  * 5703 and 5704 need ONEDMA_AT_ONCE as a workaround
1052                  * for hardware bugs.
1053                  */
1054                 if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1055                     sc->bge_asicrev == BGE_ASICREV_BCM5704) {
1056                         uint32_t tmp;
1057
1058                         tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1f;
1059                         if (tmp == 0x6 || tmp == 0x7)
1060                                 dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE;
1061                 }
1062         }
1063
1064         if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1065             sc->bge_asicrev == BGE_ASICREV_BCM5704 ||
1066             sc->bge_asicrev == BGE_ASICREV_BCM5705)
1067                 dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
1068         pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);
1069
1070         /*
1071          * Set up general mode register.
1072          */
1073         CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_MODECTL_WORDSWAP_NONFRAME|
1074             BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA|
1075             BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS|
1076             BGE_MODECTL_TX_NO_PHDR_CSUM|BGE_MODECTL_RX_NO_PHDR_CSUM);
1077
1078         /*
1079          * Disable memory write invalidate.  Apparently it is not supported
1080          * properly by these devices.
1081          */
1082         PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD, PCIM_CMD_MWIEN, 4);
1083
1084         /* Set the timer prescaler (always 66MHz) */
1085         CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1 /* BGE_32BITTIME_66MHZ */);
1086
1087         return(0);
1088 }
1089
1090 static int
1091 bge_blockinit(struct bge_softc *sc)
1092 {
1093         struct bge_rcb *rcb;
1094         volatile struct bge_rcb *vrcb;
1095         int i;
1096
1097         /*
1098          * Initialize the memory window pointer register so that
1099          * we can access the first 32K of internal NIC RAM. This will
1100          * allow us to set up the TX send ring RCBs and the RX return
1101          * ring RCBs, plus other things which live in NIC memory.
1102          */
1103         CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);
1104
1105         /* Note: the BCM5704 has a smaller mbuf space than other chips. */
1106
1107         if (sc->bge_asicrev != BGE_ASICREV_BCM5705) {
1108                 /* Configure mbuf memory pool */
1109                 if (sc->bge_extram) {
1110                         CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
1111                             BGE_EXT_SSRAM);
1112                         if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
1113                                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
1114                         else
1115                                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
1116                 } else {
1117                         CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
1118                             BGE_BUFFPOOL_1);
1119                         if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
1120                                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
1121                         else
1122                                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
1123                 }
1124
1125                 /* Configure DMA resource pool */
1126                 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
1127                     BGE_DMA_DESCRIPTORS);
1128                 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
1129         }
1130
1131         /* Configure mbuf pool watermarks */
1132         if (sc->bge_asicrev == BGE_ASICREV_BCM5705) {
1133                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1134                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
1135         } else {
1136                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
1137                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
1138         }
1139         CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1140
1141         /* Configure DMA resource watermarks */
1142         CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
1143         CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);
1144
1145         /* Enable buffer manager */
1146         if (sc->bge_asicrev != BGE_ASICREV_BCM5705) {
1147                 CSR_WRITE_4(sc, BGE_BMAN_MODE,
1148                     BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN);
1149
1150                 /* Poll for buffer manager start indication */
1151                 for (i = 0; i < BGE_TIMEOUT; i++) {
1152                         if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
1153                                 break;
1154                         DELAY(10);
1155                 }
1156
1157                 if (i == BGE_TIMEOUT) {
1158                         if_printf(&sc->arpcom.ac_if,
1159                                   "buffer manager failed to start\n");
1160                         return(ENXIO);
1161                 }
1162         }
1163
1164         /* Enable flow-through queues */
1165         CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
1166         CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
1167
1168         /* Wait until queue initialization is complete */
1169         for (i = 0; i < BGE_TIMEOUT; i++) {
1170                 if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
1171                         break;
1172                 DELAY(10);
1173         }
1174
1175         if (i == BGE_TIMEOUT) {
1176                 if_printf(&sc->arpcom.ac_if,
1177                           "flow-through queue init failed\n");
1178                 return(ENXIO);
1179         }
1180
1181         /* Initialize the standard RX ring control block */
1182         rcb = &sc->bge_rdata->bge_info.bge_std_rx_rcb;
1183         BGE_HOSTADDR(rcb->bge_hostaddr,
1184             vtophys(&sc->bge_rdata->bge_rx_std_ring));
1185         if (sc->bge_asicrev == BGE_ASICREV_BCM5705)
1186                 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
1187         else
1188                 rcb->bge_maxlen_flags =
1189                     BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
1190         if (sc->bge_extram)
1191                 rcb->bge_nicaddr = BGE_EXT_STD_RX_RINGS;
1192         else
1193                 rcb->bge_nicaddr = BGE_STD_RX_RINGS;
1194         CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
1195         CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
1196         CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1197         CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
1198
1199         /*
1200          * Initialize the jumbo RX ring control block
1201          * We set the 'ring disabled' bit in the flags
1202          * field until we're actually ready to start
1203          * using this ring (i.e. once we set the MTU
1204          * high enough to require it).
1205          */
1206         if (sc->bge_asicrev != BGE_ASICREV_BCM5705) {
1207                 rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
1208                 BGE_HOSTADDR(rcb->bge_hostaddr,
1209                     vtophys(&sc->bge_rdata->bge_rx_jumbo_ring));
1210                 rcb->bge_maxlen_flags =
1211                     BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN,
1212                     BGE_RCB_FLAG_RING_DISABLED);
1213                 if (sc->bge_extram)
1214                         rcb->bge_nicaddr = BGE_EXT_JUMBO_RX_RINGS;
1215                 else
1216                         rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
1217                 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
1218                     rcb->bge_hostaddr.bge_addr_hi);
1219                 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
1220                     rcb->bge_hostaddr.bge_addr_lo);
1221                 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
1222                     rcb->bge_maxlen_flags);
1223                 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
1224
1225                 /* Set up dummy disabled mini ring RCB */
1226                 rcb = &sc->bge_rdata->bge_info.bge_mini_rx_rcb;
1227                 rcb->bge_maxlen_flags =
1228                     BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
1229                 CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
1230                     rcb->bge_maxlen_flags);
1231         }
1232
1233         /*
1234  * Set the BD ring replenish thresholds. The recommended
1235          * values are 1/8th the number of descriptors allocated to
1236          * each ring.
1237          */
1238         CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, BGE_STD_RX_RING_CNT/8);
1239         CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8);
1240
1241         /*
1242          * Disable all unused send rings by setting the 'ring disabled'
1243          * bit in the flags field of all the TX send ring control blocks.
1244          * These are located in NIC memory.
1245          */
1246         vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START +
1247             BGE_SEND_RING_RCB);
1248         for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) {
1249                 vrcb->bge_maxlen_flags =
1250                     BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
1251                 vrcb->bge_nicaddr = 0;
1252                 vrcb++;
1253         }
1254
1255         /* Configure TX RCB 0 (we use only the first ring) */
1256         vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START +
1257             BGE_SEND_RING_RCB);
1258         vrcb->bge_hostaddr.bge_addr_hi = 0;
1259         BGE_HOSTADDR(vrcb->bge_hostaddr, vtophys(&sc->bge_rdata->bge_tx_ring));
1260         vrcb->bge_nicaddr = BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT);
1261         if (sc->bge_asicrev != BGE_ASICREV_BCM5705)
1262                 vrcb->bge_maxlen_flags =
1263                     BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0);
1264
1265         /* Disable all unused RX return rings */
1266         vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START +
1267             BGE_RX_RETURN_RING_RCB);
1268         for (i = 0; i < BGE_RX_RINGS_MAX; i++) {
1269                 vrcb->bge_hostaddr.bge_addr_hi = 0;
1270                 vrcb->bge_hostaddr.bge_addr_lo = 0;
1271                 vrcb->bge_maxlen_flags =
1272                     BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt,
1273                     BGE_RCB_FLAG_RING_DISABLED);
1274                 vrcb->bge_nicaddr = 0;
1275                 CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO +
1276                     (i * (sizeof(uint64_t))), 0);
1277                 vrcb++;
1278         }
1279
1280         /* Initialize RX ring indexes */
1281         CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, 0);
1282         CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
1283         CSR_WRITE_4(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
1284
1285         /*
1286          * Set up RX return ring 0
1287          * Note that the NIC address for RX return rings is 0x00000000.
1288          * The return rings live entirely within the host, so the
1289          * nicaddr field in the RCB isn't used.
1290          */
1291         vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START +
1292             BGE_RX_RETURN_RING_RCB);
1293         vrcb->bge_hostaddr.bge_addr_hi = 0;
1294         BGE_HOSTADDR(vrcb->bge_hostaddr,
1295             vtophys(&sc->bge_rdata->bge_rx_return_ring));
1296         vrcb->bge_nicaddr = 0x00000000;
1297         vrcb->bge_maxlen_flags =
1298             BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0);
1299
1300         /* Set random backoff seed for TX */
1301         CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
1302             sc->arpcom.ac_enaddr[0] + sc->arpcom.ac_enaddr[1] +
1303             sc->arpcom.ac_enaddr[2] + sc->arpcom.ac_enaddr[3] +
1304             sc->arpcom.ac_enaddr[4] + sc->arpcom.ac_enaddr[5] +
1305             BGE_TX_BACKOFF_SEED_MASK);
1306
1307         /* Set inter-packet gap */
1308         CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);
1309
1310         /*
1311          * Specify which ring to use for packets that don't match
1312          * any RX rules.
1313          */
1314         CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
1315
1316         /*
1317          * Configure number of RX lists. One interrupt distribution
1318          * list, sixteen active lists, one bad frames class.
1319          */
1320         CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
1321
1322         /* Initialize RX list placement stats mask. */
1323         CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
1324         CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
1325
1326         /* Disable host coalescing until we get it set up */
1327         CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
1328
1329         /* Poll to make sure it's shut down. */
1330         for (i = 0; i < BGE_TIMEOUT; i++) {
1331                 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
1332                         break;
1333                 DELAY(10);
1334         }
1335
1336         if (i == BGE_TIMEOUT) {
1337                 if_printf(&sc->arpcom.ac_if,
1338                           "host coalescing engine failed to idle\n");
1339                 return(ENXIO);
1340         }
1341
1342         /* Set up host coalescing defaults */
1343         CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
1344         CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
1345         CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
1346         CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
1347         if (sc->bge_asicrev != BGE_ASICREV_BCM5705) {
1348                 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
1349                 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
1350         }
1351         CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0);
1352         CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0);
1353
1354         /* Set up address of statistics block */
1355         if (sc->bge_asicrev != BGE_ASICREV_BCM5705) {
1356                 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI, 0);
1357                 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
1358                     vtophys(&sc->bge_rdata->bge_info.bge_stats));
1359
1360                 CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
1361                 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
1362                 CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
1363         }
1364
1365         /* Set up address of status block */
1366         CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI, 0);
1367         CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
1368             vtophys(&sc->bge_rdata->bge_status_block));
1369
1370         sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx = 0;
1371         sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx = 0;
1372
1373         /* Turn on host coalescing state machine */
1374         CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
1375
1376         /* Turn on RX BD completion state machine and enable attentions */
1377         CSR_WRITE_4(sc, BGE_RBDC_MODE,
1378             BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);
1379
1380         /* Turn on RX list placement state machine */
1381         CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
1382
1383         /* Turn on RX list selector state machine. */
1384         if (sc->bge_asicrev != BGE_ASICREV_BCM5705)
1385                 CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
1386
1387         /* Turn on DMA, clear stats */
1388         CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB|
1389             BGE_MACMODE_RXDMA_ENB|BGE_MACMODE_RX_STATS_CLEAR|
1390             BGE_MACMODE_TX_STATS_CLEAR|BGE_MACMODE_RX_STATS_ENB|
1391             BGE_MACMODE_TX_STATS_ENB|BGE_MACMODE_FRMHDR_DMA_ENB|
1392             (sc->bge_tbi ? BGE_PORTMODE_TBI : BGE_PORTMODE_MII));
1393
1394         /* Set misc. local control, enable interrupts on attentions */
1395         CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);
1396
1397 #ifdef notdef
1398         /* Assert GPIO pins for PHY reset */
1399         BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
1400             BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
1401         BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
1402             BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
1403 #endif
1404
1405         /* Turn on DMA completion state machine */
1406         if (sc->bge_asicrev != BGE_ASICREV_BCM5705)
1407                 CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
1408
1409         /* Turn on write DMA state machine */
1410         CSR_WRITE_4(sc, BGE_WDMA_MODE,
1411             BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS);
1412         
1413         /* Turn on read DMA state machine */
1414         CSR_WRITE_4(sc, BGE_RDMA_MODE,
1415             BGE_RDMAMODE_ENABLE|BGE_RDMAMODE_ALL_ATTNS);
1416
1417         /* Turn on RX data completion state machine */
1418         CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
1419
1420         /* Turn on RX BD initiator state machine */
1421         CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
1422
1423         /* Turn on RX data and RX BD initiator state machine */
1424         CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
1425
1426         /* Turn on Mbuf cluster free state machine */
1427         if (sc->bge_asicrev != BGE_ASICREV_BCM5705)
1428                 CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
1429
1430         /* Turn on send BD completion state machine */
1431         CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
1432
1433         /* Turn on send data completion state machine */
1434         CSR_WRITE_4(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
1435
1436         /* Turn on send data initiator state machine */
1437         CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
1438
1439         /* Turn on send BD initiator state machine */
1440         CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
1441
1442         /* Turn on send BD selector state machine */
1443         CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
1444
1445         CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
1446         CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
1447             BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);
1448
1449         /* ack/clear link change events */
1450         CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
1451             BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
1452             BGE_MACSTAT_LINK_CHANGED);
1453
1454         /* Enable PHY auto polling (for MII/GMII only) */
1455         if (sc->bge_tbi) {
1456                 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
1457         } else {
1458                 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL|(10<<16));
1459                 if (sc->bge_asicrev == BGE_ASICREV_BCM5700)
1460                         CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
1461                             BGE_EVTENB_MI_INTERRUPT);
1462         }
1463
1464         /* Enable link state change attentions. */
1465         BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
1466
1467         return(0);
1468 }
1469
1470 /*
1471  * Probe for a Broadcom chip. Check the PCI vendor and device IDs
1472  * against our list and return its name if we find a match. Since the
1473  * Broadcom controller contains VPD support, we could also fetch the
1474  * device name string from the controller itself, but that code path
1475  * is disabled below; instead we announce the compiled-in name along
1476  * with the ASIC revision read from PCI config space.
1477  */
1478 static int
1479 bge_probe(device_t dev)
1480 {
1481         struct bge_softc *sc;
1482         struct bge_type *t;
1483         char *descbuf;
1484         uint16_t product, vendor;
1485
1486         product = pci_get_device(dev);
1487         vendor = pci_get_vendor(dev);
1488
1489         for (t = bge_devs; t->bge_name != NULL; t++) {
1490                 if (vendor == t->bge_vid && product == t->bge_did)
1491                         break;
1492         }
1493
1494         if (t->bge_name == NULL)
1495                 return(ENXIO);
1496
1497         sc = device_get_softc(dev);
1498 #ifdef notdef
1499         sc->bge_dev = dev;
1500
1501         bge_vpd_read(sc);
1502         device_set_desc(dev, sc->bge_vpd_prodname);
1503 #endif
1504         descbuf = malloc(BGE_DEVDESC_MAX, M_TEMP, M_WAITOK);
1505         snprintf(descbuf, BGE_DEVDESC_MAX, "%s, ASIC rev. %#04x", t->bge_name,
1506             pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >> 16);
1507         device_set_desc_copy(dev, descbuf);
1508         if (pci_get_subvendor(dev) == PCI_VENDOR_DELL)
1509                 sc->bge_no_3_led = 1;
1510         free(descbuf, M_TEMP);
1511         return(0);
1512 }
1513
1514 static int
1515 bge_attach(device_t dev)
1516 {
1517         int s;
1518         uint32_t command;
1519         struct ifnet *ifp;
1520         struct bge_softc *sc;
1521         uint32_t hwcfg = 0;
1522         uint32_t mac_addr = 0;
1523         int error = 0, rid;
1524         uint8_t ether_addr[ETHER_ADDR_LEN];
1525
1526         s = splimp();
1527
1528         sc = device_get_softc(dev);
1529         sc->bge_dev = dev;
1530         callout_init(&sc->bge_stat_timer);
1531
1532         /*
1533          * Map control/status registers.
1534          */
1535         pci_enable_busmaster(dev);
1536         pci_enable_io(dev, SYS_RES_MEMORY);
1537         command = pci_read_config(dev, PCIR_COMMAND, 4);
1538
1539         if (!(command & PCIM_CMD_MEMEN)) {
1540                 device_printf(dev, "failed to enable memory mapping!\n");
1541                 error = ENXIO;
1542                 goto fail;
1543         }
1544
1545         rid = BGE_PCI_BAR0;
1546         sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
1547             RF_ACTIVE);
1548
1549         if (sc->bge_res == NULL) {
1550                 device_printf(dev, "couldn't map memory\n");
1551                 error = ENXIO;
1552                 goto fail;
1553         }
1554
1555         sc->bge_btag = rman_get_bustag(sc->bge_res);
1556         sc->bge_bhandle = rman_get_bushandle(sc->bge_res);
1557         sc->bge_vhandle = (vm_offset_t)rman_get_virtual(sc->bge_res);
1558
1559         /* Allocate interrupt */
1560         rid = 0;
1561
1562         sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1563             RF_SHAREABLE | RF_ACTIVE);
1564
1565         if (sc->bge_irq == NULL) {
1566                 device_printf(dev, "couldn't map interrupt\n");
1567                 error = ENXIO;
1568                 goto fail;
1569         }
1570
1571         error = bus_setup_intr(dev, sc->bge_irq, INTR_TYPE_NET,
1572            bge_intr, sc, &sc->bge_intrhand);
1573
1574         if (error) {
1575                 bge_release_resources(sc);
1576                 device_printf(dev, "couldn't set up irq\n");
1577                 goto fail;
1578         }
1579
1580         ifp = &sc->arpcom.ac_if;
1581         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1582
1583         /* Try to reset the chip. */
1584         bge_reset(sc);
1585
1586         if (bge_chipinit(sc)) {
1587                 device_printf(dev, "chip initialization failed\n");
1588                 bge_release_resources(sc);
1589                 error = ENXIO;
1590                 goto fail;
1591         }
1592
1593         /*
1594          * Get station address from the EEPROM.
1595          */
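             /*
              * Some boards stash a copy of the address in NIC memory at
              * offset 0x0c14, tagged with a 16-bit 0x484b (ASCII "HK")
              * signature; check for that first, then fall back to reading
              * the EEPROM proper.
              */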
1596         mac_addr = bge_readmem_ind(sc, 0x0c14);
1597         if ((mac_addr >> 16) == 0x484b) {
1598                 ether_addr[0] = (uint8_t)(mac_addr >> 8);
1599                 ether_addr[1] = (uint8_t)mac_addr;
1600                 mac_addr = bge_readmem_ind(sc, 0x0c18);
1601                 ether_addr[2] = (uint8_t)(mac_addr >> 24);
1602                 ether_addr[3] = (uint8_t)(mac_addr >> 16);
1603                 ether_addr[4] = (uint8_t)(mac_addr >> 8);
1604                 ether_addr[5] = (uint8_t)mac_addr;
1605         } else if (bge_read_eeprom(sc, ether_addr,
1606             BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) {
1607                 device_printf(dev, "failed to read station address\n");
1608                 bge_release_resources(sc);
1609                 error = ENXIO;
1610                 goto fail;
1611         }
1612
1613         /* Allocate the general information block and ring buffers. */
1614         sc->bge_rdata = contigmalloc(sizeof(struct bge_ring_data), M_DEVBUF,
1615             M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);
1616
1617         if (sc->bge_rdata == NULL) {
1618                 bge_release_resources(sc);
1619                 error = ENXIO;
1620                 device_printf(dev, "no memory for list buffers!\n");
1621                 goto fail;
1622         }
1623
1624         bzero(sc->bge_rdata, sizeof(struct bge_ring_data));
1625
1626         /* Save ASIC rev. */
1627
1628         sc->bge_chipid =
1629             pci_read_config(dev, BGE_PCI_MISC_CTL, 4) &
1630             BGE_PCIMISCCTL_ASICREV;
1631         sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid);
1632         sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid);
1633
1634         /*
1635          * Try to allocate memory for jumbo buffers.
1636          * The 5705 does not appear to support jumbo frames.
1637          */
1638         if (sc->bge_asicrev != BGE_ASICREV_BCM5705) {
1639                 if (bge_alloc_jumbo_mem(sc)) {
1640                         device_printf(dev, "jumbo buffer allocation failed\n");
1641                         bge_release_resources(sc);
1642                         error = ENXIO;
1643                         goto fail;
1644                 }
1645         }
1646
1647         /* Set default tuneable values. */
1648         sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
1649         sc->bge_rx_coal_ticks = 150;
1650         sc->bge_tx_coal_ticks = 150;
1651         sc->bge_rx_max_coal_bds = 64;
1652         sc->bge_tx_max_coal_bds = 128;
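             /*
              * These coalescing defaults trade interrupt rate for latency:
              * the host coalescing engine fires once either the tick count
              * or the buffered-BD count is reached, whichever happens first.
              */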
1653
1654         /* 5705 limits RX return ring to 512 entries. */
1655         if (sc->bge_asicrev == BGE_ASICREV_BCM5705)
1656                 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
1657         else
1658                 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
1659
1660         /* Set up ifnet structure */
1661         ifp->if_softc = sc;
1662         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1663         ifp->if_ioctl = bge_ioctl;
1664         ifp->if_start = bge_start;
1665         ifp->if_watchdog = bge_watchdog;
1666         ifp->if_init = bge_init;
1667         ifp->if_mtu = ETHERMTU;
1668         ifq_set_maxlen(&ifp->if_snd, BGE_TX_RING_CNT - 1);
1669         ifq_set_ready(&ifp->if_snd);
1670         ifp->if_hwassist = BGE_CSUM_FEATURES;
1671         ifp->if_capabilities = IFCAP_HWCSUM;
1672         ifp->if_capenable = ifp->if_capabilities;
1673
1674         /*
1675          * Figure out what sort of media we have by checking the
1676          * hardware config word in the first 32k of NIC internal memory,
1677          * or fall back to examining the EEPROM if necessary.
1678          * Note: on some BCM5700 cards, this value appears to be unset.
1679          * If that's the case, we have to rely on identifying the NIC
1680          * by its PCI subsystem ID, as we do below for the SysKonnect
1681          * SK-9D41.
1682          */
1683         if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER)
1684                 hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
1685         else {
1686                 bge_read_eeprom(sc, (caddr_t)&hwcfg,
1687                                 BGE_EE_HWCFG_OFFSET, sizeof(hwcfg));
1688                 hwcfg = ntohl(hwcfg);
1689         }
1690
1691         if ((hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER)
1692                 sc->bge_tbi = 1;
1693
1694         /* The SysKonnect SK-9D41 is a 1000baseSX card. */
1695         if (pci_get_subvendor(dev) == PCI_PRODUCT_SCHNEIDERKOCH_SK_9D41)
1696                 sc->bge_tbi = 1;
1697
1698         if (sc->bge_tbi) {
1699                 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK,
1700                     bge_ifmedia_upd, bge_ifmedia_sts);
1701                 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
1702                 ifmedia_add(&sc->bge_ifmedia,
1703                     IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL);
1704                 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
1705                 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO);
1706         } else {
1707                 /*
1708                  * Do transceiver setup.
1709                  */
1710                 if (mii_phy_probe(dev, &sc->bge_miibus,
1711                     bge_ifmedia_upd, bge_ifmedia_sts)) {
1712                         device_printf(dev, "MII without any PHY!\n");
1713                         bge_release_resources(sc);
1714                         if (sc->bge_asicrev != BGE_ASICREV_BCM5705)
                                     bge_free_jumbo_mem(sc);
1715                         error = ENXIO;
1716                         goto fail;
1717                 }
1718         }
1719
1720         /*
1721          * When using the BCM5701 in PCI-X mode, data corruption has
1722          * been observed in the first few bytes of some received packets.
1723          * Aligning the packet buffer in memory eliminates the corruption.
1724          * Unfortunately, this misaligns the packet payloads.  On platforms
1725          * which do not support unaligned accesses, we will realign the
1726          * payloads by copying the received packets.
1727          */
1728         switch (sc->bge_chipid) {
1729         case BGE_CHIPID_BCM5701_A0:
1730         case BGE_CHIPID_BCM5701_B0:
1731         case BGE_CHIPID_BCM5701_B2:
1732         case BGE_CHIPID_BCM5701_B5:
1733                 /* If in PCI-X mode, work around the alignment bug. */
1734                 if ((pci_read_config(dev, BGE_PCI_PCISTATE, 4) &
1735                     (BGE_PCISTATE_PCI_BUSMODE | BGE_PCISTATE_PCI_BUSSPEED)) ==
1736                     BGE_PCISTATE_PCI_BUSSPEED)
1737                         sc->bge_rx_alignment_bug = 1;
1738                 break;
1739         }
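             /* The realignment copy itself is done in bge_rxeof(). */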
1740
1741         /*
1742          * Call MI attach routine.
1743          */
1744         ether_ifattach(ifp, ether_addr);
1745
1746 fail:
1747         splx(s);
1748
1749         return(error);
1750 }
1751
1752 static int
1753 bge_detach(device_t dev)
1754 {
1755         struct bge_softc *sc;
1756         struct ifnet *ifp;
1757         int s;
1758
1759         s = splimp();
1760
1761         sc = device_get_softc(dev);
1762         ifp = &sc->arpcom.ac_if;
1763
1764         ether_ifdetach(ifp);
1765         bge_stop(sc);
1766         bge_reset(sc);
1767
1768         if (sc->bge_tbi) {
1769                 ifmedia_removeall(&sc->bge_ifmedia);
1770         } else {
1771                 bus_generic_detach(dev);
1772                 device_delete_child(dev, sc->bge_miibus);
1773         }
1774
1775         bge_release_resources(sc);
1776         if (sc->bge_asicrev != BGE_ASICREV_BCM5705)
1777                 bge_free_jumbo_mem(sc);
1778
1779         splx(s);
1780
1781         return(0);
1782 }
1783
1784 static void
1785 bge_release_resources(struct bge_softc *sc)
1786 {
1787         device_t dev;
1788
1789         dev = sc->bge_dev;
1790
1791         if (sc->bge_vpd_prodname != NULL)
1792                 free(sc->bge_vpd_prodname, M_DEVBUF);
1793
1794         if (sc->bge_vpd_readonly != NULL)
1795                 free(sc->bge_vpd_readonly, M_DEVBUF);
1796
1797         if (sc->bge_intrhand != NULL)
1798                 bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand);
1799
1800         if (sc->bge_irq != NULL)
1801                 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->bge_irq);
1802
1803         if (sc->bge_res != NULL)
1804                 bus_release_resource(dev, SYS_RES_MEMORY,
1805                     BGE_PCI_BAR0, sc->bge_res);
1806
1807         if (sc->bge_rdata != NULL)
1808                 contigfree(sc->bge_rdata, sizeof(struct bge_ring_data),
1809                            M_DEVBUF);
1810
1811         return;
1812 }
1813
1814 static void
1815 bge_reset(struct bge_softc *sc)
1816 {
1817         device_t dev;
1818         uint32_t cachesize, command, pcistate;
1819         int i, val = 0;
1820
1821         dev = sc->bge_dev;
1822
1823         /* Save some important PCI state. */
1824         cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
1825         command = pci_read_config(dev, BGE_PCI_CMD, 4);
1826         pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);
1827
1828         pci_write_config(dev, BGE_PCI_MISC_CTL,
1829             BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
1830             BGE_PCIMISCCTL_ENDIAN_WORDSWAP|BGE_PCIMISCCTL_PCISTATE_RW, 4);
1831
1832         /* Issue global reset */
1833         bge_writereg_ind(sc, BGE_MISC_CFG,
1834                          BGE_MISCCFG_RESET_CORE_CLOCKS | (65<<1));
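             /*
              * (The 65<<1 above presumably loads the 32-bit-time prescaler
              * field, 0x41 for a 66MHz core clock, alongside the core-clock
              * reset bit; the same value is restored without the reset bit
              * further below.)
              */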
1835
1836         DELAY(1000);
1837
1838         /* Reset some of the PCI state that got zapped by reset */
1839         pci_write_config(dev, BGE_PCI_MISC_CTL,
1840             BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
1841             BGE_PCIMISCCTL_ENDIAN_WORDSWAP|BGE_PCIMISCCTL_PCISTATE_RW, 4);
1842         pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
1843         pci_write_config(dev, BGE_PCI_CMD, command, 4);
1844         bge_writereg_ind(sc, BGE_MISC_CFG, (65 << 1));
1845
1846         /*
1847          * Prevent PXE restart: write a magic number to the
1848          * general communications memory at 0xB50.
1849          */
1850         bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
1851         /*
1852          * Poll the value location we just wrote until
1853          * we see the 1's complement of the magic number.
1854          * This indicates that the firmware initialization
1855          * is complete.
1856          */
1857         for (i = 0; i < BGE_TIMEOUT; i++) {
1858                 val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
1859                 if (val == ~BGE_MAGIC_NUMBER)
1860                         break;
1861                 DELAY(10);
1862         }
1863         
1864         if (i == BGE_TIMEOUT) {
1865                 if_printf(&sc->arpcom.ac_if, "firmware handshake timed out\n");
1866                 return;
1867         }
1868
1869         /*
1870          * XXX Wait for the value of the PCISTATE register to
1871          * return to its original pre-reset state. This is a
1872          * fairly good indicator of reset completion. If we don't
1873          * wait for the reset to fully complete, trying to read
1874          * from the device's non-PCI registers may yield garbage
1875          * results.
1876          */
1877         for (i = 0; i < BGE_TIMEOUT; i++) {
1878                 if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate)
1879                         break;
1880                 DELAY(10);
1881         }
1882
1883         /* Enable memory arbiter. */
1884         if (sc->bge_asicrev != BGE_ASICREV_BCM5705)
1885                 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
1886
1887         /* Fix up byte swapping */
1888         CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_MODECTL_BYTESWAP_NONFRAME|
1889             BGE_MODECTL_BYTESWAP_DATA);
1890
1891         CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
1892
1893         DELAY(10000);
1894
1895         return;
1896 }
1897
1898 /*
1899  * Frame reception handling. This is called if there's a frame
1900  * on the receive return list.
1901  *
1902  * Note: we have to be able to handle two possibilities here:
1903  * 1) the frame is from the jumbo receive ring
1904  * 2) the frame is from the standard receive ring
1905  */
1906
1907 static void
1908 bge_rxeof(struct bge_softc *sc)
1909 {
1910         struct ifnet *ifp;
1911         int stdcnt = 0, jumbocnt = 0;
1912
1913         ifp = &sc->arpcom.ac_if;
1914
1915         while(sc->bge_rx_saved_considx !=
1916             sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx) {
1917                 struct bge_rx_bd        *cur_rx;
1918                 uint32_t                rxidx;
1919                 struct mbuf             *m = NULL;
1920                 uint16_t                vlan_tag = 0;
1921                 int                     have_tag = 0;
1922
1923                 cur_rx =
1924                     &sc->bge_rdata->bge_rx_return_ring[sc->bge_rx_saved_considx];
1925
1926                 rxidx = cur_rx->bge_idx;
1927                 BGE_INC(sc->bge_rx_saved_considx, sc->bge_return_ring_cnt);
1928
1929                 if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
1930                         have_tag = 1;
1931                         vlan_tag = cur_rx->bge_vlan_tag;
1932                 }
1933
1934                 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
1935                         BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
1936                         m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
1937                         sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL;
1938                         jumbocnt++;
1939                         if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
1940                                 ifp->if_ierrors++;
1941                                 bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
1942                                 continue;
1943                         }
1944                         if (bge_newbuf_jumbo(sc,
1945                             sc->bge_jumbo, NULL) == ENOBUFS) {
1946                                 ifp->if_ierrors++;
1947                                 bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
1948                                 continue;
1949                         }
1950                 } else {
1951                         BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
1952                         m = sc->bge_cdata.bge_rx_std_chain[rxidx];
1953                         sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL;
1954                         stdcnt++;
1955                         if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
1956                                 ifp->if_ierrors++;
1957                                 bge_newbuf_std(sc, sc->bge_std, m);
1958                                 continue;
1959                         }
1960                         if (bge_newbuf_std(sc, sc->bge_std,
1961                             NULL) == ENOBUFS) {
1962                                 ifp->if_ierrors++;
1963                                 bge_newbuf_std(sc, sc->bge_std, m);
1964                                 continue;
1965                         }
1966                 }
1967
1968                 ifp->if_ipackets++;
1969 #ifndef __i386__
1970                 /*
1971                  * The i386 allows unaligned accesses, but for other
1972                  * platforms we must make sure the payload is aligned.
1973                  */
1974                 if (sc->bge_rx_alignment_bug) {
1975                         bcopy(m->m_data, m->m_data + ETHER_ALIGN,
1976                             cur_rx->bge_len);
1977                         m->m_data += ETHER_ALIGN;
1978                 }
1979 #endif
1980                 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
1981                 m->m_pkthdr.rcvif = ifp;
1982
1983 #if 0 /* currently broken for some packets, possibly related to TCP options */
1984                 if (ifp->if_hwassist) {
1985                         m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
1986                         if ((cur_rx->bge_ip_csum ^ 0xffff) == 0)
1987                                 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
1988                         if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) {
1989                                 m->m_pkthdr.csum_data =
1990                                     cur_rx->bge_tcp_udp_csum;
1991                                 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
1992                         }
1993                 }
1994 #endif
1995
1996                 /*
1997                  * If we received a packet with a vlan tag, pass it
1998                  * to vlan_input() instead of ether_input().
1999                  */
2000                 if (have_tag) {
2001                         VLAN_INPUT_TAG(m, vlan_tag);
2002                         have_tag = vlan_tag = 0;
2003                         continue;
2004                 }
2005
2006                 (*ifp->if_input)(ifp, m);
2007         }
2008
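             /*
              * Tell the chip how far we got: echo the return-ring consumer
              * index back, and bump the std/jumbo producer indices for any
              * receive buffers replenished above.
              */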
2009         CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
2010         if (stdcnt)
2011                 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
2012         if (jumbocnt)
2013                 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
2014 }
2015
2016 static void
2017 bge_txeof(struct bge_softc *sc)
2018 {
2019         struct bge_tx_bd *cur_tx = NULL;
2020         struct ifnet *ifp;
2021
2022         ifp = &sc->arpcom.ac_if;
2023
2024         /*
2025          * Go through our tx ring and free mbufs for those
2026          * frames that have been sent.
2027          */
2028         while (sc->bge_tx_saved_considx !=
2029             sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx) {
2030                 uint32_t                idx = 0;
2031
2032                 idx = sc->bge_tx_saved_considx;
2033                 cur_tx = &sc->bge_rdata->bge_tx_ring[idx];
2034                 if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
2035                         ifp->if_opackets++;
2036                 if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
2037                         m_freem(sc->bge_cdata.bge_tx_chain[idx]);
2038                         sc->bge_cdata.bge_tx_chain[idx] = NULL;
2039                 }
2040                 sc->bge_txcnt--;
2041                 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
2042                 ifp->if_timer = 0;
2043         }
2044
2045         if (cur_tx != NULL)
2046                 ifp->if_flags &= ~IFF_OACTIVE;
2047 }
2048
2049 static void
2050 bge_intr(void *xsc)
2051 {
2052         struct bge_softc *sc = xsc;
2053         struct ifnet *ifp = &sc->arpcom.ac_if;
2054         uint32_t status;
2055
2056 #ifdef notdef
2057         /* Avoid this for now -- checking this register is expensive. */
2058         /* Make sure this is really our interrupt. */
2059         if (!(CSR_READ_4(sc, BGE_MISC_LOCAL_CTL) & BGE_MLC_INTR_STATE))
2060                 return;
2061 #endif
2062         /* Ack interrupt and stop others from occurring. */
2063         CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
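             /*
              * (Writing 1 to the IRQ0 mailbox masks further interrupts
              * until the matching write of 0 near the end of this handler.)
              */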
2064
2065         /*
2066          * Process link state changes.
2067          * Grrr. The link status word in the status block does
2068          * not work correctly on the BCM5700 rev AX and BX chips,
2069          * according to all available information. Hence, we have
2070          * to enable MII interrupts in order to properly obtain
2071          * async link changes. Unfortunately, this also means that
2072          * we have to read the MAC status register to detect link
2073          * changes, thereby adding an additional register access to
2074          * the interrupt handler.
2075          */
2076
2077         if (sc->bge_asicrev == BGE_ASICREV_BCM5700) {
2078                 status = CSR_READ_4(sc, BGE_MAC_STS);
2079                 if (status & BGE_MACSTAT_MI_INTERRUPT) {
2080                         sc->bge_link = 0;
2081                         callout_stop(&sc->bge_stat_timer);
2082                         bge_tick(sc);
2083                         /* Clear the interrupt */
2084                         CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
2085                             BGE_EVTENB_MI_INTERRUPT);
2086                         bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR);
2087                         bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR,
2088                             BRGPHY_INTRS);
2089                 }
2090         } else {
2091                 if ((sc->bge_rdata->bge_status_block.bge_status &
2092                     BGE_STATFLAG_UPDATED) &&
2093                     (sc->bge_rdata->bge_status_block.bge_status &
2094                     BGE_STATFLAG_LINKSTATE_CHANGED)) {
2095                         sc->bge_rdata->bge_status_block.bge_status &=
2096                                 ~(BGE_STATFLAG_UPDATED|
2097                                 BGE_STATFLAG_LINKSTATE_CHANGED);
2098                         /*
2099                          * Sometimes PCS encoding errors are detected in
2100                          * TBI mode (on fiber NICs), and for some reason
2101                          * the chip will signal them as link changes.
2102                          * If we get a link change event, but the 'PCS
2103                          * encoding error' bit in the MAC status register
2104                          * is set, don't bother doing a link check.
2105                          * This avoids spurious "gigabit link up" messages
2106                          * that sometimes appear on fiber NICs during
2107                          * periods of heavy traffic. (There should be no
2108                          * effect on copper NICs.)
2109                          */
2110                         status = CSR_READ_4(sc, BGE_MAC_STS);
2111                         if (!(status & (BGE_MACSTAT_PORT_DECODE_ERROR|
2112                             BGE_MACSTAT_MI_COMPLETE))) {
2113                                 sc->bge_link = 0;
2114                                 callout_stop(&sc->bge_stat_timer);
2115                                 bge_tick(sc);
2116                         }
2120                         /* Clear the interrupt */
2121                         CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
2122                             BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
2123                             BGE_MACSTAT_LINK_CHANGED);
2124
2125                         /* Force flush the status block cached by PCI bridge */
2126                         CSR_READ_4(sc, BGE_MBX_IRQ0_LO);
2127                 }
2128         }
2129
2130         if (ifp->if_flags & IFF_RUNNING) {
2131                 /* Check RX return ring producer/consumer */
2132                 bge_rxeof(sc);
2133
2134                 /* Check TX ring producer/consumer */
2135                 bge_txeof(sc);
2136         }
2137
2138         bge_handle_events(sc);
2139
2140         /* Re-enable interrupts. */
2141         CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
2142
2143         if ((ifp->if_flags & IFF_RUNNING) && !ifq_is_empty(&ifp->if_snd))
2144                 (*ifp->if_start)(ifp);
2145 }
2146
2147 static void
2148 bge_tick(void *xsc)
2149 {
2150         struct bge_softc *sc = xsc;
2151         struct ifnet *ifp = &sc->arpcom.ac_if;
2152         struct mii_data *mii = NULL;
2153         struct ifmedia *ifm = NULL;
2154         int s;
2155
2156         s = splimp();
2157
2158         if (sc->bge_asicrev == BGE_ASICREV_BCM5705)
2159                 bge_stats_update_regs(sc);
2160         else
2161                 bge_stats_update(sc);
2162         callout_reset(&sc->bge_stat_timer, hz, bge_tick, sc);
2163         if (sc->bge_link) {
2164                 splx(s);
2165                 return;
2166         }
2167
2168         if (sc->bge_tbi) {
2169                 ifm = &sc->bge_ifmedia;
2170                 if (CSR_READ_4(sc, BGE_MAC_STS) &
2171                     BGE_MACSTAT_TBI_PCS_SYNCHED) {
2172                         sc->bge_link++;
2173                         CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
2174                         if_printf(ifp, "gigabit link up\n");
2175                         if (!ifq_is_empty(&ifp->if_snd))
2176                                 (*ifp->if_start)(ifp);
2177                 }
2178                 splx(s);
2179                 return;
2180         }
2181
2182         mii = device_get_softc(sc->bge_miibus);
2183         mii_tick(mii);
2184  
2185         if (!sc->bge_link) {
2186                 mii_pollstat(mii);
2187                 if (mii->mii_media_status & IFM_ACTIVE &&
2188                     IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
2189                         sc->bge_link++;
2190                         if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
2191                             IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
2192                                 if_printf(ifp, "gigabit link up\n");
2193                         if (!ifq_is_empty(&ifp->if_snd))
2194                                 (*ifp->if_start)(ifp);
2195                 }
2196         }
2197
2198         splx(s);
2199 }
2200
2201 static void
2202 bge_stats_update_regs(struct bge_softc *sc)
2203 {
2204         struct ifnet *ifp = &sc->arpcom.ac_if;
2205         struct bge_mac_stats_regs stats;
2206         uint32_t *s;
2207         int i;
2208
2209         s = (uint32_t *)&stats;
2210         for (i = 0; i < sizeof(struct bge_mac_stats_regs); i += 4) {
2211                 *s = CSR_READ_4(sc, BGE_RX_STATS + i);
2212                 s++;
2213         }
2214
2215         ifp->if_collisions +=
2216            (stats.dot3StatsSingleCollisionFrames +
2217            stats.dot3StatsMultipleCollisionFrames +
2218            stats.dot3StatsExcessiveCollisions +
2219            stats.dot3StatsLateCollisions) -
2220            ifp->if_collisions;
2221 }
2222
2223 static void
2224 bge_stats_update(struct bge_softc *sc)
2225 {
2226         struct ifnet *ifp = &sc->arpcom.ac_if;
2227         struct bge_stats *stats;
2228
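             /*
              * The statistics block lives in NIC-local memory, so point at
              * it through the mapped register window (bge_vhandle) rather
              * than through host memory.
              */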
2229         stats = (struct bge_stats *)(sc->bge_vhandle +
2230             BGE_MEMWIN_START + BGE_STATS_BLOCK);
2231
2232         ifp->if_collisions +=
2233            (stats->txstats.dot3StatsSingleCollisionFrames.bge_addr_lo +
2234            stats->txstats.dot3StatsMultipleCollisionFrames.bge_addr_lo +
2235            stats->txstats.dot3StatsExcessiveCollisions.bge_addr_lo +
2236            stats->txstats.dot3StatsLateCollisions.bge_addr_lo) -
2237            ifp->if_collisions;
2238
2239 #ifdef notdef
2240         ifp->if_collisions +=
2241            (sc->bge_rdata->bge_info.bge_stats.dot3StatsSingleCollisionFrames +
2242            sc->bge_rdata->bge_info.bge_stats.dot3StatsMultipleCollisionFrames +
2243            sc->bge_rdata->bge_info.bge_stats.dot3StatsExcessiveCollisions +
2244            sc->bge_rdata->bge_info.bge_stats.dot3StatsLateCollisions) -
2245            ifp->if_collisions;
2246 #endif
2247 }
2248
2249 /*
2250  * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
2251  * pointers to descriptors.
2252  */
2253 static int
2254 bge_encap(struct bge_softc *sc, struct mbuf *m_head, uint32_t *txidx)
2255 {
2256         struct bge_tx_bd *f = NULL;
2257         struct mbuf *m;
2258         uint32_t frag, cur, cnt = 0;
2259         uint16_t csum_flags = 0;
2260         struct ifvlan *ifv = NULL;
2261
2262         if ((m_head->m_flags & (M_PROTO1|M_PKTHDR)) == (M_PROTO1|M_PKTHDR) &&
2263             m_head->m_pkthdr.rcvif != NULL &&
2264             m_head->m_pkthdr.rcvif->if_type == IFT_L2VLAN)
2265                 ifv = m_head->m_pkthdr.rcvif->if_softc;
2266
2268         cur = frag = *txidx;
2269
2270         if (m_head->m_pkthdr.csum_flags) {
2271                 if (m_head->m_pkthdr.csum_flags & CSUM_IP)
2272                         csum_flags |= BGE_TXBDFLAG_IP_CSUM;
2273                 if (m_head->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
2274                         csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
2275                 if (m_head->m_flags & M_LASTFRAG)
2276                         csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
2277                 else if (m_head->m_flags & M_FRAG)
2278                         csum_flags |= BGE_TXBDFLAG_IP_FRAG;
2279         }
2280         /*
2281          * Start packing the mbufs in this chain into
2282          * the fragment pointers. Stop when we run out
2283          * of fragments or hit the end of the mbuf chain.
2284          */
2285         for (m = m_head; m != NULL; m = m->m_next) {
2286                 if (m->m_len != 0) {
2287                         f = &sc->bge_rdata->bge_tx_ring[frag];
2288                         if (sc->bge_cdata.bge_tx_chain[frag] != NULL)
2289                                 break;
2290                         BGE_HOSTADDR(f->bge_addr,
2291                             vtophys(mtod(m, vm_offset_t)));
2292                         f->bge_len = m->m_len;
2293                         f->bge_flags = csum_flags;
2294                         if (ifv != NULL) {
2295                                 f->bge_flags |= BGE_TXBDFLAG_VLAN_TAG;
2296                                 f->bge_vlan_tag = ifv->ifv_tag;
2297                         } else {
2298                                 f->bge_vlan_tag = 0;
2299                         }
2300                         /*
2301                          * Sanity check: avoid coming within 16 descriptors
2302                          * of the end of the ring.
2303                          */
2304                         if ((BGE_TX_RING_CNT - (sc->bge_txcnt + cnt)) < 16)
2305                                 return(ENOBUFS);
2306                         cur = frag;
2307                         BGE_INC(frag, BGE_TX_RING_CNT);
2308                         cnt++;
2309                 }
2310         }
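             /*
              * Note the reliance on vtophys() above: each mbuf is assumed
              * to be physically contiguous, and chains are handled by
              * burning one descriptor per mbuf rather than via busdma
              * mappings.
              */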
2311
2312         if (m != NULL)
2313                 return(ENOBUFS);
2314
2315         if (frag == sc->bge_tx_saved_considx)
2316                 return(ENOBUFS);
2317
2318         sc->bge_rdata->bge_tx_ring[cur].bge_flags |= BGE_TXBDFLAG_END;
2319         sc->bge_cdata.bge_tx_chain[cur] = m_head;
2320         sc->bge_txcnt += cnt;
2321
2322         *txidx = frag;
2323
2324         return(0);
2325 }
2326
2327 /*
2328  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
2329  * to the mbuf data regions directly in the transmit descriptors.
2330  */
2331 static void
2332 bge_start(struct ifnet *ifp)
2333 {
2334         struct bge_softc *sc;
2335         struct mbuf *m_head = NULL;
2336         uint32_t prodidx = 0;
2337
2338         sc = ifp->if_softc;
2339
2340         if (!sc->bge_link)
2341                 return;
2342
2343         prodidx = CSR_READ_4(sc, BGE_MBX_TX_HOST_PROD0_LO);
2344
2345         while(sc->bge_cdata.bge_tx_chain[prodidx] == NULL) {
2346                 m_head = ifq_poll(&ifp->if_snd);
2347                 if (m_head == NULL)
2348                         break;
2349
2350                 /*
2351                  * XXX
2352                  * safety overkill.  If this is a fragmented packet chain
2353                  * with delayed TCP/UDP checksums, then only encapsulate
2354                  * it if we have enough descriptors to handle the entire
2355                  * chain at once.
2356                  * (paranoia -- may not actually be needed)
2357                  */
2358                 if (m_head->m_flags & M_FIRSTFRAG &&
2359                     m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
2360                         if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
2361                             m_head->m_pkthdr.csum_data + 16) {
2362                                 ifp->if_flags |= IFF_OACTIVE;
2363                                 break;
2364                         }
2365                 }
2366
2367                 /*
2368                  * Pack the data into the transmit ring. If we
2369                  * don't have room, set the OACTIVE flag and wait
2370                  * for the NIC to drain the ring.
2371                  */
2372                 if (bge_encap(sc, m_head, &prodidx)) {
2373                         ifp->if_flags |= IFF_OACTIVE;
2374                         break;
2375                 }
2376                 m_head = ifq_dequeue(&ifp->if_snd);
2377
2378                 BPF_MTAP(ifp, m_head);
2379         }
2380
2381         /* Transmit */
2382         CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
2383         /* 5700 b2 errata */
2384         if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
2385                 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
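             /*
              * (Per the errata workaround above, the first mailbox write
              * can apparently be dropped on 5700 BX parts, so the producer
              * index is written twice.)
              */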
2386
2387         /*
2388          * Set a timeout in case the chip goes out to lunch.
2389          */
2390         ifp->if_timer = 5;
2391 }
2392
2393 static void
2394 bge_init(void *xsc)
2395 {
2396         struct bge_softc *sc = xsc;
2397         struct ifnet *ifp = &sc->arpcom.ac_if;
2398         uint16_t *m;
2399         int s;
2400
2401         s = splimp();
2402
2403         if (ifp->if_flags & IFF_RUNNING) {
2404                 splx(s);
2405                 return;
2406         }
2407
2408         /* Cancel pending I/O and flush buffers. */
2409         bge_stop(sc);
2410         bge_reset(sc);
2411         bge_chipinit(sc);
2412
2413         /*
2414          * Init the various state machines, ring
2415          * control blocks and firmware.
2416          */
2417         if (bge_blockinit(sc)) {
2418                 if_printf(ifp, "initialization failure\n");
2419                 splx(s);
2420                 return;
2421         }
2422
2423         /* Specify MTU. */
2424         CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
2425             ETHER_HDR_LEN + ETHER_CRC_LEN);
2426
2427         /* Load our MAC address. */
2428         m = (uint16_t *)&sc->arpcom.ac_enaddr[0];
2429         CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
2430         CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
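             /*
              * (The address registers appear to take the station address
              * in network byte order, hence the htons() on each 16-bit
              * chunk.)
              */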
2431
2432         /* Enable or disable promiscuous mode as needed. */
2433         if (ifp->if_flags & IFF_PROMISC) {
2434                 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
2435         } else {
2436                 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
2437         }
2438
2439         /* Program multicast filter. */
2440         bge_setmulti(sc);
2441
2442         /* Init RX ring. */
2443         bge_init_rx_ring_std(sc);
2444
2445         /*
2446          * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
2447          * memory to ensure that the chip has in fact read the first
2448          * entry of the ring.
2449          */
2450         if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
2451                 uint32_t                v, i;
2452                 for (i = 0; i < 10; i++) {
2453                         DELAY(20);
2454                         v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
2455                         if (v == (MCLBYTES - ETHER_ALIGN))
2456                                 break;
2457                 }
2458                 if (i == 10)
2459                         if_printf(ifp, "5705 A0 chip failed to load RX ring\n");
2460         }
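             /*
              * (The poll above reads the length field of the first standard
              * ring descriptor out of NIC-local memory; once the chip has
              * fetched the entry it should reflect the MCLBYTES - ETHER_ALIGN
              * length presumably programmed by bge_newbuf_std().)
              */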
2461
2462         /* Init jumbo RX ring. */
2463         if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
2464                 bge_init_rx_ring_jumbo(sc);
2465
2466         /* Init our RX return ring index */
2467         sc->bge_rx_saved_considx = 0;
2468
2469         /* Init TX ring. */
2470         bge_init_tx_ring(sc);
2471
2472         /* Turn on transmitter */
2473         BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE);
2474
2475         /* Turn on receiver */
2476         BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
2477
2478         /* Tell firmware we're alive. */
2479         BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
2480
2481         /* Enable host interrupts. */
2482         BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
2483         BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
2484         CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
2485
2486         bge_ifmedia_upd(ifp);
2487
2488         ifp->if_flags |= IFF_RUNNING;
2489         ifp->if_flags &= ~IFF_OACTIVE;
2490
2491         splx(s);
2492
2493         callout_reset(&sc->bge_stat_timer, hz, bge_tick, sc);
2494 }
2495
2496 /*
2497  * Set media options.
2498  */
2499 static int
2500 bge_ifmedia_upd(struct ifnet *ifp)
2501 {
2502         struct bge_softc *sc = ifp->if_softc;
2503         struct ifmedia *ifm = &sc->bge_ifmedia;
2504         struct mii_data *mii;
2505
2506         /* If this is a 1000baseX NIC, enable the TBI port. */
2507         if (sc->bge_tbi) {
2508                 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2509                         return(EINVAL);
2510                 switch(IFM_SUBTYPE(ifm->ifm_media)) {
2511                 case IFM_AUTO:
2512                         break;
2513                 case IFM_1000_SX:
2514                         if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
2515                                 BGE_CLRBIT(sc, BGE_MAC_MODE,
2516                                     BGE_MACMODE_HALF_DUPLEX);
2517                         } else {
2518                                 BGE_SETBIT(sc, BGE_MAC_MODE,
2519                                     BGE_MACMODE_HALF_DUPLEX);
2520                         }
2521                         break;
2522                 default:
2523                         return(EINVAL);
2524                 }
2525                 return(0);
2526         }
2527
2528         mii = device_get_softc(sc->bge_miibus);
2529         sc->bge_link = 0;
2530         if (mii->mii_instance) {
2531                 struct mii_softc *miisc;
2532                 for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
2533                     miisc = LIST_NEXT(miisc, mii_list))
2534                         mii_phy_reset(miisc);
2535         }
2536         mii_mediachg(mii);
2537
2538         return(0);
2539 }
2540
2541 /*
2542  * Report current media status.
2543  */
2544 static void
2545 bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
2546 {
2547         struct bge_softc *sc = ifp->if_softc;
2548         struct mii_data *mii;
2549
2550         if (sc->bge_tbi) {
2551                 ifmr->ifm_status = IFM_AVALID;
2552                 ifmr->ifm_active = IFM_ETHER;
2553                 if (CSR_READ_4(sc, BGE_MAC_STS) &
2554                     BGE_MACSTAT_TBI_PCS_SYNCHED)
2555                         ifmr->ifm_status |= IFM_ACTIVE;
2556                 ifmr->ifm_active |= IFM_1000_SX;
2557                 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
2558                         ifmr->ifm_active |= IFM_HDX;    
2559                 else
2560                         ifmr->ifm_active |= IFM_FDX;
2561                 return;
2562         }
2563
2564         mii = device_get_softc(sc->bge_miibus);
2565         mii_pollstat(mii);
2566         ifmr->ifm_active = mii->mii_media_active;
2567         ifmr->ifm_status = mii->mii_media_status;
2568 }
2569
2570 static int
2571 bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
2572 {
2573         struct bge_softc *sc = ifp->if_softc;
2574         struct ifreq *ifr = (struct ifreq *) data;
2575         int s, mask, error = 0;
2576         struct mii_data *mii;
2577
2578         s = splimp();
2579
2580         switch(command) {
2581         case SIOCSIFADDR:
2582         case SIOCGIFADDR:
2583                 error = ether_ioctl(ifp, command, data);
2584                 break;
2585         case SIOCSIFMTU:
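                     /*
                      * An MTU change forces the full reinit below, since
                      * bge_init() chooses between the standard and jumbo
                      * RX rings based on the configured MTU.
                      */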
2586                 /* Disallow jumbo frames on 5705. */
2587                 if ((sc->bge_asicrev == BGE_ASICREV_BCM5705 &&
2588                     ifr->ifr_mtu > ETHERMTU) || ifr->ifr_mtu > BGE_JUMBO_MTU)
2589                         error = EINVAL;
2590                 else {
2591                         ifp->if_mtu = ifr->ifr_mtu;
2592                         ifp->if_flags &= ~IFF_RUNNING;
2593                         bge_init(sc);
2594                 }
2595                 break;
2596         case SIOCSIFFLAGS:
2597                 if (ifp->if_flags & IFF_UP) {
2598                         /*
2599                          * If only the state of the PROMISC flag changed,
2600                          * then just use the 'set promisc mode' command
2601                          * instead of reinitializing the entire NIC. Doing
2602                          * a full re-init means reloading the firmware and
2603                          * waiting for it to start up, which may take a
2604                          * second or two.
2605                          */
2606                         if (ifp->if_flags & IFF_RUNNING &&
2607                             ifp->if_flags & IFF_PROMISC &&
2608                             !(sc->bge_if_flags & IFF_PROMISC)) {
2609                                 BGE_SETBIT(sc, BGE_RX_MODE,
2610                                     BGE_RXMODE_RX_PROMISC);
2611                         } else if (ifp->if_flags & IFF_RUNNING &&
2612                             !(ifp->if_flags & IFF_PROMISC) &&
2613                             sc->bge_if_flags & IFF_PROMISC) {
2614                                 BGE_CLRBIT(sc, BGE_RX_MODE,
2615                                     BGE_RXMODE_RX_PROMISC);
2616                         } else
2617                                 bge_init(sc);
2618                 } else {
2619                         if (ifp->if_flags & IFF_RUNNING) {
2620                                 bge_stop(sc);
2621                         }
2622                 }
2623                 sc->bge_if_flags = ifp->if_flags;
2624                 error = 0;
2625                 break;
2626         case SIOCADDMULTI:
2627         case SIOCDELMULTI:
2628                 if (ifp->if_flags & IFF_RUNNING) {
2629                         bge_setmulti(sc);
2630                         error = 0;
2631                 }
2632                 break;
2633         case SIOCSIFMEDIA:
2634         case SIOCGIFMEDIA:
2635                 if (sc->bge_tbi) {
2636                         error = ifmedia_ioctl(ifp, ifr,
2637                             &sc->bge_ifmedia, command);
2638                 } else {
2639                         mii = device_get_softc(sc->bge_miibus);
2640                         error = ifmedia_ioctl(ifp, ifr,
2641                             &mii->mii_media, command);
2642                 }
2643                 break;
2644         case SIOCSIFCAP:
2645                 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
2646                 if (mask & IFCAP_HWCSUM) {
2647                         if (IFCAP_HWCSUM & ifp->if_capenable)
2648                                 ifp->if_capenable &= ~IFCAP_HWCSUM;
2649                         else
2650                                 ifp->if_capenable |= IFCAP_HWCSUM;
2651                 }
2652                 error = 0;
2653                 break;
2654         default:
2655                 error = EINVAL;
2656                 break;
2657         }
2658
2659         splx(s);
2660
2661         return(error);
2662 }
2663
2664 static void
2665 bge_watchdog(struct ifnet *ifp)
2666 {
2667         struct bge_softc *sc = ifp->if_softc;
2668
2669         if_printf(ifp, "watchdog timeout -- resetting\n");
2670
2671         ifp->if_flags &= ~IFF_RUNNING;
2672         bge_init(sc);
2673
2674         ifp->if_oerrors++;
2675 }
2676
2677 /*
2678  * Stop the adapter and free any mbufs allocated to the
2679  * RX and TX lists.
2680  */
2681 static void
2682 bge_stop(struct bge_softc *sc)
2683 {
2684         struct ifnet *ifp = &sc->arpcom.ac_if;
2685         struct ifmedia_entry *ifm;
2686         struct mii_data *mii = NULL;
2687         int mtmp, itmp;
2688
2689         if (!sc->bge_tbi)
2690                 mii = device_get_softc(sc->bge_miibus);
2691
2692         callout_stop(&sc->bge_stat_timer);
2693
2694         /*
2695          * Disable all of the receiver blocks
2696          */
2697         BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
2698         BGE_CLRBIT(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
2699         BGE_CLRBIT(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
2700         if (sc->bge_asicrev != BGE_ASICREV_BCM5705)
2701                 BGE_CLRBIT(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
2702         BGE_CLRBIT(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
2703         BGE_CLRBIT(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
2704         BGE_CLRBIT(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
2705
2706         /*
2707          * Disable all of the transmit blocks
2708          */
2709         BGE_CLRBIT(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
2710         BGE_CLRBIT(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
2711         BGE_CLRBIT(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
2712         BGE_CLRBIT(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
2713         BGE_CLRBIT(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
2714         if (sc->bge_asicrev != BGE_ASICREV_BCM5705)
2715                 BGE_CLRBIT(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
2716         BGE_CLRBIT(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
2717
2718         /*
2719          * Shut down all of the memory managers and related
2720          * state machines.
2721          */
2722         BGE_CLRBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
2723         BGE_CLRBIT(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
2724         if (sc->bge_asicrev != BGE_ASICREV_BCM5705)
2725                 BGE_CLRBIT(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
2726         CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
2727         CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
2728         if (sc->bge_asicrev != BGE_ASICREV_BCM5705) {
2729                 BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
2730                 BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
2731         }
2732
2733         /* Disable host interrupts. */
2734         BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
2735         CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
2736
2737         /*
2738          * Tell firmware we're shutting down.
2739          */
2740         BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
2741
2742         /* Free the RX lists. */
2743         bge_free_rx_ring_std(sc);
2744
2745         /* Free jumbo RX list. */
2746         if (sc->bge_asicrev != BGE_ASICREV_BCM5705)
2747                 bge_free_rx_ring_jumbo(sc);
2748
2749         /* Free TX buffers. */
2750         bge_free_tx_ring(sc);
2751
2752         /*
2753          * Isolate/power down the PHY, but leave the media selection
2754          * unchanged so that things will be put back to normal when
2755          * we bring the interface back up.
2756          */
2757         if (!sc->bge_tbi) {
2758                 itmp = ifp->if_flags;
2759                 ifp->if_flags |= IFF_UP;
2760                 ifm = mii->mii_media.ifm_cur;
2761                 mtmp = ifm->ifm_media;
2762                 ifm->ifm_media = IFM_ETHER|IFM_NONE;
2763                 mii_mediachg(mii);
2764                 ifm->ifm_media = mtmp;
2765                 ifp->if_flags = itmp;
2766         }
2767
2768         sc->bge_link = 0;
2769
2770         sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
2771
2772         ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2773 }
2774
2775 /*
2776  * Stop all chip I/O so that the kernel's probe routines don't
2777  * get confused by errant DMAs when rebooting.
2778  */
2779 static void
2780 bge_shutdown(device_t dev)
2781 {
2782         struct bge_softc *sc = device_get_softc(dev);
2783
2784         bge_stop(sc); 
2785         bge_reset(sc);
2786 }