Add the DragonFly cvs id and perform general cleanups on cvs/rcs/sccs ids. Most
[dragonfly.git] / sys / dev / netif / bge / if_bge.c
1 /*
2  * Copyright (c) 2001 Wind River Systems
3  * Copyright (c) 1997, 1998, 1999, 2001
4  *      Bill Paul <wpaul@windriver.com>.  All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. All advertising materials mentioning features or use of this software
15  *    must display the following acknowledgement:
16  *      This product includes software developed by Bill Paul.
17  * 4. Neither the name of the author nor the names of any co-contributors
18  *    may be used to endorse or promote products derived from this software
19  *    without specific prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
22  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
25  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31  * THE POSSIBILITY OF SUCH DAMAGE.
32  *
33  * $FreeBSD: src/sys/dev/bge/if_bge.c,v 1.3.2.22 2003/05/11 18:00:55 ps Exp $
34  * $DragonFly: src/sys/dev/netif/bge/if_bge.c,v 1.2 2003/06/17 04:28:22 dillon Exp $
 *
37  */
38
39 /*
40  * Broadcom BCM570x family gigabit ethernet driver for FreeBSD.
41  * 
42  * Written by Bill Paul <wpaul@windriver.com>
43  * Senior Engineer, Wind River Systems
44  */
45
46 /*
47  * The Broadcom BCM5700 is based on technology originally developed by
48  * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
 * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
50  * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
51  * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
52  * frames, highly configurable RX filtering, and 16 RX and TX queues
53  * (which, along with RX filter rules, can be used for QOS applications).
54  * Other features, such as TCP segmentation, may be available as part
55  * of value-added firmware updates. Unlike the Tigon I and Tigon II,
56  * firmware images can be stored in hardware and need not be compiled
57  * into the driver.
58  *
59  * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
60  * function in a 32-bit/64-bit 33/66Mhz bus, or a 64-bit/133Mhz bus.
61  * 
62  * The BCM5701 is a single-chip solution incorporating both the BCM5700
63  * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
64  * does not support external SSRAM.
65  *
66  * Broadcom also produces a variation of the BCM5700 under the "Altima"
67  * brand name, which is functionally similar but lacks PCI-X support.
68  *
69  * Without external SSRAM, you can only have at most 4 TX rings,
70  * and the use of the mini RX ring is disabled. This seems to imply
71  * that these features are simply not available on the BCM5701. As a
72  * result, this driver does not implement any support for the mini RX
73  * ring.
74  */
75
76 #include <sys/param.h>
77 #include <sys/systm.h>
78 #include <sys/sockio.h>
79 #include <sys/mbuf.h>
80 #include <sys/malloc.h>
81 #include <sys/kernel.h>
82 #include <sys/socket.h>
83 #include <sys/queue.h>
84
85 #include <net/if.h>
86 #include <net/if_arp.h>
87 #include <net/ethernet.h>
88 #include <net/if_dl.h>
89 #include <net/if_media.h>
90
91 #include <net/bpf.h>
92
93 #include <net/if_types.h>
94 #include <net/if_vlan_var.h>
95
96 #include <netinet/in_systm.h>
97 #include <netinet/in.h>
98 #include <netinet/ip.h>
99
100 #include <vm/vm.h>              /* for vtophys */
101 #include <vm/pmap.h>            /* for vtophys */
102 #include <machine/clock.h>      /* for DELAY */
103 #include <machine/bus_memio.h>
104 #include <machine/bus.h>
105 #include <machine/resource.h>
106 #include <sys/bus.h>
107 #include <sys/rman.h>
108
109 #include <dev/mii/mii.h>
110 #include <dev/mii/miivar.h>
111 #include <dev/mii/miidevs.h>
112 #include <dev/mii/brgphyreg.h>
113
114 #include <pci/pcireg.h>
115 #include <pci/pcivar.h>
116
117 #include <dev/bge/if_bgereg.h>
118
119 #define BGE_CSUM_FEATURES       (CSUM_IP | CSUM_TCP | CSUM_UDP)
120
121 /* "controller miibus0" required.  See GENERIC if you get errors here. */
122 #include "miibus_if.h"
123
124 /*
125  * Various supported device vendors/types and their names. Note: the
126  * spec seems to indicate that the hardware still has Alteon's vendor
127  * ID burned into it, though it will always be overriden by the vendor
128  * ID in the EEPROM. Just to be safe, we cover all possibilities.
129  */
130 #define BGE_DEVDESC_MAX         64      /* Maximum device description length */
131
/* Probe table: PCI vendor/device IDs and human-readable names. */
static struct bge_type bge_devs[] = {
	/* Alteon vendor ID (may still be burned into the hardware) */
	{ ALT_VENDORID, ALT_DEVICEID_BCM5700,
		"Broadcom BCM5700 Gigabit Ethernet" },
	{ ALT_VENDORID, ALT_DEVICEID_BCM5701,
		"Broadcom BCM5701 Gigabit Ethernet" },
	/* Broadcom's own vendor ID */
	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5700,
		"Broadcom BCM5700 Gigabit Ethernet" },
	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5701,
		"Broadcom BCM5701 Gigabit Ethernet" },
	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5702X,
		"Broadcom BCM5702X Gigabit Ethernet" },
	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5703X,
		"Broadcom BCM5703X Gigabit Ethernet" },
	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5704C,
		"Broadcom BCM5704C Dual Gigabit Ethernet" },
	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5704S,
		"Broadcom BCM5704S Dual Gigabit Ethernet" },
	/* Rebadged/derivative parts */
	{ SK_VENDORID, SK_DEVICEID_ALTIMA,
		"SysKonnect Gigabit Ethernet" },
	{ ALTIMA_VENDORID, ALTIMA_DEVICE_AC1000,
		"Altima AC1000 Gigabit Ethernet" },
	{ ALTIMA_VENDORID, ALTIMA_DEVICE_AC9100,
		"Altima AC9100 Gigabit Ethernet" },
	/* List terminator */
	{ 0, 0, NULL }
};
157
158 static int bge_probe            __P((device_t));
159 static int bge_attach           __P((device_t));
160 static int bge_detach           __P((device_t));
161 static void bge_release_resources
162                                 __P((struct bge_softc *));
163 static void bge_txeof           __P((struct bge_softc *));
164 static void bge_rxeof           __P((struct bge_softc *));
165
166 static void bge_tick            __P((void *));
167 static void bge_stats_update    __P((struct bge_softc *));
168 static int bge_encap            __P((struct bge_softc *, struct mbuf *,
169                                         u_int32_t *));
170
171 static void bge_intr            __P((void *));
172 static void bge_start           __P((struct ifnet *));
173 static int bge_ioctl            __P((struct ifnet *, u_long, caddr_t));
174 static void bge_init            __P((void *));
175 static void bge_stop            __P((struct bge_softc *));
176 static void bge_watchdog                __P((struct ifnet *));
177 static void bge_shutdown                __P((device_t));
178 static int bge_ifmedia_upd      __P((struct ifnet *));
179 static void bge_ifmedia_sts     __P((struct ifnet *, struct ifmediareq *));
180
181 static u_int8_t bge_eeprom_getbyte      __P((struct bge_softc *,
182                                                 int, u_int8_t *));
183 static int bge_read_eeprom      __P((struct bge_softc *, caddr_t, int, int));
184
185 static u_int32_t bge_crc        __P((caddr_t));
186 static void bge_setmulti        __P((struct bge_softc *));
187
188 static void bge_handle_events   __P((struct bge_softc *));
189 static int bge_alloc_jumbo_mem  __P((struct bge_softc *));
190 static void bge_free_jumbo_mem  __P((struct bge_softc *));
191 static void *bge_jalloc         __P((struct bge_softc *));
192 static void bge_jfree           __P((caddr_t, u_int));
193 static void bge_jref            __P((caddr_t, u_int));
194 static int bge_newbuf_std       __P((struct bge_softc *, int, struct mbuf *));
195 static int bge_newbuf_jumbo     __P((struct bge_softc *, int, struct mbuf *));
196 static int bge_init_rx_ring_std __P((struct bge_softc *));
197 static void bge_free_rx_ring_std        __P((struct bge_softc *));
198 static int bge_init_rx_ring_jumbo       __P((struct bge_softc *));
199 static void bge_free_rx_ring_jumbo      __P((struct bge_softc *));
200 static void bge_free_tx_ring    __P((struct bge_softc *));
201 static int bge_init_tx_ring     __P((struct bge_softc *));
202
203 static int bge_chipinit         __P((struct bge_softc *));
204 static int bge_blockinit        __P((struct bge_softc *));
205
206 #ifdef notdef
207 static u_int8_t bge_vpd_readbyte __P((struct bge_softc *, int));
208 static void bge_vpd_read_res    __P((struct bge_softc *,
209                                         struct vpd_res *, int));
210 static void bge_vpd_read        __P((struct bge_softc *));
211 #endif
212
213 static u_int32_t bge_readmem_ind
214                                 __P((struct bge_softc *, int));
215 static void bge_writemem_ind    __P((struct bge_softc *, int, int));
216 #ifdef notdef
217 static u_int32_t bge_readreg_ind
218                                 __P((struct bge_softc *, int));
219 #endif
220 static void bge_writereg_ind    __P((struct bge_softc *, int, int));
221
222 static int bge_miibus_readreg   __P((device_t, int, int));
223 static int bge_miibus_writereg  __P((device_t, int, int, int));
224 static void bge_miibus_statchg  __P((device_t));
225
226 static void bge_reset           __P((struct bge_softc *));
227
/* Newbus method table: device, generic bus, and MII interfaces. */
static device_method_t bge_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		bge_probe),
	DEVMETHOD(device_attach,	bge_attach),
	DEVMETHOD(device_detach,	bge_detach),
	DEVMETHOD(device_shutdown,	bge_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface (used by the child miibus for PHY access) */
	DEVMETHOD(miibus_readreg,	bge_miibus_readreg),
	DEVMETHOD(miibus_writereg,	bge_miibus_writereg),
	DEVMETHOD(miibus_statchg,	bge_miibus_statchg),

	{ 0, 0 }
};

/* Driver descriptor tying the method table to the softc size. */
static driver_t bge_driver = {
	"bge",
	bge_methods,
	sizeof(struct bge_softc)
};

static devclass_t bge_devclass;

/* Register bge on the PCI bus and miibus as a child of bge. */
DRIVER_MODULE(if_bge, pci, bge_driver, bge_devclass, 0, 0);
DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0);
257
258 static u_int32_t
259 bge_readmem_ind(sc, off)
260         struct bge_softc *sc;
261         int off;
262 {
263         device_t dev;
264
265         dev = sc->bge_dev;
266
267         pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
268         return(pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4));
269 }
270
271 static void
272 bge_writemem_ind(sc, off, val)
273         struct bge_softc *sc;
274         int off, val;
275 {
276         device_t dev;
277
278         dev = sc->bge_dev;
279
280         pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
281         pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
282
283         return;
284 }
285
#ifdef notdef
/*
 * Read a NIC register indirectly through PCI configuration space.
 * Currently compiled out.
 */
static u_int32_t
bge_readreg_ind(sc, off)
	struct bge_softc *sc;
	int off;
{
	device_t pdev = sc->bge_dev;

	pci_write_config(pdev, BGE_PCI_REG_BASEADDR, off, 4);
	return (pci_read_config(pdev, BGE_PCI_REG_DATA, 4));
}
#endif
300
301 static void
302 bge_writereg_ind(sc, off, val)
303         struct bge_softc *sc;
304         int off, val;
305 {
306         device_t dev;
307
308         dev = sc->bge_dev;
309
310         pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
311         pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);
312
313         return;
314 }
315
316 #ifdef notdef
317 static u_int8_t
318 bge_vpd_readbyte(sc, addr)
319         struct bge_softc *sc;
320         int addr;
321 {
322         int i;
323         device_t dev;
324         u_int32_t val;
325
326         dev = sc->bge_dev;
327         pci_write_config(dev, BGE_PCI_VPD_ADDR, addr, 2);
328         for (i = 0; i < BGE_TIMEOUT * 10; i++) {
329                 DELAY(10);
330                 if (pci_read_config(dev, BGE_PCI_VPD_ADDR, 2) & BGE_VPD_FLAG)
331                         break;
332         }
333
334         if (i == BGE_TIMEOUT) {
335                 printf("bge%d: VPD read timed out\n", sc->bge_unit);
336                 return(0);
337         }
338
339         val = pci_read_config(dev, BGE_PCI_VPD_DATA, 4);
340
341         return((val >> ((addr % 4) * 8)) & 0xFF);
342 }
343
344 static void
345 bge_vpd_read_res(sc, res, addr)
346         struct bge_softc *sc;
347         struct vpd_res *res;
348         int addr;
349 {
350         int i;
351         u_int8_t *ptr;
352
353         ptr = (u_int8_t *)res;
354         for (i = 0; i < sizeof(struct vpd_res); i++)
355                 ptr[i] = bge_vpd_readbyte(sc, i + addr);
356
357         return;
358 }
359
360 static void
361 bge_vpd_read(sc)
362         struct bge_softc *sc;
363 {
364         int pos = 0, i;
365         struct vpd_res res;
366
367         if (sc->bge_vpd_prodname != NULL)
368                 free(sc->bge_vpd_prodname, M_DEVBUF);
369         if (sc->bge_vpd_readonly != NULL)
370                 free(sc->bge_vpd_readonly, M_DEVBUF);
371         sc->bge_vpd_prodname = NULL;
372         sc->bge_vpd_readonly = NULL;
373
374         bge_vpd_read_res(sc, &res, pos);
375
376         if (res.vr_id != VPD_RES_ID) {
377                 printf("bge%d: bad VPD resource id: expected %x got %x\n",
378                         sc->bge_unit, VPD_RES_ID, res.vr_id);
379                 return;
380         }
381
382         pos += sizeof(res);
383         sc->bge_vpd_prodname = malloc(res.vr_len + 1, M_DEVBUF, M_NOWAIT);
384         for (i = 0; i < res.vr_len; i++)
385                 sc->bge_vpd_prodname[i] = bge_vpd_readbyte(sc, i + pos);
386         sc->bge_vpd_prodname[i] = '\0';
387         pos += i;
388
389         bge_vpd_read_res(sc, &res, pos);
390
391         if (res.vr_id != VPD_RES_READ) {
392                 printf("bge%d: bad VPD resource id: expected %x got %x\n",
393                     sc->bge_unit, VPD_RES_READ, res.vr_id);
394                 return;
395         }
396
397         pos += sizeof(res);
398         sc->bge_vpd_readonly = malloc(res.vr_len, M_DEVBUF, M_NOWAIT);
399         for (i = 0; i < res.vr_len + 1; i++)
400                 sc->bge_vpd_readonly[i] = bge_vpd_readbyte(sc, i + pos);
401
402         return;
403 }
404 #endif
405
406 /*
407  * Read a byte of data stored in the EEPROM at address 'addr.' The
408  * BCM570x supports both the traditional bitbang interface and an
409  * auto access interface for reading the EEPROM. We use the auto
410  * access method.
411  */
412 static u_int8_t
413 bge_eeprom_getbyte(sc, addr, dest)
414         struct bge_softc *sc;
415         int addr;
416         u_int8_t *dest;
417 {
418         int i;
419         u_int32_t byte = 0;
420
421         /*
422          * Enable use of auto EEPROM access so we can avoid
423          * having to use the bitbang method.
424          */
425         BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);
426
427         /* Reset the EEPROM, load the clock period. */
428         CSR_WRITE_4(sc, BGE_EE_ADDR,
429             BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
430         DELAY(20);
431
432         /* Issue the read EEPROM command. */
433         CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);
434
435         /* Wait for completion */
436         for(i = 0; i < BGE_TIMEOUT * 10; i++) {
437                 DELAY(10);
438                 if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
439                         break;
440         }
441
442         if (i == BGE_TIMEOUT) {
443                 printf("bge%d: eeprom read timed out\n", sc->bge_unit);
444                 return(0);
445         }
446
447         /* Get result. */
448         byte = CSR_READ_4(sc, BGE_EE_DATA);
449
450         *dest = (byte >> ((addr % 4) * 8)) & 0xFF;
451
452         return(0);
453 }
454
455 /*
456  * Read a sequence of bytes from the EEPROM.
457  */
458 static int
459 bge_read_eeprom(sc, dest, off, cnt)
460         struct bge_softc *sc;
461         caddr_t dest;
462         int off;
463         int cnt;
464 {
465         int err = 0, i;
466         u_int8_t byte = 0;
467
468         for (i = 0; i < cnt; i++) {
469                 err = bge_eeprom_getbyte(sc, off + i, &byte);
470                 if (err)
471                         break;
472                 *(dest + i) = byte;
473         }
474
475         return(err ? 1 : 0);
476 }
477
/*
 * MII read method.  Issues a read command on the management
 * interface and polls for completion.  Autopolling is temporarily
 * disabled around the access (see comment below).  Returns the
 * 16-bit register value, or 0 on failure/timeout.
 */
static int
bge_miibus_readreg(dev, phy, reg)
	device_t dev;
	int phy, reg;
{
	struct bge_softc *sc;
	struct ifnet *ifp;
	u_int32_t val, autopoll;
	int i;

	sc = device_get_softc(dev);
	ifp = &sc->arpcom.ac_if;	/* NOTE(review): currently unused */

	/*
	 * On these chip revisions, only answer for PHY address 1;
	 * other addresses are rejected outright.
	 */
	if (phy != 1)
		switch(sc->bge_chipid) {
		case BGE_CHIPID_BCM5701_B5:
		case BGE_CHIPID_BCM5703_A2:
		case BGE_CHIPID_BCM5704_A0:
			return(0);
		}

	/* Reading with autopolling on may trigger PCI errors */
	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	/* Kick off the read transaction. */
	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY|
	    BGE_MIPHY(phy)|BGE_MIREG(reg));

	/* Poll until the BUSY bit clears. */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		val = CSR_READ_4(sc, BGE_MI_COMM);
		if (!(val & BGE_MICOMM_BUSY))
			break;
	}

	if (i == BGE_TIMEOUT) {
		printf("bge%d: PHY read timed out\n", sc->bge_unit);
		val = 0;
		goto done;
	}

	val = CSR_READ_4(sc, BGE_MI_COMM);

done:
	/* Restore autopolling if we disabled it above. */
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	if (val & BGE_MICOMM_READFAIL)
		return(0);

	return(val & 0xFFFF);
}
534
535 static int
536 bge_miibus_writereg(dev, phy, reg, val)
537         device_t dev;
538         int phy, reg, val;
539 {
540         struct bge_softc *sc;
541         u_int32_t autopoll;
542         int i;
543
544         sc = device_get_softc(dev);
545
546         /* Reading with autopolling on may trigger PCI errors */
547         autopoll = CSR_READ_4(sc, BGE_MI_MODE);
548         if (autopoll & BGE_MIMODE_AUTOPOLL) {
549                 BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
550                 DELAY(40);
551         }
552
553         CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY|
554             BGE_MIPHY(phy)|BGE_MIREG(reg)|val);
555
556         for (i = 0; i < BGE_TIMEOUT; i++) {
557                 if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY))
558                         break;
559         }
560
561         if (autopoll & BGE_MIMODE_AUTOPOLL) {
562                 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
563                 DELAY(40);
564         }
565
566         if (i == BGE_TIMEOUT) {
567                 printf("bge%d: PHY read timed out\n", sc->bge_unit);
568                 return(0);
569         }
570
571         return(0);
572 }
573
574 static void
575 bge_miibus_statchg(dev)
576         device_t dev;
577 {
578         struct bge_softc *sc;
579         struct mii_data *mii;
580
581         sc = device_get_softc(dev);
582         mii = device_get_softc(sc->bge_miibus);
583
584         BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
585         if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_TX) {
586                 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
587         } else {
588                 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
589         }
590
591         if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
592                 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
593         } else {
594                 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
595         }
596
597         return;
598 }
599
/*
 * Handle events that have triggered interrupts.
 * Currently a no-op placeholder.
 */
static void
bge_handle_events(sc)
	struct bge_softc		*sc;
{
}
610
611 /*
612  * Memory management for jumbo frames.
613  */
614
615 static int
616 bge_alloc_jumbo_mem(sc)
617         struct bge_softc                *sc;
618 {
619         caddr_t                 ptr;
620         register int            i;
621         struct bge_jpool_entry   *entry;
622
623         /* Grab a big chunk o' storage. */
624         sc->bge_cdata.bge_jumbo_buf = contigmalloc(BGE_JMEM, M_DEVBUF,
625                 M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);
626
627         if (sc->bge_cdata.bge_jumbo_buf == NULL) {
628                 printf("bge%d: no memory for jumbo buffers!\n", sc->bge_unit);
629                 return(ENOBUFS);
630         }
631
632         SLIST_INIT(&sc->bge_jfree_listhead);
633         SLIST_INIT(&sc->bge_jinuse_listhead);
634
635         /*
636          * Now divide it up into 9K pieces and save the addresses
637          * in an array. Note that we play an evil trick here by using
638          * the first few bytes in the buffer to hold the the address
639          * of the softc structure for this interface. This is because
640          * bge_jfree() needs it, but it is called by the mbuf management
641          * code which will not pass it to us explicitly.
642          */
643         ptr = sc->bge_cdata.bge_jumbo_buf;
644         for (i = 0; i < BGE_JSLOTS; i++) {
645                 u_int64_t               **aptr;
646                 aptr = (u_int64_t **)ptr;
647                 aptr[0] = (u_int64_t *)sc;
648                 ptr += sizeof(u_int64_t);
649                 sc->bge_cdata.bge_jslots[i].bge_buf = ptr;
650                 sc->bge_cdata.bge_jslots[i].bge_inuse = 0;
651                 ptr += (BGE_JLEN - sizeof(u_int64_t));
652                 entry = malloc(sizeof(struct bge_jpool_entry), 
653                                M_DEVBUF, M_NOWAIT);
654                 if (entry == NULL) {
655                         contigfree(sc->bge_cdata.bge_jumbo_buf,
656                             BGE_JMEM, M_DEVBUF);
657                         sc->bge_cdata.bge_jumbo_buf = NULL;
658                         printf("bge%d: no memory for jumbo "
659                             "buffer queue!\n", sc->bge_unit);
660                         return(ENOBUFS);
661                 }
662                 entry->slot = i;
663                 SLIST_INSERT_HEAD(&sc->bge_jfree_listhead,
664                     entry, jpool_entries);
665         }
666
667         return(0);
668 }
669
670 static void
671 bge_free_jumbo_mem(sc)
672         struct bge_softc *sc;
673 {
674         int i;
675         struct bge_jpool_entry *entry;
676  
677         for (i = 0; i < BGE_JSLOTS; i++) {
678                 entry = SLIST_FIRST(&sc->bge_jfree_listhead);
679                 SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jpool_entries);
680                 free(entry, M_DEVBUF);
681         }
682
683         contigfree(sc->bge_cdata.bge_jumbo_buf, BGE_JMEM, M_DEVBUF);
684
685         return;
686 }
687
688 /*
689  * Allocate a jumbo buffer.
690  */
691 static void *
692 bge_jalloc(sc)
693         struct bge_softc                *sc;
694 {
695         struct bge_jpool_entry   *entry;
696         
697         entry = SLIST_FIRST(&sc->bge_jfree_listhead);
698         
699         if (entry == NULL) {
700                 printf("bge%d: no free jumbo buffers\n", sc->bge_unit);
701                 return(NULL);
702         }
703
704         SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jpool_entries);
705         SLIST_INSERT_HEAD(&sc->bge_jinuse_listhead, entry, jpool_entries);
706         sc->bge_cdata.bge_jslots[entry->slot].bge_inuse = 1;
707         return(sc->bge_cdata.bge_jslots[entry->slot].bge_buf);
708 }
709
/*
 * Adjust usage count on a jumbo buffer.  Called by the mbuf code as
 * the external-storage reference hook; the owning softc is recovered
 * from the pointer bge_alloc_jumbo_mem() stashed in the 8 bytes
 * immediately preceding the buffer.
 */
static void
bge_jref(buf, size)
	caddr_t			buf;
	u_int			size;
{
	struct bge_softc		*sc;
	u_int64_t		**aptr;
	register int		i;

	/* Extract the softc struct pointer. */
	aptr = (u_int64_t **)(buf - sizeof(u_int64_t));
	sc = (struct bge_softc *)(aptr[0]);

	if (sc == NULL)
		panic("bge_jref: can't find softc pointer!");

	if (size != BGE_JUMBO_FRAMELEN)
		panic("bge_jref: adjusting refcount of buf of wrong size!");

	/* calculate the slot this buffer belongs to */

	i = ((vm_offset_t)aptr 
	     - (vm_offset_t)sc->bge_cdata.bge_jumbo_buf) / BGE_JLEN;

	/* Sanity-check the slot index, then bump its reference count. */
	if ((i < 0) || (i >= BGE_JSLOTS))
		panic("bge_jref: asked to reference buffer "
		    "that we don't manage!");
	else if (sc->bge_cdata.bge_jslots[i].bge_inuse == 0)
		panic("bge_jref: buffer already free!");
	else
		sc->bge_cdata.bge_jslots[i].bge_inuse++;

	return;
}
747
/*
 * Release a jumbo buffer.  Called by the mbuf code as the
 * external-storage free hook.  Decrements the slot's reference
 * count; when it reaches zero the slot's tracking entry is moved
 * from the in-use list back to the free list.
 */
static void
bge_jfree(buf, size)
	caddr_t			buf;
	u_int			size;
{
	struct bge_softc		*sc;
	u_int64_t		**aptr;
	int			i;
	struct bge_jpool_entry	 *entry;

	/* Extract the softc struct pointer. */
	aptr = (u_int64_t **)(buf - sizeof(u_int64_t));
	sc = (struct bge_softc *)(aptr[0]);

	if (sc == NULL)
		panic("bge_jfree: can't find softc pointer!");

	if (size != BGE_JUMBO_FRAMELEN)
		panic("bge_jfree: freeing buffer of wrong size!");

	/* calculate the slot this buffer belongs to */

	i = ((vm_offset_t)aptr 
	     - (vm_offset_t)sc->bge_cdata.bge_jumbo_buf) / BGE_JLEN;

	if ((i < 0) || (i >= BGE_JSLOTS))
		panic("bge_jfree: asked to free buffer that we don't manage!");
	else if (sc->bge_cdata.bge_jslots[i].bge_inuse == 0)
		panic("bge_jfree: buffer already free!");
	else {
		sc->bge_cdata.bge_jslots[i].bge_inuse--;
		/*
		 * Last reference dropped: recycle an entry from the
		 * in-use list (any entry will do; its slot field is
		 * overwritten) onto the free list for this slot.
		 */
		if(sc->bge_cdata.bge_jslots[i].bge_inuse == 0) {
			entry = SLIST_FIRST(&sc->bge_jinuse_listhead);
			if (entry == NULL)
				panic("bge_jfree: buffer not in use!");
			entry->slot = i;
			SLIST_REMOVE_HEAD(&sc->bge_jinuse_listhead, 
					  jpool_entries);
			SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, 
					  entry, jpool_entries);
		}
	}

	return;
}
796
797
798 /*
799  * Intialize a standard receive ring descriptor.
800  */
801 static int
802 bge_newbuf_std(sc, i, m)
803         struct bge_softc        *sc;
804         int                     i;
805         struct mbuf             *m;
806 {
807         struct mbuf             *m_new = NULL;
808         struct bge_rx_bd        *r;
809
810         if (m == NULL) {
811                 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
812                 if (m_new == NULL) {
813                         return(ENOBUFS);
814                 }
815
816                 MCLGET(m_new, M_DONTWAIT);
817                 if (!(m_new->m_flags & M_EXT)) {
818                         m_freem(m_new);
819                         return(ENOBUFS);
820                 }
821                 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
822         } else {
823                 m_new = m;
824                 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
825                 m_new->m_data = m_new->m_ext.ext_buf;
826         }
827
828         if (!sc->bge_rx_alignment_bug)
829                 m_adj(m_new, ETHER_ALIGN);
830         sc->bge_cdata.bge_rx_std_chain[i] = m_new;
831         r = &sc->bge_rdata->bge_rx_std_ring[i];
832         BGE_HOSTADDR(r->bge_addr) = vtophys(mtod(m_new, caddr_t));
833         r->bge_flags = BGE_RXBDFLAG_END;
834         r->bge_len = m_new->m_len;
835         r->bge_idx = i;
836
837         return(0);
838 }
839
/*
 * Initialize a jumbo receive ring descriptor. This allocates
 * a jumbo buffer from the pool managed internally by the driver.
 * Returns 0 on success, ENOBUFS if an mbuf or pool buffer could
 * not be obtained.
 */
static int
bge_newbuf_jumbo(sc, i, m)
	struct bge_softc *sc;
	int i;
	struct mbuf *m;
{
	struct mbuf *m_new = NULL;
	struct bge_rx_bd *r;

	if (m == NULL) {
		caddr_t			*buf = NULL;

		/* Allocate the mbuf. */
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL) {
			return(ENOBUFS);
		}

		/* Allocate the jumbo buffer */
		buf = bge_jalloc(sc);
		if (buf == NULL) {
			m_freem(m_new);
			printf("bge%d: jumbo allocation failed "
			    "-- packet dropped!\n", sc->bge_unit);
			return(ENOBUFS);
		}

		/*
		 * Attach the buffer to the mbuf as external storage;
		 * bge_jfree/bge_jref maintain the pool refcounts when
		 * the mbuf system frees or duplicates the mbuf.
		 */
		m_new->m_data = m_new->m_ext.ext_buf = (void *)buf;
		m_new->m_flags |= M_EXT;
		m_new->m_len = m_new->m_pkthdr.len =
		    m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN;
		m_new->m_ext.ext_free = bge_jfree;
		m_new->m_ext.ext_ref = bge_jref;
	} else {
		/* Reuse the caller's mbuf: reset data pointer and size. */
		m_new = m;
		m_new->m_data = m_new->m_ext.ext_buf;
		m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN;
	}

	/* Skip the 2-byte alignment offset on chips with the RX bug. */
	if (!sc->bge_rx_alignment_bug)
		m_adj(m_new, ETHER_ALIGN);
	/* Set up the descriptor. */
	r = &sc->bge_rdata->bge_rx_jumbo_ring[i];
	sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new;
	BGE_HOSTADDR(r->bge_addr) = vtophys(mtod(m_new, caddr_t));
	r->bge_flags = BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING;
	r->bge_len = m_new->m_len;
	r->bge_idx = i;

	return(0);
}
896
897 /*
898  * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
899  * that's 1MB or memory, which is a lot. For now, we fill only the first
900  * 256 ring entries and hope that our CPU is fast enough to keep up with
901  * the NIC.
902  */
903 static int
904 bge_init_rx_ring_std(sc)
905         struct bge_softc *sc;
906 {
907         int i;
908
909         for (i = 0; i < BGE_SSLOTS; i++) {
910                 if (bge_newbuf_std(sc, i, NULL) == ENOBUFS)
911                         return(ENOBUFS);
912         };
913
914         sc->bge_std = i - 1;
915         CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
916
917         return(0);
918 }
919
920 static void
921 bge_free_rx_ring_std(sc)
922         struct bge_softc *sc;
923 {
924         int i;
925
926         for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
927                 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
928                         m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
929                         sc->bge_cdata.bge_rx_std_chain[i] = NULL;
930                 }
931                 bzero((char *)&sc->bge_rdata->bge_rx_std_ring[i],
932                     sizeof(struct bge_rx_bd));
933         }
934
935         return;
936 }
937
938 static int
939 bge_init_rx_ring_jumbo(sc)
940         struct bge_softc *sc;
941 {
942         int i;
943         struct bge_rcb *rcb;
944
945         for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
946                 if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
947                         return(ENOBUFS);
948         };
949
950         sc->bge_jumbo = i - 1;
951
952         rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
953         rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 0);
954         CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
955
956         CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
957
958         return(0);
959 }
960
961 static void
962 bge_free_rx_ring_jumbo(sc)
963         struct bge_softc *sc;
964 {
965         int i;
966
967         for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
968                 if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
969                         m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
970                         sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
971                 }
972                 bzero((char *)&sc->bge_rdata->bge_rx_jumbo_ring[i],
973                     sizeof(struct bge_rx_bd));
974         }
975
976         return;
977 }
978
979 static void
980 bge_free_tx_ring(sc)
981         struct bge_softc *sc;
982 {
983         int i;
984
985         if (sc->bge_rdata->bge_tx_ring == NULL)
986                 return;
987
988         for (i = 0; i < BGE_TX_RING_CNT; i++) {
989                 if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
990                         m_freem(sc->bge_cdata.bge_tx_chain[i]);
991                         sc->bge_cdata.bge_tx_chain[i] = NULL;
992                 }
993                 bzero((char *)&sc->bge_rdata->bge_tx_ring[i],
994                     sizeof(struct bge_tx_bd));
995         }
996
997         return;
998 }
999
/*
 * Reset the TX ring bookkeeping and zero both producer index
 * mailboxes.  Always returns 0.
 */
static int
bge_init_tx_ring(sc)
        struct bge_softc *sc;
{
        /* No TX descriptors are in use yet. */
        sc->bge_txcnt = 0;
        sc->bge_tx_saved_considx = 0;

        /* Zero the host-side producer mailbox. */
        CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0);
        /* 5700 b2 errata: the mailbox write must be issued twice. */
        if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
                CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0);

        /* Zero the NIC-side producer mailbox. */
        CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
        /* 5700 b2 errata */
        if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
                CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);

        return(0);
}
1019
1020 #define BGE_POLY        0xEDB88320
1021
1022 static u_int32_t
1023 bge_crc(addr)
1024         caddr_t addr;
1025 {
1026         u_int32_t idx, bit, data, crc;
1027
1028         /* Compute CRC for the address value. */
1029         crc = 0xFFFFFFFF; /* initial value */
1030
1031         for (idx = 0; idx < 6; idx++) {
1032                 for (data = *addr++, bit = 0; bit < 8; bit++, data >>= 1)
1033                         crc = (crc >> 1) ^ (((crc ^ data) & 1) ? BGE_POLY : 0);
1034         }
1035
1036         return(crc & 0x7F);
1037 }
1038
1039 static void
1040 bge_setmulti(sc)
1041         struct bge_softc *sc;
1042 {
1043         struct ifnet *ifp;
1044         struct ifmultiaddr *ifma;
1045         u_int32_t hashes[4] = { 0, 0, 0, 0 };
1046         int h, i;
1047
1048         ifp = &sc->arpcom.ac_if;
1049
1050         if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
1051                 for (i = 0; i < 4; i++)
1052                         CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
1053                 return;
1054         }
1055
1056         /* First, zot all the existing filters. */
1057         for (i = 0; i < 4; i++)
1058                 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);
1059
1060         /* Now program new ones. */
1061         for (ifma = ifp->if_multiaddrs.lh_first;
1062             ifma != NULL; ifma = ifma->ifma_link.le_next) {
1063                 if (ifma->ifma_addr->sa_family != AF_LINK)
1064                         continue;
1065                 h = bge_crc(LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
1066                 hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
1067         }
1068
1069         for (i = 0; i < 4; i++)
1070                 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
1071
1072         return;
1073 }
1074
1075 /*
1076  * Do endian, PCI and DMA initialization. Also check the on-board ROM
1077  * self-test results.
1078  */
1079 static int
1080 bge_chipinit(sc)
1081         struct bge_softc *sc;
1082 {
1083         int                     i;
1084         u_int32_t               dma_rw_ctl;
1085
1086         /* Set endianness before we access any non-PCI registers. */
1087 #if BYTE_ORDER == BIG_ENDIAN
1088         pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL,
1089             BGE_BIGENDIAN_INIT, 4);
1090 #else
1091         pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL,
1092             BGE_LITTLEENDIAN_INIT, 4);
1093 #endif
1094
1095         /*
1096          * Check the 'ROM failed' bit on the RX CPU to see if
1097          * self-tests passed.
1098          */
1099         if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL) {
1100                 printf("bge%d: RX CPU self-diagnostics failed!\n",
1101                     sc->bge_unit);
1102                 return(ENODEV);
1103         }
1104
1105         /* Clear the MAC control register */
1106         CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
1107
1108         /*
1109          * Clear the MAC statistics block in the NIC's
1110          * internal memory.
1111          */
1112         for (i = BGE_STATS_BLOCK;
1113             i < BGE_STATS_BLOCK_END + 1; i += sizeof(u_int32_t))
1114                 BGE_MEMWIN_WRITE(sc, i, 0);
1115
1116         for (i = BGE_STATUS_BLOCK;
1117             i < BGE_STATUS_BLOCK_END + 1; i += sizeof(u_int32_t))
1118                 BGE_MEMWIN_WRITE(sc, i, 0);
1119
1120         /* Set up the PCI DMA control register. */
1121         if (pci_read_config(sc->bge_dev, BGE_PCI_PCISTATE, 4) &
1122             BGE_PCISTATE_PCI_BUSMODE) {
1123                 /* Conventional PCI bus */
1124                 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
1125                     (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1126                     (0x7 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
1127                     (0x0F);
1128         } else {
1129                 /* PCI-X bus */
1130                 /*
1131                  * The 5704 uses a different encoding of read/write
1132                  * watermarks.
1133                  */
1134                 if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
1135                         dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
1136                             (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1137                             (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
1138                 else
1139                         dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
1140                             (0x3 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1141                             (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
1142                             (0x0F);
1143
1144                 /*
1145                  * 5703 and 5704 need ONEDMA_AT_ONCE as a workaround
1146                  * for hardware bugs.
1147                  */
1148                 if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1149                     sc->bge_asicrev == BGE_ASICREV_BCM5704) {
1150                         u_int32_t tmp;
1151
1152                         tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1f;
1153                         if (tmp == 0x6 || tmp == 0x7)
1154                                 dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE;
1155                 }
1156         }
1157
1158         if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1159             sc->bge_asicrev == BGE_ASICREV_BCM5704)
1160                 dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
1161         pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);
1162
1163         /*
1164          * Set up general mode register.
1165          */
1166         CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_MODECTL_WORDSWAP_NONFRAME|
1167             BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA|
1168             BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS|
1169             BGE_MODECTL_NO_RX_CRC|BGE_MODECTL_TX_NO_PHDR_CSUM|
1170             BGE_MODECTL_RX_NO_PHDR_CSUM);
1171
1172         /*
1173          * Disable memory write invalidate.  Apparently it is not supported
1174          * properly by these devices.
1175          */
1176         PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD, PCIM_CMD_MWIEN, 4);
1177
1178 #ifdef __brokenalpha__
1179         /*
1180          * Must insure that we do not cross an 8K (bytes) boundary
1181          * for DMA reads.  Our highest limit is 1K bytes.  This is a 
1182          * restriction on some ALPHA platforms with early revision 
1183          * 21174 PCI chipsets, such as the AlphaPC 164lx 
1184          */
1185         PCI_SETBIT(sc->bge_dev, BGE_PCI_DMA_RW_CTL,
1186             BGE_PCI_READ_BNDRY_1024BYTES, 4);
1187 #endif
1188
1189         /* Set the timer prescaler (always 66Mhz) */
1190         CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/);
1191
1192         return(0);
1193 }
1194
/*
 * Program the controller's buffer manager, ring control blocks
 * (RCBs), host coalescing engine and the various DMA/send/receive
 * state machines.  Returns 0 on success or ENXIO if the buffer
 * manager, flow-through queues or coalescing engine fail to
 * respond within BGE_TIMEOUT polls.
 */
static int
bge_blockinit(sc)
        struct bge_softc *sc;
{
        struct bge_rcb *rcb;
        volatile struct bge_rcb *vrcb;
        int i;

        /*
         * Initialize the memory window pointer register so that
         * we can access the first 32K of internal NIC RAM. This will
         * allow us to set up the TX send ring RCBs and the RX return
         * ring RCBs, plus other things which live in NIC memory.
         */
        CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);

        /* Configure mbuf memory pool */
        if (sc->bge_extram) {
                CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_EXT_SSRAM);
                CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
        } else {
                CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_BUFFPOOL_1);
                CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
        }

        /* Configure DMA resource pool */
        CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR, BGE_DMA_DESCRIPTORS);
        CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);

        /* Configure mbuf pool watermarks */
        CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
        CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
        CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);

        /* Configure DMA resource watermarks */
        CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
        CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);

        /* Enable buffer manager */
        CSR_WRITE_4(sc, BGE_BMAN_MODE,
            BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN);

        /* Poll for buffer manager start indication */
        for (i = 0; i < BGE_TIMEOUT; i++) {
                if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
                        break;
                DELAY(10);
        }

        if (i == BGE_TIMEOUT) {
                printf("bge%d: buffer manager failed to start\n",
                    sc->bge_unit);
                return(ENXIO);
        }

        /* Enable flow-through queues */
        CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
        CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);

        /* Wait until queue initialization is complete */
        for (i = 0; i < BGE_TIMEOUT; i++) {
                if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
                        break;
                DELAY(10);
        }

        if (i == BGE_TIMEOUT) {
                printf("bge%d: flow-through queue init failed\n",
                    sc->bge_unit);
                return(ENXIO);
        }

        /* Initialize the standard RX ring control block */
        rcb = &sc->bge_rdata->bge_info.bge_std_rx_rcb;
        BGE_HOSTADDR(rcb->bge_hostaddr) =
            vtophys(&sc->bge_rdata->bge_rx_std_ring);
        rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
        if (sc->bge_extram)
                rcb->bge_nicaddr = BGE_EXT_STD_RX_RINGS;
        else
                rcb->bge_nicaddr = BGE_STD_RX_RINGS;
        CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
        CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
        CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
        CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);

        /*
         * Initialize the jumbo RX ring control block
         * We set the 'ring disabled' bit in the flags
         * field until we're actually ready to start
         * using this ring (i.e. once we set the MTU
         * high enough to require it).
         */
        rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
        BGE_HOSTADDR(rcb->bge_hostaddr) =
            vtophys(&sc->bge_rdata->bge_rx_jumbo_ring);
        rcb->bge_maxlen_flags =
            BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, BGE_RCB_FLAG_RING_DISABLED);
        if (sc->bge_extram)
                rcb->bge_nicaddr = BGE_EXT_JUMBO_RX_RINGS;
        else
                rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
        CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
            rcb->bge_hostaddr.bge_addr_hi);
        CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
            rcb->bge_hostaddr.bge_addr_lo);
        CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
        CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);

        /* Set up dummy disabled mini ring RCB */
        rcb = &sc->bge_rdata->bge_info.bge_mini_rx_rcb;
        rcb->bge_maxlen_flags =
            BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
        CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);

        /*
         * Set the BD ring replenish thresholds. The recommended
         * values are 1/8th the number of descriptors allocated to
         * each ring.
         */
        CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, BGE_STD_RX_RING_CNT/8);
        CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8);

        /*
         * Disable all unused send rings by setting the 'ring disabled'
         * bit in the flags field of all the TX send ring control blocks.
         * These are located in NIC memory.
         */
        vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START +
            BGE_SEND_RING_RCB);
        for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) {
                vrcb->bge_maxlen_flags =
                    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
                vrcb->bge_nicaddr = 0;
                vrcb++;
        }

        /* Configure TX RCB 0 (we use only the first ring) */
        vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START +
            BGE_SEND_RING_RCB);
        vrcb->bge_hostaddr.bge_addr_hi = 0;
        BGE_HOSTADDR(vrcb->bge_hostaddr) =
            vtophys(&sc->bge_rdata->bge_tx_ring);
        vrcb->bge_nicaddr = BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT);
        vrcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0);

        /* Disable all unused RX return rings */
        vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START +
            BGE_RX_RETURN_RING_RCB);
        for (i = 0; i < BGE_RX_RINGS_MAX; i++) {
                vrcb->bge_hostaddr.bge_addr_hi = 0;
                vrcb->bge_hostaddr.bge_addr_lo = 0;
                vrcb->bge_maxlen_flags =
                    BGE_RCB_MAXLEN_FLAGS(BGE_RETURN_RING_CNT,
                    BGE_RCB_FLAG_RING_DISABLED);
                vrcb->bge_nicaddr = 0;
                CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO +
                    (i * (sizeof(u_int64_t))), 0);
                vrcb++;
        }

        /* Initialize RX ring indexes */
        CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, 0);
        CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
        CSR_WRITE_4(sc, BGE_MBX_RX_MINI_PROD_LO, 0);

        /*
         * Set up RX return ring 0
         * Note that the NIC address for RX return rings is 0x00000000.
         * The return rings live entirely within the host, so the
         * nicaddr field in the RCB isn't used.
         */
        vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START +
            BGE_RX_RETURN_RING_RCB);
        vrcb->bge_hostaddr.bge_addr_hi = 0;
        BGE_HOSTADDR(vrcb->bge_hostaddr) =
            vtophys(&sc->bge_rdata->bge_rx_return_ring);
        vrcb->bge_nicaddr = 0x00000000;
        vrcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(BGE_RETURN_RING_CNT, 0);

        /* Set random backoff seed for TX (derived from the MAC address) */
        CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
            sc->arpcom.ac_enaddr[0] + sc->arpcom.ac_enaddr[1] +
            sc->arpcom.ac_enaddr[2] + sc->arpcom.ac_enaddr[3] +
            sc->arpcom.ac_enaddr[4] + sc->arpcom.ac_enaddr[5] +
            BGE_TX_BACKOFF_SEED_MASK);

        /* Set inter-packet gap */
        CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);

        /*
         * Specify which ring to use for packets that don't match
         * any RX rules.
         */
        CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);

        /*
         * Configure number of RX lists. One interrupt distribution
         * list, sixteen active lists, one bad frames class.
         */
        CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);

        /* Initialize RX list placement stats mask. */
        CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
        CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);

        /* Disable host coalescing until we get it set up */
        CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);

        /* Poll to make sure it's shut down. */
        for (i = 0; i < BGE_TIMEOUT; i++) {
                if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
                        break;
                DELAY(10);
        }

        if (i == BGE_TIMEOUT) {
                printf("bge%d: host coalescing engine failed to idle\n",
                    sc->bge_unit);
                return(ENXIO);
        }

        /* Set up host coalescing defaults (tuneables from bge_attach) */
        CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
        CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
        CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
        CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
        CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
        CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
        CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0);
        CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0);
        CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);

        /* Set up address of statistics block */
        CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
        CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI, 0);
        CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
            vtophys(&sc->bge_rdata->bge_info.bge_stats));

        /* Set up address of status block */
        CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
        CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI, 0);
        CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
            vtophys(&sc->bge_rdata->bge_status_block));
        sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx = 0;
        sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx = 0;

        /* Turn on host coalescing state machine */
        CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);

        /* Turn on RX BD completion state machine and enable attentions */
        CSR_WRITE_4(sc, BGE_RBDC_MODE,
            BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);

        /* Turn on RX list placement state machine */
        CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);

        /* Turn on RX list selector state machine. */
        CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);

        /* Turn on DMA, clear stats */
        CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB|
            BGE_MACMODE_RXDMA_ENB|BGE_MACMODE_RX_STATS_CLEAR|
            BGE_MACMODE_TX_STATS_CLEAR|BGE_MACMODE_RX_STATS_ENB|
            BGE_MACMODE_TX_STATS_ENB|BGE_MACMODE_FRMHDR_DMA_ENB|
            (sc->bge_tbi ? BGE_PORTMODE_TBI : BGE_PORTMODE_MII));

        /* Set misc. local control, enable interrupts on attentions */
        CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);

#ifdef notdef
        /* Assert GPIO pins for PHY reset */
        BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
            BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
        BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
            BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
#endif

        /* Turn on DMA completion state machine */
        CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);

        /* Turn on write DMA state machine */
        CSR_WRITE_4(sc, BGE_WDMA_MODE,
            BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS);

        /* Turn on read DMA state machine */
        CSR_WRITE_4(sc, BGE_RDMA_MODE,
            BGE_RDMAMODE_ENABLE|BGE_RDMAMODE_ALL_ATTNS);

        /* Turn on RX data completion state machine */
        CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);

        /* Turn on RX BD initiator state machine */
        CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);

        /* Turn on RX data and RX BD initiator state machine */
        CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);

        /* Turn on Mbuf cluster free state machine */
        CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);

        /* Turn on send BD completion state machine */
        CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);

        /* Turn on send data completion state machine */
        CSR_WRITE_4(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);

        /* Turn on send data initiator state machine */
        CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);

        /* Turn on send BD initiator state machine */
        CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);

        /* Turn on send BD selector state machine */
        CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);

        /* Enable send data initiator statistics */
        CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
        CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
            BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);

        /* ack/clear link change events */
        CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
            BGE_MACSTAT_CFG_CHANGED);
        CSR_WRITE_4(sc, BGE_MI_STS, 0);

        /* Enable PHY auto polling (for MII/GMII only) */
        if (sc->bge_tbi) {
                CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
        } else {
                BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL|10<<16);
                if (sc->bge_asicrev == BGE_ASICREV_BCM5700)
                        CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
                            BGE_EVTENB_MI_INTERRUPT);
        }

        /* Enable link state change attentions. */
        BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);

        return(0);
}
1535
1536 /*
1537  * Probe for a Broadcom chip. Check the PCI vendor and device IDs
1538  * against our list and return its name if we find a match. Note
1539  * that since the Broadcom controller contains VPD support, we
1540  * can get the device name string from the controller itself instead
1541  * of the compiled-in string. This is a little slow, but it guarantees
1542  * we'll always announce the right product name.
1543  */
1544 static int
1545 bge_probe(dev)
1546         device_t dev;
1547 {
1548         struct bge_type *t;
1549         struct bge_softc *sc;
1550         char *descbuf;
1551
1552         t = bge_devs;
1553
1554         sc = device_get_softc(dev);
1555         bzero(sc, sizeof(struct bge_softc));
1556         sc->bge_unit = device_get_unit(dev);
1557         sc->bge_dev = dev;
1558
1559         while(t->bge_name != NULL) {
1560                 if ((pci_get_vendor(dev) == t->bge_vid) &&
1561                     (pci_get_device(dev) == t->bge_did)) {
1562 #ifdef notdef
1563                         bge_vpd_read(sc);
1564                         device_set_desc(dev, sc->bge_vpd_prodname);
1565 #endif
1566                         descbuf = malloc(BGE_DEVDESC_MAX, M_TEMP, M_NOWAIT);
1567                         if (descbuf == NULL)
1568                                 return(ENOMEM);
1569                         snprintf(descbuf, BGE_DEVDESC_MAX,
1570                             "%s, ASIC rev. %#04x", t->bge_name,
1571                             pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >> 16);
1572                         device_set_desc_copy(dev, descbuf);
1573                         free(descbuf, M_TEMP);
1574                         return(0);
1575                 }
1576                 t++;
1577         }
1578
1579         return(ENXIO);
1580 }
1581
1582 static int
1583 bge_attach(dev)
1584         device_t dev;
1585 {
1586         int s;
1587         u_int32_t command;
1588         struct ifnet *ifp;
1589         struct bge_softc *sc;
1590         u_int32_t hwcfg = 0;
1591         u_int32_t mac_addr = 0;
1592         int unit, error = 0, rid;
1593
1594         s = splimp();
1595
1596         sc = device_get_softc(dev);
1597         unit = device_get_unit(dev);
1598         sc->bge_dev = dev;
1599         sc->bge_unit = unit;
1600
1601         /*
1602          * Map control/status registers.
1603          */
1604         command = pci_read_config(dev, PCIR_COMMAND, 4);
1605         command |= (PCIM_CMD_MEMEN|PCIM_CMD_BUSMASTEREN);
1606         pci_write_config(dev, PCIR_COMMAND, command, 4);
1607         command = pci_read_config(dev, PCIR_COMMAND, 4);
1608
1609         if (!(command & PCIM_CMD_MEMEN)) {
1610                 printf("bge%d: failed to enable memory mapping!\n", unit);
1611                 error = ENXIO;
1612                 goto fail;
1613         }
1614
1615         rid = BGE_PCI_BAR0;
1616         sc->bge_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid,
1617             0, ~0, 1, RF_ACTIVE);
1618
1619         if (sc->bge_res == NULL) {
1620                 printf ("bge%d: couldn't map memory\n", unit);
1621                 error = ENXIO;
1622                 goto fail;
1623         }
1624
1625         sc->bge_btag = rman_get_bustag(sc->bge_res);
1626         sc->bge_bhandle = rman_get_bushandle(sc->bge_res);
1627         sc->bge_vhandle = (vm_offset_t)rman_get_virtual(sc->bge_res);
1628
1629         /*
1630          * XXX FIXME: rman_get_virtual() on the alpha is currently
1631          * broken and returns a physical address instead of a kernel
1632          * virtual address. Consequently, we need to do a little
1633          * extra mangling of the vhandle on the alpha. This should
1634          * eventually be fixed! The whole idea here is to get rid
1635          * of platform dependencies.
1636          */
1637 #ifdef __alpha__
1638         if (pci_cvt_to_bwx(sc->bge_vhandle))
1639                 sc->bge_vhandle = pci_cvt_to_bwx(sc->bge_vhandle);
1640         else
1641                 sc->bge_vhandle = pci_cvt_to_dense(sc->bge_vhandle);
1642         sc->bge_vhandle = ALPHA_PHYS_TO_K0SEG(sc->bge_vhandle);
1643 #endif
1644
1645         /* Allocate interrupt */
1646         rid = 0;
1647         
1648         sc->bge_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1,
1649             RF_SHAREABLE | RF_ACTIVE);
1650
1651         if (sc->bge_irq == NULL) {
1652                 printf("bge%d: couldn't map interrupt\n", unit);
1653                 error = ENXIO;
1654                 goto fail;
1655         }
1656
1657         error = bus_setup_intr(dev, sc->bge_irq, INTR_TYPE_NET,
1658            bge_intr, sc, &sc->bge_intrhand);
1659
1660         if (error) {
1661                 bge_release_resources(sc);
1662                 printf("bge%d: couldn't set up irq\n", unit);
1663                 goto fail;
1664         }
1665
1666         sc->bge_unit = unit;
1667
1668         /* Try to reset the chip. */
1669         bge_reset(sc);
1670
1671         if (bge_chipinit(sc)) {
1672                 printf("bge%d: chip initialization failed\n", sc->bge_unit);
1673                 bge_release_resources(sc);
1674                 error = ENXIO;
1675                 goto fail;
1676         }
1677
1678         /*
1679          * Get station address from the EEPROM.
1680          */
1681         mac_addr = bge_readmem_ind(sc, 0x0c14);
1682         if ((mac_addr >> 16) == 0x484b) {
1683                 sc->arpcom.ac_enaddr[0] = (u_char)(mac_addr >> 8);
1684                 sc->arpcom.ac_enaddr[1] = (u_char)mac_addr;
1685                 mac_addr = bge_readmem_ind(sc, 0x0c18);
1686                 sc->arpcom.ac_enaddr[2] = (u_char)(mac_addr >> 24);
1687                 sc->arpcom.ac_enaddr[3] = (u_char)(mac_addr >> 16);
1688                 sc->arpcom.ac_enaddr[4] = (u_char)(mac_addr >> 8);
1689                 sc->arpcom.ac_enaddr[5] = (u_char)mac_addr;
1690         } else if (bge_read_eeprom(sc, (caddr_t)&sc->arpcom.ac_enaddr,
1691             BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) {
1692                 printf("bge%d: failed to read station address\n", unit);
1693                 bge_release_resources(sc);
1694                 error = ENXIO;
1695                 goto fail;
1696         }
1697
1698         /*
1699          * A Broadcom chip was detected. Inform the world.
1700          */
1701         printf("bge%d: Ethernet address: %6D\n", unit,
1702             sc->arpcom.ac_enaddr, ":");
1703
1704         /* Allocate the general information block and ring buffers. */
1705         sc->bge_rdata = contigmalloc(sizeof(struct bge_ring_data), M_DEVBUF,
1706             M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);
1707
1708         if (sc->bge_rdata == NULL) {
1709                 bge_release_resources(sc);
1710                 error = ENXIO;
1711                 printf("bge%d: no memory for list buffers!\n", sc->bge_unit);
1712                 goto fail;
1713         }
1714
1715         bzero(sc->bge_rdata, sizeof(struct bge_ring_data));
1716
1717         /* Try to allocate memory for jumbo buffers. */
1718         if (bge_alloc_jumbo_mem(sc)) {
1719                 printf("bge%d: jumbo buffer allocation "
1720                     "failed\n", sc->bge_unit);
1721                 bge_release_resources(sc);
1722                 error = ENXIO;
1723                 goto fail;
1724         }
1725
1726         /* Set default tuneable values. */
1727         sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
1728         sc->bge_rx_coal_ticks = 150;
1729         sc->bge_tx_coal_ticks = 150;
1730         sc->bge_rx_max_coal_bds = 64;
1731         sc->bge_tx_max_coal_bds = 128;
1732
1733         /* Set up ifnet structure */
1734         ifp = &sc->arpcom.ac_if;
1735         ifp->if_softc = sc;
1736         ifp->if_unit = sc->bge_unit;
1737         ifp->if_name = "bge";
1738         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1739         ifp->if_ioctl = bge_ioctl;
1740         ifp->if_output = ether_output;
1741         ifp->if_start = bge_start;
1742         ifp->if_watchdog = bge_watchdog;
1743         ifp->if_init = bge_init;
1744         ifp->if_mtu = ETHERMTU;
1745         ifp->if_snd.ifq_maxlen = BGE_TX_RING_CNT - 1;
1746         ifp->if_hwassist = BGE_CSUM_FEATURES;
1747         ifp->if_capabilities = IFCAP_HWCSUM;
1748         ifp->if_capenable = ifp->if_capabilities;
1749
1750         /* Save ASIC rev. */
1751
1752         sc->bge_chipid =
1753             pci_read_config(dev, BGE_PCI_MISC_CTL, 4) &
1754             BGE_PCIMISCCTL_ASICREV;
1755         sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid);
1756         sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid);
1757
1758         /*
1759          * Figure out what sort of media we have by checking the
1760          * hardware config word in the first 32k of NIC internal memory,
1761          * or fall back to examining the EEPROM if necessary.
1762          * Note: on some BCM5700 cards, this value appears to be unset.
1763          * If that's the case, we have to rely on identifying the NIC
1764          * by its PCI subsystem ID, as we do below for the SysKonnect
1765          * SK-9D41.
1766          */
1767         if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER)
1768                 hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
1769         else {
1770                 bge_read_eeprom(sc, (caddr_t)&hwcfg,
1771                                 BGE_EE_HWCFG_OFFSET, sizeof(hwcfg));
1772                 hwcfg = ntohl(hwcfg);
1773         }
1774
1775         if ((hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER)
1776                 sc->bge_tbi = 1;
1777
1778         /* The SysKonnect SK-9D41 is a 1000baseSX card. */
1779         if ((pci_read_config(dev, BGE_PCI_SUBSYS, 4) >> 16) == SK_SUBSYSID_9D41)
1780                 sc->bge_tbi = 1;
1781
1782         if (sc->bge_tbi) {
1783                 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK,
1784                     bge_ifmedia_upd, bge_ifmedia_sts);
1785                 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
1786                 ifmedia_add(&sc->bge_ifmedia,
1787                     IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL);
1788                 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
1789                 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO);
1790         } else {
1791                 /*
1792                  * Do transceiver setup.
1793                  */
1794                 if (mii_phy_probe(dev, &sc->bge_miibus,
1795                     bge_ifmedia_upd, bge_ifmedia_sts)) {
1796                         printf("bge%d: MII without any PHY!\n", sc->bge_unit);
1797                         bge_release_resources(sc);
1798                         bge_free_jumbo_mem(sc);
1799                         error = ENXIO;
1800                         goto fail;
1801                 }
1802         }
1803
1804         /*
1805          * When using the BCM5701 in PCI-X mode, data corruption has
1806          * been observed in the first few bytes of some received packets.
1807          * Aligning the packet buffer in memory eliminates the corruption.
1808          * Unfortunately, this misaligns the packet payloads.  On platforms
1809          * which do not support unaligned accesses, we will realign the
1810          * payloads by copying the received packets.
1811          */
1812         switch (sc->bge_chipid) {
1813         case BGE_CHIPID_BCM5701_A0:
1814         case BGE_CHIPID_BCM5701_B0:
1815         case BGE_CHIPID_BCM5701_B2:
1816         case BGE_CHIPID_BCM5701_B5:
1817                 /* If in PCI-X mode, work around the alignment bug. */
1818                 if ((pci_read_config(dev, BGE_PCI_PCISTATE, 4) &
1819                     (BGE_PCISTATE_PCI_BUSMODE | BGE_PCISTATE_PCI_BUSSPEED)) ==
1820                     BGE_PCISTATE_PCI_BUSSPEED)
1821                         sc->bge_rx_alignment_bug = 1;
1822                 break;
1823         }
1824
1825         /*
1826          * Call MI attach routine.
1827          */
1828         ether_ifattach(ifp, ETHER_BPF_SUPPORTED);
1829         callout_handle_init(&sc->bge_stat_ch);
1830
1831 fail:
1832         splx(s);
1833
1834         return(error);
1835 }
1836
1837 static int
1838 bge_detach(dev)
1839         device_t dev;
1840 {
1841         struct bge_softc *sc;
1842         struct ifnet *ifp;
1843         int s;
1844
1845         s = splimp();
1846
1847         sc = device_get_softc(dev);
1848         ifp = &sc->arpcom.ac_if;
1849
1850         ether_ifdetach(ifp, ETHER_BPF_SUPPORTED);
1851         bge_stop(sc);
1852         bge_reset(sc);
1853
1854         if (sc->bge_tbi) {
1855                 ifmedia_removeall(&sc->bge_ifmedia);
1856         } else {
1857                 bus_generic_detach(dev);
1858                 device_delete_child(dev, sc->bge_miibus);
1859         }
1860
1861         bge_release_resources(sc);
1862         bge_free_jumbo_mem(sc);
1863
1864         splx(s);
1865
1866         return(0);
1867 }
1868
1869 static void
1870 bge_release_resources(sc)
1871         struct bge_softc *sc;
1872 {
1873         device_t dev;
1874
1875         dev = sc->bge_dev;
1876
1877         if (sc->bge_vpd_prodname != NULL)
1878                 free(sc->bge_vpd_prodname, M_DEVBUF);
1879
1880         if (sc->bge_vpd_readonly != NULL)
1881                 free(sc->bge_vpd_readonly, M_DEVBUF);
1882
1883         if (sc->bge_intrhand != NULL)
1884                 bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand);
1885
1886         if (sc->bge_irq != NULL)
1887                 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->bge_irq);
1888
1889         if (sc->bge_res != NULL)
1890                 bus_release_resource(dev, SYS_RES_MEMORY,
1891                     BGE_PCI_BAR0, sc->bge_res);
1892
1893         if (sc->bge_rdata != NULL)
1894                 contigfree(sc->bge_rdata,
1895                     sizeof(struct bge_ring_data), M_DEVBUF);
1896
1897         return;
1898 }
1899
/*
 * Issue a global reset of the controller and perform the post-reset
 * firmware handshake.  PCI config registers that the reset clobbers
 * (cache line size, command word) are saved beforehand and restored
 * afterwards.  On handshake timeout a diagnostic is printed and the
 * routine returns early with the chip in an indeterminate state.
 */
static void
bge_reset(sc)
	struct bge_softc *sc;
{
	device_t dev;
	u_int32_t cachesize, command, pcistate;
	int i, val = 0;

	dev = sc->bge_dev;

	/* Save some important PCI state. */
	cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
	command = pci_read_config(dev, BGE_PCI_CMD, 4);
	pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);

	/* Enable indirect access so we can still talk to the chip. */
	pci_write_config(dev, BGE_PCI_MISC_CTL,
	    BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
	    BGE_PCIMISCCTL_ENDIAN_WORDSWAP|BGE_PCIMISCCTL_PCISTATE_RW, 4);

	/* Issue global reset */
	bge_writereg_ind(sc, BGE_MISC_CFG,
	    BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1));

	DELAY(1000);

	/* Reset some of the PCI state that got zapped by reset */
	pci_write_config(dev, BGE_PCI_MISC_CTL,
	    BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
	    BGE_PCIMISCCTL_ENDIAN_WORDSWAP|BGE_PCIMISCCTL_PCISTATE_RW, 4);
	pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
	pci_write_config(dev, BGE_PCI_CMD, command, 4);
	/*
	 * (65 << 1) presumably programs the core clock delay field of
	 * BGE_MISC_CFG -- TODO confirm against the Broadcom manual.
	 */
	bge_writereg_ind(sc, BGE_MISC_CFG, (65 << 1));

	/*
	 * Prevent PXE restart: write a magic number to the
	 * general communications memory at 0xB50.
	 */
	bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
	/*
	 * Poll the value location we just wrote until
	 * we see the 1's complement of the magic number.
	 * This indicates that the firmware initialization
	 * is complete.
	 */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
		if (val == ~BGE_MAGIC_NUMBER)
			break;
		DELAY(10);
	}

	if (i == BGE_TIMEOUT) {
		printf("bge%d: firmware handshake timed out\n", sc->bge_unit);
		return;
	}

	/*
	 * XXX Wait for the value of the PCISTATE register to
	 * return to its original pre-reset state. This is a
	 * fairly good indicator of reset completion. If we don't
	 * wait for the reset to fully complete, trying to read
	 * from the device's non-PCI registers may yield garbage
	 * results.
	 */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate)
			break;
		DELAY(10);
	}
	/*
	 * NOTE(review): unlike the firmware handshake above, a timeout
	 * of this PCISTATE poll is not reported; we proceed regardless.
	 * Verify that this is intentional.
	 */

	/* Enable memory arbiter. */
	CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);

	/* Fix up byte swapping */
	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_MODECTL_BYTESWAP_NONFRAME|
	    BGE_MODECTL_BYTESWAP_DATA);

	CSR_WRITE_4(sc, BGE_MAC_MODE, 0);

	/* Give the chip time to settle before callers poke registers. */
	DELAY(10000);

	return;
}
1983
1984 /*
1985  * Frame reception handling. This is called if there's a frame
1986  * on the receive return list.
1987  *
1988  * Note: we have to be able to handle two possibilities here:
1989  * 1) the frame is from the jumbo receive ring
1990  * 2) the frame is from the standard receive ring
1991  */
1992
static void
bge_rxeof(sc)
	struct bge_softc *sc;
{
	struct ifnet *ifp;
	int stdcnt = 0, jumbocnt = 0;	/* buffers consumed per ring */

	ifp = &sc->arpcom.ac_if;

	/*
	 * Drain the RX return ring: walk from our saved consumer index
	 * up to the producer index the chip posted in the status block.
	 */
	while(sc->bge_rx_saved_considx !=
	    sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx) {
		struct bge_rx_bd	*cur_rx;
		u_int32_t		rxidx;
		struct ether_header	*eh;
		struct mbuf		*m = NULL;
		u_int16_t		vlan_tag = 0;
		int			have_tag = 0;

		cur_rx =
	    &sc->bge_rdata->bge_rx_return_ring[sc->bge_rx_saved_considx];

		rxidx = cur_rx->bge_idx;
		BGE_INC(sc->bge_rx_saved_considx, BGE_RETURN_RING_CNT);

		/* Remember any hardware-stripped VLAN tag for later. */
		if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
			have_tag = 1;
			vlan_tag = cur_rx->bge_vlan_tag;
		}

		if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
			/* Frame came from the jumbo receive ring. */
			BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
			m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
			sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL;
			jumbocnt++;
			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
				/* Bad frame: recycle the mbuf, drop it. */
				ifp->if_ierrors++;
				bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
				continue;
			}
			if (bge_newbuf_jumbo(sc,
			    sc->bge_jumbo, NULL) == ENOBUFS) {
				/* No replacement mbuf: recycle, drop. */
				ifp->if_ierrors++;
				bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
				continue;
			}
		} else {
			/* Frame came from the standard receive ring. */
			BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
			m = sc->bge_cdata.bge_rx_std_chain[rxidx];
			sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL;
			stdcnt++;
			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
				/* Bad frame: recycle the mbuf, drop it. */
				ifp->if_ierrors++;
				bge_newbuf_std(sc, sc->bge_std, m);
				continue;
			}
			if (bge_newbuf_std(sc, sc->bge_std,
			    NULL) == ENOBUFS) {
				/* No replacement mbuf: recycle, drop. */
				ifp->if_ierrors++;
				bge_newbuf_std(sc, sc->bge_std, m);
				continue;
			}
		}

		ifp->if_ipackets++;
#ifndef __i386__
		/*
		 * The i386 allows unaligned accesses, but for other
		 * platforms we must make sure the payload is aligned.
		 */
		if (sc->bge_rx_alignment_bug) {
			bcopy(m->m_data, m->m_data + ETHER_ALIGN,
			    cur_rx->bge_len);
			m->m_data += ETHER_ALIGN;
		}
#endif
		eh = mtod(m, struct ether_header *);
		m->m_pkthdr.len = m->m_len = cur_rx->bge_len;
		m->m_pkthdr.rcvif = ifp;

		/* Remove header from mbuf and pass it on. */
		m_adj(m, sizeof(struct ether_header));

#if 0 /* currently broken for some packets, possibly related to TCP options */
		if (ifp->if_hwassist) {
			m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
			if ((cur_rx->bge_ip_csum ^ 0xffff) == 0)
				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
			if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) {
				m->m_pkthdr.csum_data =
				    cur_rx->bge_tcp_udp_csum;
				m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
			}
		}
#endif

		/*
		 * If we received a packet with a vlan tag, pass it
		 * to vlan_input() instead of ether_input().
		 */
		if (have_tag) {
			VLAN_INPUT_TAG(eh, m, vlan_tag);
			have_tag = vlan_tag = 0;
			continue;
		}

		ether_input(ifp, eh, m);
	}

	/* Tell the chip how far we got, then replenish the RX rings. */
	CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
	if (stdcnt)
		CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
	if (jumbocnt)
		CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);

	return;
}
2109
2110 static void
2111 bge_txeof(sc)
2112         struct bge_softc *sc;
2113 {
2114         struct bge_tx_bd *cur_tx = NULL;
2115         struct ifnet *ifp;
2116
2117         ifp = &sc->arpcom.ac_if;
2118
2119         /*
2120          * Go through our tx ring and free mbufs for those
2121          * frames that have been sent.
2122          */
2123         while (sc->bge_tx_saved_considx !=
2124             sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx) {
2125                 u_int32_t               idx = 0;
2126
2127                 idx = sc->bge_tx_saved_considx;
2128                 cur_tx = &sc->bge_rdata->bge_tx_ring[idx];
2129                 if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
2130                         ifp->if_opackets++;
2131                 if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
2132                         m_freem(sc->bge_cdata.bge_tx_chain[idx]);
2133                         sc->bge_cdata.bge_tx_chain[idx] = NULL;
2134                 }
2135                 sc->bge_txcnt--;
2136                 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
2137                 ifp->if_timer = 0;
2138         }
2139
2140         if (cur_tx != NULL)
2141                 ifp->if_flags &= ~IFF_OACTIVE;
2142
2143         return;
2144 }
2145
/*
 * Interrupt service routine.  Acknowledges the interrupt, handles
 * link state changes, drains the RX return and TX rings, then
 * re-enables interrupts and restarts transmission if packets are
 * still queued.
 */
static void
bge_intr(xsc)
	void *xsc;
{
	struct bge_softc *sc;
	struct ifnet *ifp;

	sc = xsc;
	ifp = &sc->arpcom.ac_if;

#ifdef notdef
	/* Avoid this for now -- checking this register is expensive. */
	/* Make sure this is really our interrupt. */
	if (!(CSR_READ_4(sc, BGE_MISC_LOCAL_CTL) & BGE_MLC_INTR_STATE))
		return;
#endif
	/* Ack interrupt and stop others from occurring. */
	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);

	/*
	 * Process link state changes.
	 * Grrr. The link status word in the status block does
	 * not work correctly on the BCM5700 rev AX and BX chips,
	 * according to all available information. Hence, we have
	 * to enable MII interrupts in order to properly obtain
	 * async link changes. Unfortunately, this also means that
	 * we have to read the MAC status register to detect link
	 * changes, thereby adding an additional register access to
	 * the interrupt handler.
	 */

	if (sc->bge_asicrev == BGE_ASICREV_BCM5700) {
		u_int32_t		status;

		status = CSR_READ_4(sc, BGE_MAC_STS);
		if (status & BGE_MACSTAT_MI_INTERRUPT) {
			/* Force a fresh link check via bge_tick(). */
			sc->bge_link = 0;
			untimeout(bge_tick, sc, sc->bge_stat_ch);
			bge_tick(sc);
			/* Clear the interrupt */
			CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
			    BGE_EVTENB_MI_INTERRUPT);
			/* Reading the ISR clears the PHY interrupt. */
			bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR);
			bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR,
			    BRGPHY_INTRS);
		}
	} else {
		/* Other chips: the status block link bits are usable. */
		if ((sc->bge_rdata->bge_status_block.bge_status &
		    BGE_STATFLAG_UPDATED) &&
		    (sc->bge_rdata->bge_status_block.bge_status &
		    BGE_STATFLAG_LINKSTATE_CHANGED)) {
			sc->bge_rdata->bge_status_block.bge_status &= ~(BGE_STATFLAG_UPDATED|BGE_STATFLAG_LINKSTATE_CHANGED);
			sc->bge_link = 0;
			untimeout(bge_tick, sc, sc->bge_stat_ch);
			bge_tick(sc);
			/* Clear the interrupt */
			CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
			    BGE_MACSTAT_CFG_CHANGED);

			/* Force flush the status block cached by PCI bridge */
			CSR_READ_4(sc, BGE_MBX_IRQ0_LO);
		}
	}

	if (ifp->if_flags & IFF_RUNNING) {
		/* Check RX return ring producer/consumer */
		bge_rxeof(sc);

		/* Check TX ring producer/consumer */
		bge_txeof(sc);
	}

	bge_handle_events(sc);

	/* Re-enable interrupts. */
	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);

	/* Kick the transmitter if the send queue is not empty. */
	if (ifp->if_flags & IFF_RUNNING && ifp->if_snd.ifq_head != NULL)
		bge_start(ifp);

	return;
}
2228
/*
 * Periodic (once per second) housekeeping timer: updates the
 * statistics counters and, until a link is established, polls the
 * link state.  Reschedules itself via timeout(9) on every call and
 * runs at splimp() to serialize against the interrupt handler.
 */
static void
bge_tick(xsc)
	void *xsc;
{
	struct bge_softc *sc;
	struct mii_data *mii = NULL;
	struct ifmedia *ifm = NULL;
	struct ifnet *ifp;
	int s;

	sc = xsc;
	ifp = &sc->arpcom.ac_if;

	s = splimp();

	bge_stats_update(sc);
	sc->bge_stat_ch = timeout(bge_tick, sc, hz);
	/* Once the link is up there is nothing more to poll. */
	if (sc->bge_link) {
		splx(s);
		return;
	}

	if (sc->bge_tbi) {
		/* Fiber (TBI) NIC: check PCS sync directly in the MAC. */
		/* NOTE(review): ifm is assigned but otherwise unused here. */
		ifm = &sc->bge_ifmedia;
		if (CSR_READ_4(sc, BGE_MAC_STS) &
		    BGE_MACSTAT_TBI_PCS_SYNCHED) {
			sc->bge_link++;
			CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
			printf("bge%d: gigabit link up\n", sc->bge_unit);
			if (ifp->if_snd.ifq_head != NULL)
				bge_start(ifp);
		}
		splx(s);
		return;
	}

	/* Copper NIC: let the MII layer poll the PHY. */
	mii = device_get_softc(sc->bge_miibus);
	mii_tick(mii);
 
	if (!sc->bge_link) {
		mii_pollstat(mii);
		if (mii->mii_media_status & IFM_ACTIVE &&
		    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
			sc->bge_link++;
			if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_TX ||
			    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
				printf("bge%d: gigabit link up\n",
				   sc->bge_unit);
			/* Drain anything queued while the link was down. */
			if (ifp->if_snd.ifq_head != NULL)
				bge_start(ifp);
		}
	}

	splx(s);

	return;
}
2286
2287 static void
2288 bge_stats_update(sc)
2289         struct bge_softc *sc;
2290 {
2291         struct ifnet *ifp;
2292         struct bge_stats *stats;
2293
2294         ifp = &sc->arpcom.ac_if;
2295
2296         stats = (struct bge_stats *)(sc->bge_vhandle +
2297             BGE_MEMWIN_START + BGE_STATS_BLOCK);
2298
2299         ifp->if_collisions +=
2300            (stats->dot3StatsSingleCollisionFrames.bge_addr_lo +
2301            stats->dot3StatsMultipleCollisionFrames.bge_addr_lo +
2302            stats->dot3StatsExcessiveCollisions.bge_addr_lo +
2303            stats->dot3StatsLateCollisions.bge_addr_lo) -
2304            ifp->if_collisions;
2305
2306 #ifdef notdef
2307         ifp->if_collisions +=
2308            (sc->bge_rdata->bge_info.bge_stats.dot3StatsSingleCollisionFrames +
2309            sc->bge_rdata->bge_info.bge_stats.dot3StatsMultipleCollisionFrames +
2310            sc->bge_rdata->bge_info.bge_stats.dot3StatsExcessiveCollisions +
2311            sc->bge_rdata->bge_info.bge_stats.dot3StatsLateCollisions) -
2312            ifp->if_collisions;
2313 #endif
2314
2315         return;
2316 }
2317
2318 /*
2319  * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
2320  * pointers to descriptors.
2321  */
/*
 * Map one outbound mbuf chain onto TX descriptors.
 *
 * sc:		softc of the transmitting interface
 * m_head:	head of the mbuf chain (referenced, not copied; the
 *		driver keeps the chain until TX completion)
 * txidx:	in/out producer index, advanced past the descriptors
 *		consumed on success
 *
 * Returns 0 on success or ENOBUFS if the ring is too full.  On
 * failure *txidx is unchanged and no mbuf ownership is recorded,
 * so the caller may safely requeue m_head.
 */
static int
bge_encap(sc, m_head, txidx)
	struct bge_softc *sc;
	struct mbuf *m_head;
	u_int32_t *txidx;
{
	struct bge_tx_bd	*f = NULL;
	struct mbuf		*m;
	u_int32_t		frag, cur, cnt = 0;
	u_int16_t		csum_flags = 0;
	struct ifvlan		*ifv = NULL;

	/*
	 * If the packet came through a vlan pseudo-interface
	 * (M_PROTO1 marks it), grab its tag for hardware insertion.
	 */
	if ((m_head->m_flags & (M_PROTO1|M_PKTHDR)) == (M_PROTO1|M_PKTHDR) &&
	    m_head->m_pkthdr.rcvif != NULL &&
	    m_head->m_pkthdr.rcvif->if_type == IFT_L2VLAN)
		ifv = m_head->m_pkthdr.rcvif->if_softc;

	m = m_head;
	cur = frag = *txidx;

	/* Translate the stack's checksum-offload requests to BD flags. */
	if (m_head->m_pkthdr.csum_flags) {
		if (m_head->m_pkthdr.csum_flags & CSUM_IP)
			csum_flags |= BGE_TXBDFLAG_IP_CSUM;
		if (m_head->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
			csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
		if (m_head->m_flags & M_LASTFRAG)
			csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
		else if (m_head->m_flags & M_FRAG)
			csum_flags |= BGE_TXBDFLAG_IP_FRAG;
	}
	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	for (m = m_head; m != NULL; m = m->m_next) {
		if (m->m_len != 0) {
			f = &sc->bge_rdata->bge_tx_ring[frag];
			/* Stop if this slot still holds an unsent mbuf. */
			if (sc->bge_cdata.bge_tx_chain[frag] != NULL)
				break;
			BGE_HOSTADDR(f->bge_addr) =
			   vtophys(mtod(m, vm_offset_t));
			f->bge_len = m->m_len;
			f->bge_flags = csum_flags;
			if (ifv != NULL) {
				f->bge_flags |= BGE_TXBDFLAG_VLAN_TAG;
				f->bge_vlan_tag = ifv->ifv_tag;
			} else {
				f->bge_vlan_tag = 0;
			}
			/*
			 * Sanity check: avoid coming within 16 descriptors
			 * of the end of the ring.
			 */
			if ((BGE_TX_RING_CNT - (sc->bge_txcnt + cnt)) < 16)
				return(ENOBUFS);
			cur = frag;
			BGE_INC(frag, BGE_TX_RING_CNT);
			cnt++;
		}
	}

	/* Ran out of free descriptors before the chain was mapped. */
	if (m != NULL)
		return(ENOBUFS);

	/* Wrapping all the way around would collide with unsent BDs. */
	if (frag == sc->bge_tx_saved_considx)
		return(ENOBUFS);

	/* Mark the last descriptor and commit the chain. */
	sc->bge_rdata->bge_tx_ring[cur].bge_flags |= BGE_TXBDFLAG_END;
	sc->bge_cdata.bge_tx_chain[cur] = m_head;
	sc->bge_txcnt += cnt;

	*txidx = frag;

	return(0);
}
2398
2399 /*
2400  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
2401  * to the mbuf data regions directly in the transmit descriptors.
2402  */
static void
bge_start(ifp)
	struct ifnet *ifp;
{
	struct bge_softc *sc;
	struct mbuf *m_head = NULL;
	u_int32_t prodidx = 0;

	sc = ifp->if_softc;

	/*
	 * With no link, only start draining once a number of packets
	 * have queued up.  NOTE(review): the rationale for the
	 * threshold of 10 is not visible here -- presumably it limits
	 * useless work while the link is negotiating; confirm before
	 * changing.
	 */
	if (!sc->bge_link && ifp->if_snd.ifq_len < 10)
		return;

	/* Resume from the producer index the chip last saw. */
	prodidx = CSR_READ_4(sc, BGE_MBX_TX_HOST_PROD0_LO);

	/* Fill descriptors while free slots and queued packets remain. */
	while(sc->bge_cdata.bge_tx_chain[prodidx] == NULL) {
		IF_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/*
		 * XXX
		 * safety overkill.  If this is a fragmented packet chain
		 * with delayed TCP/UDP checksums, then only encapsulate
		 * it if we have enough descriptors to handle the entire
		 * chain at once.
		 * (paranoia -- may not actually be needed)
		 */
		if (m_head->m_flags & M_FIRSTFRAG &&
		    m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
			if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
			    m_head->m_pkthdr.csum_data + 16) {
				IF_PREPEND(&ifp->if_snd, m_head);
				ifp->if_flags |= IFF_OACTIVE;
				break;
			}
		}

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (bge_encap(sc, m_head, &prodidx)) {
			IF_PREPEND(&ifp->if_snd, m_head);
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp, m_head);
	}

	/* Transmit */
	CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
	/* 5700 b2 errata */
	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
		CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 * NOTE(review): the watchdog is armed even when no packet was
	 * queued above; verify this cannot cause a spurious watchdog
	 * reset when the send queue was empty.
	 */
	ifp->if_timer = 5;

	return;
}
2473
2474 static void
2475 bge_init(xsc)
2476         void *xsc;
2477 {
2478         struct bge_softc *sc = xsc;
2479         struct ifnet *ifp;
2480         u_int16_t *m;
2481         int s;
2482
2483         s = splimp();
2484
2485         ifp = &sc->arpcom.ac_if;
2486
2487         if (ifp->if_flags & IFF_RUNNING) {
2488                 splx(s);
2489                 return;
2490         }
2491
2492         /* Cancel pending I/O and flush buffers. */
2493         bge_stop(sc);
2494         bge_reset(sc);
2495         bge_chipinit(sc);
2496
2497         /*
2498          * Init the various state machines, ring
2499          * control blocks and firmware.
2500          */
2501         if (bge_blockinit(sc)) {
2502                 printf("bge%d: initialization failure\n", sc->bge_unit);
2503                 splx(s);
2504                 return;
2505         }
2506
2507         ifp = &sc->arpcom.ac_if;
2508
2509         /* Specify MTU. */
2510         CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
2511             ETHER_HDR_LEN + ETHER_CRC_LEN);
2512
2513         /* Load our MAC address. */
2514         m = (u_int16_t *)&sc->arpcom.ac_enaddr[0];
2515         CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
2516         CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
2517
2518         /* Enable or disable promiscuous mode as needed. */
2519         if (ifp->if_flags & IFF_PROMISC) {
2520                 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
2521         } else {
2522                 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
2523         }
2524
2525         /* Program multicast filter. */
2526         bge_setmulti(sc);
2527
2528         /* Init RX ring. */
2529         bge_init_rx_ring_std(sc);
2530
2531         /* Init jumbo RX ring. */
2532         if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
2533                 bge_init_rx_ring_jumbo(sc);
2534
2535         /* Init our RX return ring index */
2536         sc->bge_rx_saved_considx = 0;
2537
2538         /* Init TX ring. */
2539         bge_init_tx_ring(sc);
2540
2541         /* Turn on transmitter */
2542         BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE);
2543
2544         /* Turn on receiver */
2545         BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
2546
2547         /* Tell firmware we're alive. */
2548         BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
2549
2550         /* Enable host interrupts. */
2551         BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
2552         BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
2553         CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
2554
2555         bge_ifmedia_upd(ifp);
2556
2557         ifp->if_flags |= IFF_RUNNING;
2558         ifp->if_flags &= ~IFF_OACTIVE;
2559
2560         splx(s);
2561
2562         sc->bge_stat_ch = timeout(bge_tick, sc, hz);
2563
2564         return;
2565 }
2566
2567 /*
2568  * Set media options.
2569  */
2570 static int
2571 bge_ifmedia_upd(ifp)
2572         struct ifnet *ifp;
2573 {
2574         struct bge_softc *sc;
2575         struct mii_data *mii;
2576         struct ifmedia *ifm;
2577
2578         sc = ifp->if_softc;
2579         ifm = &sc->bge_ifmedia;
2580
2581         /* If this is a 1000baseX NIC, enable the TBI port. */
2582         if (sc->bge_tbi) {
2583                 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2584                         return(EINVAL);
2585                 switch(IFM_SUBTYPE(ifm->ifm_media)) {
2586                 case IFM_AUTO:
2587                         break;
2588                 case IFM_1000_SX:
2589                         if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
2590                                 BGE_CLRBIT(sc, BGE_MAC_MODE,
2591                                     BGE_MACMODE_HALF_DUPLEX);
2592                         } else {
2593                                 BGE_SETBIT(sc, BGE_MAC_MODE,
2594                                     BGE_MACMODE_HALF_DUPLEX);
2595                         }
2596                         break;
2597                 default:
2598                         return(EINVAL);
2599                 }
2600                 return(0);
2601         }
2602
2603         mii = device_get_softc(sc->bge_miibus);
2604         sc->bge_link = 0;
2605         if (mii->mii_instance) {
2606                 struct mii_softc *miisc;
2607                 for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
2608                     miisc = LIST_NEXT(miisc, mii_list))
2609                         mii_phy_reset(miisc);
2610         }
2611         mii_mediachg(mii);
2612
2613         return(0);
2614 }
2615
2616 /*
2617  * Report current media status.
2618  */
2619 static void
2620 bge_ifmedia_sts(ifp, ifmr)
2621         struct ifnet *ifp;
2622         struct ifmediareq *ifmr;
2623 {
2624         struct bge_softc *sc;
2625         struct mii_data *mii;
2626
2627         sc = ifp->if_softc;
2628
2629         if (sc->bge_tbi) {
2630                 ifmr->ifm_status = IFM_AVALID;
2631                 ifmr->ifm_active = IFM_ETHER;
2632                 if (CSR_READ_4(sc, BGE_MAC_STS) &
2633                     BGE_MACSTAT_TBI_PCS_SYNCHED)
2634                         ifmr->ifm_status |= IFM_ACTIVE;
2635                 ifmr->ifm_active |= IFM_1000_SX;
2636                 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
2637                         ifmr->ifm_active |= IFM_HDX;    
2638                 else
2639                         ifmr->ifm_active |= IFM_FDX;
2640                 return;
2641         }
2642
2643         mii = device_get_softc(sc->bge_miibus);
2644         mii_pollstat(mii);
2645         ifmr->ifm_active = mii->mii_media_active;
2646         ifmr->ifm_status = mii->mii_media_status;
2647
2648         return;
2649 }
2650
2651 static int
2652 bge_ioctl(ifp, command, data)
2653         struct ifnet *ifp;
2654         u_long command;
2655         caddr_t data;
2656 {
2657         struct bge_softc *sc = ifp->if_softc;
2658         struct ifreq *ifr = (struct ifreq *) data;
2659         int s, mask, error = 0;
2660         struct mii_data *mii;
2661
2662         s = splimp();
2663
2664         switch(command) {
2665         case SIOCSIFADDR:
2666         case SIOCGIFADDR:
2667                 error = ether_ioctl(ifp, command, data);
2668                 break;
2669         case SIOCSIFMTU:
2670                 if (ifr->ifr_mtu > BGE_JUMBO_MTU)
2671                         error = EINVAL;
2672                 else {
2673                         ifp->if_mtu = ifr->ifr_mtu;
2674                         ifp->if_flags &= ~IFF_RUNNING;
2675                         bge_init(sc);
2676                 }
2677                 break;
2678         case SIOCSIFFLAGS:
2679                 if (ifp->if_flags & IFF_UP) {
2680                         /*
2681                          * If only the state of the PROMISC flag changed,
2682                          * then just use the 'set promisc mode' command
2683                          * instead of reinitializing the entire NIC. Doing
2684                          * a full re-init means reloading the firmware and
2685                          * waiting for it to start up, which may take a
2686                          * second or two.
2687                          */
2688                         if (ifp->if_flags & IFF_RUNNING &&
2689                             ifp->if_flags & IFF_PROMISC &&
2690                             !(sc->bge_if_flags & IFF_PROMISC)) {
2691                                 BGE_SETBIT(sc, BGE_RX_MODE,
2692                                     BGE_RXMODE_RX_PROMISC);
2693                         } else if (ifp->if_flags & IFF_RUNNING &&
2694                             !(ifp->if_flags & IFF_PROMISC) &&
2695                             sc->bge_if_flags & IFF_PROMISC) {
2696                                 BGE_CLRBIT(sc, BGE_RX_MODE,
2697                                     BGE_RXMODE_RX_PROMISC);
2698                         } else
2699                                 bge_init(sc);
2700                 } else {
2701                         if (ifp->if_flags & IFF_RUNNING) {
2702                                 bge_stop(sc);
2703                         }
2704                 }
2705                 sc->bge_if_flags = ifp->if_flags;
2706                 error = 0;
2707                 break;
2708         case SIOCADDMULTI:
2709         case SIOCDELMULTI:
2710                 if (ifp->if_flags & IFF_RUNNING) {
2711                         bge_setmulti(sc);
2712                         error = 0;
2713                 }
2714                 break;
2715         case SIOCSIFMEDIA:
2716         case SIOCGIFMEDIA:
2717                 if (sc->bge_tbi) {
2718                         error = ifmedia_ioctl(ifp, ifr,
2719                             &sc->bge_ifmedia, command);
2720                 } else {
2721                         mii = device_get_softc(sc->bge_miibus);
2722                         error = ifmedia_ioctl(ifp, ifr,
2723                             &mii->mii_media, command);
2724                 }
2725                 break;
2726         case SIOCSIFCAP:
2727                 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
2728                 if (mask & IFCAP_HWCSUM) {
2729                         if (IFCAP_HWCSUM & ifp->if_capenable)
2730                                 ifp->if_capenable &= ~IFCAP_HWCSUM;
2731                         else
2732                                 ifp->if_capenable |= IFCAP_HWCSUM;
2733                 }
2734                 error = 0;
2735                 break;
2736         default:
2737                 error = EINVAL;
2738                 break;
2739         }
2740
2741         (void)splx(s);
2742
2743         return(error);
2744 }
2745
2746 static void
2747 bge_watchdog(ifp)
2748         struct ifnet *ifp;
2749 {
2750         struct bge_softc *sc;
2751
2752         sc = ifp->if_softc;
2753
2754         printf("bge%d: watchdog timeout -- resetting\n", sc->bge_unit);
2755
2756         ifp->if_flags &= ~IFF_RUNNING;
2757         bge_init(sc);
2758
2759         ifp->if_oerrors++;
2760
2761         return;
2762 }
2763
2764 /*
2765  * Stop the adapter and free any mbufs allocated to the
2766  * RX and TX lists.
2767  */
static void
bge_stop(sc)
	struct bge_softc *sc;
{
	struct ifnet *ifp;
	struct ifmedia_entry *ifm;
	struct mii_data *mii = NULL;
	int mtmp, itmp;

	ifp = &sc->arpcom.ac_if;

	/* Only copper NICs have an MII; TBI (fiber) parts leave mii NULL. */
	if (!sc->bge_tbi)
		mii = device_get_softc(sc->bge_miibus);

	/* Stop the periodic stats/link tick started by bge_init(). */
	untimeout(bge_tick, sc, sc->bge_stat_ch);

	/*
	 * Disable all of the receiver blocks
	 * NOTE(review): the shutdown order of these register writes
	 * appears deliberate — do not reorder without consulting the
	 * chip documentation.
	 */
	BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);

	/*
	 * Disable all of the transmit blocks
	 */
	BGE_CLRBIT(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);

	/*
	 * Shut down all of the memory managers and related
	 * state machines.
	 */
	BGE_CLRBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
	/* Reset all on-chip flow-through queues. */
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
	BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);

	/* Disable host interrupts. */
	BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);

	/*
	 * Tell firmware we're shutting down.
	 */
	BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);

	/* Free the RX lists. */
	bge_free_rx_ring_std(sc);

	/* Free jumbo RX list. */
	bge_free_rx_ring_jumbo(sc);

	/* Free TX buffers. */
	bge_free_tx_ring(sc);

	/*
	 * Isolate/power down the PHY, but leave the media selection
	 * unchanged so that things will be put back to normal when
	 * we bring the interface back up.  This works by temporarily
	 * forcing the media to IFM_NONE, running a media change, then
	 * restoring the saved media and interface flags.
	 */
	if (!sc->bge_tbi) {
		itmp = ifp->if_flags;
		ifp->if_flags |= IFF_UP;
		ifm = mii->mii_media.ifm_cur;
		mtmp = ifm->ifm_media;
		ifm->ifm_media = IFM_ETHER|IFM_NONE;
		mii_mediachg(mii);
		ifm->ifm_media = mtmp;
		ifp->if_flags = itmp;
	}

	sc->bge_link = 0;

	/* Mark the TX consumer index invalid until the next init. */
	sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	return;
}
2860
2861 /*
2862  * Stop all chip I/O so that the kernel's probe routines don't
2863  * get confused by errant DMAs when rebooting.
2864  */
2865 static void
2866 bge_shutdown(dev)
2867         device_t dev;
2868 {
2869         struct bge_softc *sc;
2870
2871         sc = device_get_softc(dev);
2872
2873         bge_stop(sc); 
2874         bge_reset(sc);
2875
2876         return;
2877 }