if_printf/device_printf cleanup. Remove minor junk.
[dragonfly.git] / sys / dev / netif / bge / if_bge.c
1 /*
2  * Copyright (c) 2001 Wind River Systems
3  * Copyright (c) 1997, 1998, 1999, 2001
4  *      Bill Paul <wpaul@windriver.com>.  All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. All advertising materials mentioning features or use of this software
15  *    must display the following acknowledgement:
16  *      This product includes software developed by Bill Paul.
17  * 4. Neither the name of the author nor the names of any co-contributors
18  *    may be used to endorse or promote products derived from this software
19  *    without specific prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
22  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
25  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31  * THE POSSIBILITY OF SUCH DAMAGE.
32  *
33  * $FreeBSD: src/sys/dev/bge/if_bge.c,v 1.3.2.29 2003/12/01 21:06:59 ambrisko Exp $
34  * $DragonFly: src/sys/dev/netif/bge/if_bge.c,v 1.29 2005/05/21 07:28:04 joerg Exp $
35  *
36  */
37
38 /*
39  * Broadcom BCM570x family gigabit ethernet driver for FreeBSD.
40  * 
41  * Written by Bill Paul <wpaul@windriver.com>
42  * Senior Engineer, Wind River Systems
43  */
44
45 /*
46  * The Broadcom BCM5700 is based on technology originally developed by
47  * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
48  * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
49  * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
50  * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
51  * frames, highly configurable RX filtering, and 16 RX and TX queues
52  * (which, along with RX filter rules, can be used for QOS applications).
53  * Other features, such as TCP segmentation, may be available as part
54  * of value-added firmware updates. Unlike the Tigon I and Tigon II,
55  * firmware images can be stored in hardware and need not be compiled
56  * into the driver.
57  *
58  * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
59  * function in a 32-bit/64-bit 33/66Mhz bus, or a 64-bit/133Mhz bus.
60  * 
61  * The BCM5701 is a single-chip solution incorporating both the BCM5700
62  * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
63  * does not support external SSRAM.
64  *
65  * Broadcom also produces a variation of the BCM5700 under the "Altima"
66  * brand name, which is functionally similar but lacks PCI-X support.
67  *
68  * Without external SSRAM, you can only have at most 4 TX rings,
69  * and the use of the mini RX ring is disabled. This seems to imply
70  * that these features are simply not available on the BCM5701. As a
71  * result, this driver does not implement any support for the mini RX
72  * ring.
73  */
74
75 #include <sys/param.h>
76 #include <sys/systm.h>
77 #include <sys/sockio.h>
78 #include <sys/mbuf.h>
79 #include <sys/malloc.h>
80 #include <sys/kernel.h>
81 #include <sys/socket.h>
82 #include <sys/queue.h>
83
84 #include <net/if.h>
85 #include <net/ifq_var.h>
86 #include <net/if_arp.h>
87 #include <net/ethernet.h>
88 #include <net/if_dl.h>
89 #include <net/if_media.h>
90
91 #include <net/bpf.h>
92
93 #include <net/if_types.h>
94 #include <net/vlan/if_vlan_var.h>
95
96 #include <netinet/in_systm.h>
97 #include <netinet/in.h>
98 #include <netinet/ip.h>
99
100 #include <vm/vm.h>              /* for vtophys */
101 #include <vm/pmap.h>            /* for vtophys */
102 #include <machine/clock.h>      /* for DELAY */
103 #include <machine/bus_memio.h>
104 #include <machine/bus.h>
105 #include <machine/resource.h>
106 #include <sys/bus.h>
107 #include <sys/rman.h>
108
109 #include <dev/netif/mii_layer/mii.h>
110 #include <dev/netif/mii_layer/miivar.h>
111 #include <dev/netif/mii_layer/miidevs.h>
112 #include <dev/netif/mii_layer/brgphyreg.h>
113
114 #include <bus/pci/pcidevs.h>
115 #include <bus/pci/pcireg.h>
116 #include <bus/pci/pcivar.h>
117
118 #include "if_bgereg.h"
119
120 #define BGE_CSUM_FEATURES       (CSUM_IP | CSUM_TCP | CSUM_UDP)
121
122 /* "controller miibus0" required.  See GENERIC if you get errors here. */
123 #include "miibus_if.h"
124
125 /*
126  * Various supported device vendors/types and their names. Note: the
127  * spec seems to indicate that the hardware still has Alteon's vendor
128  * ID burned into it, though it will always be overridden by the vendor
129  * ID in the EEPROM. Just to be safe, we cover all possibilities.
130  */
131 #define BGE_DEVDESC_MAX         64      /* Maximum device description length */
132
133 static struct bge_type bge_devs[] = {
134         { PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_BCM5700,
135                 "Broadcom BCM5700 Gigabit Ethernet" },
136         { PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_BCM5700,
137                 "Broadcom BCM5701 Gigabit Ethernet" },
138         { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5700,
139                 "Broadcom BCM5700 Gigabit Ethernet" },
140         { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5701,
141                 "Broadcom BCM5701 Gigabit Ethernet" },
142         { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702X,
143                 "Broadcom BCM5702X Gigabit Ethernet" },
144         { PCI_VENDOR_BROADCOM, BCOM_DEVICEID_BCM5702X,
145                 "Broadcom BCM5702X Gigabit Ethernet" },
146         { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703X,
147                 "Broadcom BCM5703X Gigabit Ethernet" },
148         { PCI_VENDOR_BROADCOM, BCOM_DEVICEID_BCM5703X,
149                 "Broadcom BCM5703X Gigabit Ethernet" },
150         { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704C,
151                 "Broadcom BCM5704C Dual Gigabit Ethernet" },
152         { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704S,
153                 "Broadcom BCM5704S Dual Gigabit Ethernet" },
154         { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705,
155                 "Broadcom BCM5705 Gigabit Ethernet" },
156         { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705M,
157                 "Broadcom BCM5705M Gigabit Ethernet" },
158         { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705_ALT,
159                 "Broadcom BCM5705M Gigabit Ethernet" },
160         { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5782,
161                 "Broadcom BCM5782 Gigabit Ethernet" },
162         { PCI_VENDOR_BROADCOM, BCOM_DEVICEID_BCM5788,
163                 "Broadcom BCM5788 Gigabit Ethernet" },
164         { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5901,
165                 "Broadcom BCM5901 Fast Ethernet" },
166         { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5901A2,
167                 "Broadcom BCM5901A2 Fast Ethernet" },
168         { PCI_VENDOR_SCHNEIDERKOCH, PCI_PRODUCT_SCHNEIDERKOCH_SK_9DX1,
169                 "SysKonnect Gigabit Ethernet" },
170         { PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1000,
171                 "Altima AC1000 Gigabit Ethernet" },
172         { PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1001,
173                 "Altima AC1002 Gigabit Ethernet" },
174         { PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC9100,
175                 "Altima AC9100 Gigabit Ethernet" },
176         { 0, 0, NULL }
177 };
178
179 static int bge_probe            (device_t);
180 static int bge_attach           (device_t);
181 static int bge_detach           (device_t);
182 static void bge_release_resources
183                                 (struct bge_softc *);
184 static void bge_txeof           (struct bge_softc *);
185 static void bge_rxeof           (struct bge_softc *);
186
187 static void bge_tick            (void *);
188 static void bge_stats_update    (struct bge_softc *);
189 static void bge_stats_update_regs
190                                 (struct bge_softc *);
191 static int bge_encap            (struct bge_softc *, struct mbuf *,
192                                         u_int32_t *);
193
194 static void bge_intr            (void *);
195 static void bge_start           (struct ifnet *);
196 static int bge_ioctl            (struct ifnet *, u_long, caddr_t,
197                                         struct ucred *);
198 static void bge_init            (void *);
199 static void bge_stop            (struct bge_softc *);
200 static void bge_watchdog                (struct ifnet *);
201 static void bge_shutdown                (device_t);
202 static int bge_ifmedia_upd      (struct ifnet *);
203 static void bge_ifmedia_sts     (struct ifnet *, struct ifmediareq *);
204
205 static u_int8_t bge_eeprom_getbyte      (struct bge_softc *,
206                                                 int, u_int8_t *);
207 static int bge_read_eeprom      (struct bge_softc *, caddr_t, int, int);
208
209 static u_int32_t bge_crc        (caddr_t);
210 static void bge_setmulti        (struct bge_softc *);
211
212 static void bge_handle_events   (struct bge_softc *);
213 static int bge_alloc_jumbo_mem  (struct bge_softc *);
214 static void bge_free_jumbo_mem  (struct bge_softc *);
215 static void *bge_jalloc         (struct bge_softc *);
216 static void bge_jfree           (caddr_t, u_int);
217 static void bge_jref            (caddr_t, u_int);
218 static int bge_newbuf_std       (struct bge_softc *, int, struct mbuf *);
219 static int bge_newbuf_jumbo     (struct bge_softc *, int, struct mbuf *);
220 static int bge_init_rx_ring_std (struct bge_softc *);
221 static void bge_free_rx_ring_std        (struct bge_softc *);
222 static int bge_init_rx_ring_jumbo       (struct bge_softc *);
223 static void bge_free_rx_ring_jumbo      (struct bge_softc *);
224 static void bge_free_tx_ring    (struct bge_softc *);
225 static int bge_init_tx_ring     (struct bge_softc *);
226
227 static int bge_chipinit         (struct bge_softc *);
228 static int bge_blockinit        (struct bge_softc *);
229
230 #ifdef notdef
231 static u_int8_t bge_vpd_readbyte (struct bge_softc *, int);
232 static void bge_vpd_read_res    (struct bge_softc *,
233                                         struct vpd_res *, int);
234 static void bge_vpd_read        (struct bge_softc *);
235 #endif
236
237 static u_int32_t bge_readmem_ind
238                                 (struct bge_softc *, int);
239 static void bge_writemem_ind    (struct bge_softc *, int, int);
240 #ifdef notdef
241 static u_int32_t bge_readreg_ind
242                                 (struct bge_softc *, int);
243 #endif
244 static void bge_writereg_ind    (struct bge_softc *, int, int);
245
246 static int bge_miibus_readreg   (device_t, int, int);
247 static int bge_miibus_writereg  (device_t, int, int, int);
248 static void bge_miibus_statchg  (device_t);
249
250 static void bge_reset           (struct bge_softc *);
251
/*
 * newbus method table: device lifecycle entry points plus the MII
 * register accessors that the brgphy driver reaches through miibus.
 */
static device_method_t bge_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		bge_probe),
	DEVMETHOD(device_attach,	bge_attach),
	DEVMETHOD(device_detach,	bge_detach),
	DEVMETHOD(device_shutdown,	bge_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	bge_miibus_readreg),
	DEVMETHOD(miibus_writereg,	bge_miibus_writereg),
	DEVMETHOD(miibus_statchg,	bge_miibus_statchg),

	{ 0, 0 }
};

static driver_t bge_driver = {
	"bge",
	bge_methods,
	sizeof(struct bge_softc)
};

static devclass_t bge_devclass;

/* Attach bge to the PCI bus and hang a miibus instance off each bge. */
DECLARE_DUMMY_MODULE(if_bge);
DRIVER_MODULE(if_bge, pci, bge_driver, bge_devclass, 0, 0);
DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0);
282
283 static u_int32_t
284 bge_readmem_ind(sc, off)
285         struct bge_softc *sc;
286         int off;
287 {
288         device_t dev;
289
290         dev = sc->bge_dev;
291
292         pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
293         return(pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4));
294 }
295
296 static void
297 bge_writemem_ind(sc, off, val)
298         struct bge_softc *sc;
299         int off, val;
300 {
301         device_t dev;
302
303         dev = sc->bge_dev;
304
305         pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
306         pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
307
308         return;
309 }
310
#ifdef notdef
/*
 * Read a chip register indirectly through the PCI register
 * window.  Currently unused (compiled out).
 */
static u_int32_t
bge_readreg_ind(struct bge_softc *sc, int off)
{
	device_t dev = sc->bge_dev;

	pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
	return(pci_read_config(dev, BGE_PCI_REG_DATA, 4));
}
#endif
325
326 static void
327 bge_writereg_ind(sc, off, val)
328         struct bge_softc *sc;
329         int off, val;
330 {
331         device_t dev;
332
333         dev = sc->bge_dev;
334
335         pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
336         pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);
337
338         return;
339 }
340
341 #ifdef notdef
342 static u_int8_t
343 bge_vpd_readbyte(sc, addr)
344         struct bge_softc *sc;
345         int addr;
346 {
347         int i;
348         device_t dev;
349         u_int32_t val;
350
351         dev = sc->bge_dev;
352         pci_write_config(dev, BGE_PCI_VPD_ADDR, addr, 2);
353         for (i = 0; i < BGE_TIMEOUT * 10; i++) {
354                 DELAY(10);
355                 if (pci_read_config(dev, BGE_PCI_VPD_ADDR, 2) & BGE_VPD_FLAG)
356                         break;
357         }
358
359         if (i == BGE_TIMEOUT) {
360                 device_printf(sc->bge_dev, "VPD read timed out\n");
361                 return(0);
362         }
363
364         val = pci_read_config(dev, BGE_PCI_VPD_DATA, 4);
365
366         return((val >> ((addr % 4) * 8)) & 0xFF);
367 }
368
369 static void
370 bge_vpd_read_res(sc, res, addr)
371         struct bge_softc *sc;
372         struct vpd_res *res;
373         int addr;
374 {
375         int i;
376         u_int8_t *ptr;
377
378         ptr = (u_int8_t *)res;
379         for (i = 0; i < sizeof(struct vpd_res); i++)
380                 ptr[i] = bge_vpd_readbyte(sc, i + addr);
381
382         return;
383 }
384
385 static void
386 bge_vpd_read(sc)
387         struct bge_softc *sc;
388 {
389         int pos = 0, i;
390         struct vpd_res res;
391
392         if (sc->bge_vpd_prodname != NULL)
393                 free(sc->bge_vpd_prodname, M_DEVBUF);
394         if (sc->bge_vpd_readonly != NULL)
395                 free(sc->bge_vpd_readonly, M_DEVBUF);
396         sc->bge_vpd_prodname = NULL;
397         sc->bge_vpd_readonly = NULL;
398
399         bge_vpd_read_res(sc, &res, pos);
400
401         if (res.vr_id != VPD_RES_ID) {
402                 device_printf(sc->bge_dev,
403                               "bad VPD resource id: expected %x got %x\n",
404                               VPD_RES_ID, res.vr_id);
405                 return;
406         }
407
408         pos += sizeof(res);
409         sc->bge_vpd_prodname = malloc(res.vr_len + 1, M_DEVBUF, M_INTWAIT);
410         for (i = 0; i < res.vr_len; i++)
411                 sc->bge_vpd_prodname[i] = bge_vpd_readbyte(sc, i + pos);
412         sc->bge_vpd_prodname[i] = '\0';
413         pos += i;
414
415         bge_vpd_read_res(sc, &res, pos);
416
417         if (res.vr_id != VPD_RES_READ) {
418                 device_printf(sc->bge_dev,
419                               "bad VPD resource id: expected %x got %x\n",
420                               VPD_RES_READ, res.vr_id);
421                 return;
422         }
423
424         pos += sizeof(res);
425         sc->bge_vpd_readonly = malloc(res.vr_len, M_DEVBUF, M_INTWAIT);
426         for (i = 0; i < res.vr_len + 1; i++)
427                 sc->bge_vpd_readonly[i] = bge_vpd_readbyte(sc, i + pos);
428
429         return;
430 }
431 #endif
432
433 /*
434  * Read a byte of data stored in the EEPROM at address 'addr.' The
435  * BCM570x supports both the traditional bitbang interface and an
436  * auto access interface for reading the EEPROM. We use the auto
437  * access method.
438  */
439 static u_int8_t
440 bge_eeprom_getbyte(sc, addr, dest)
441         struct bge_softc *sc;
442         int addr;
443         u_int8_t *dest;
444 {
445         int i;
446         u_int32_t byte = 0;
447
448         /*
449          * Enable use of auto EEPROM access so we can avoid
450          * having to use the bitbang method.
451          */
452         BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);
453
454         /* Reset the EEPROM, load the clock period. */
455         CSR_WRITE_4(sc, BGE_EE_ADDR,
456             BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
457         DELAY(20);
458
459         /* Issue the read EEPROM command. */
460         CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);
461
462         /* Wait for completion */
463         for(i = 0; i < BGE_TIMEOUT * 10; i++) {
464                 DELAY(10);
465                 if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
466                         break;
467         }
468
469         if (i == BGE_TIMEOUT) {
470                 if_printf(&sc->arpcom.ac_if, "eeprom read timed out\n");
471                 return(0);
472         }
473
474         /* Get result. */
475         byte = CSR_READ_4(sc, BGE_EE_DATA);
476
477         *dest = (byte >> ((addr % 4) * 8)) & 0xFF;
478
479         return(0);
480 }
481
482 /*
483  * Read a sequence of bytes from the EEPROM.
484  */
485 static int
486 bge_read_eeprom(sc, dest, off, cnt)
487         struct bge_softc *sc;
488         caddr_t dest;
489         int off;
490         int cnt;
491 {
492         int err = 0, i;
493         u_int8_t byte = 0;
494
495         for (i = 0; i < cnt; i++) {
496                 err = bge_eeprom_getbyte(sc, off + i, &byte);
497                 if (err)
498                         break;
499                 *(dest + i) = byte;
500         }
501
502         return(err ? 1 : 0);
503 }
504
505 static int
506 bge_miibus_readreg(dev, phy, reg)
507         device_t dev;
508         int phy, reg;
509 {
510         struct bge_softc *sc;
511         struct ifnet *ifp;
512         u_int32_t val, autopoll;
513         int i;
514
515         sc = device_get_softc(dev);
516         ifp = &sc->arpcom.ac_if;
517
518         /*
519          * Broadcom's own driver always assumes the internal
520          * PHY is at GMII address 1. On some chips, the PHY responds
521          * to accesses at all addresses, which could cause us to
522          * bogusly attach the PHY 32 times at probe type. Always
523          * restricting the lookup to address 1 is simpler than
524          * trying to figure out which chips revisions should be
525          * special-cased.
526          */
527         if (phy != 1)
528                 return(0);
529
530         /* Reading with autopolling on may trigger PCI errors */
531         autopoll = CSR_READ_4(sc, BGE_MI_MODE);
532         if (autopoll & BGE_MIMODE_AUTOPOLL) {
533                 BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
534                 DELAY(40);
535         }
536
537         CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY|
538             BGE_MIPHY(phy)|BGE_MIREG(reg));
539
540         for (i = 0; i < BGE_TIMEOUT; i++) {
541                 val = CSR_READ_4(sc, BGE_MI_COMM);
542                 if (!(val & BGE_MICOMM_BUSY))
543                         break;
544         }
545
546         if (i == BGE_TIMEOUT) {
547                 if_printf(ifp, "PHY read timed out\n");
548                 val = 0;
549                 goto done;
550         }
551
552         val = CSR_READ_4(sc, BGE_MI_COMM);
553
554 done:
555         if (autopoll & BGE_MIMODE_AUTOPOLL) {
556                 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
557                 DELAY(40);
558         }
559
560         if (val & BGE_MICOMM_READFAIL)
561                 return(0);
562
563         return(val & 0xFFFF);
564 }
565
566 static int
567 bge_miibus_writereg(dev, phy, reg, val)
568         device_t dev;
569         int phy, reg, val;
570 {
571         struct bge_softc *sc;
572         u_int32_t autopoll;
573         int i;
574
575         sc = device_get_softc(dev);
576
577         /* Reading with autopolling on may trigger PCI errors */
578         autopoll = CSR_READ_4(sc, BGE_MI_MODE);
579         if (autopoll & BGE_MIMODE_AUTOPOLL) {
580                 BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
581                 DELAY(40);
582         }
583
584         CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY|
585             BGE_MIPHY(phy)|BGE_MIREG(reg)|val);
586
587         for (i = 0; i < BGE_TIMEOUT; i++) {
588                 if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY))
589                         break;
590         }
591
592         if (autopoll & BGE_MIMODE_AUTOPOLL) {
593                 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
594                 DELAY(40);
595         }
596
597         if (i == BGE_TIMEOUT) {
598                 if_printf(&sc->arpcom.ac_if, "PHY read timed out\n");
599                 return(0);
600         }
601
602         return(0);
603 }
604
605 static void
606 bge_miibus_statchg(dev)
607         device_t dev;
608 {
609         struct bge_softc *sc;
610         struct mii_data *mii;
611
612         sc = device_get_softc(dev);
613         mii = device_get_softc(sc->bge_miibus);
614
615         BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
616         if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
617                 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
618         } else {
619                 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
620         }
621
622         if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
623                 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
624         } else {
625                 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
626         }
627
628         return;
629 }
630
/*
 * Handle events that have triggered interrupts.
 * Currently a no-op placeholder.
 */
static void
bge_handle_events(struct bge_softc *sc)
{
}
641
642 /*
643  * Memory management for jumbo frames.
644  */
645
646 static int
647 bge_alloc_jumbo_mem(sc)
648         struct bge_softc                *sc;
649 {
650         caddr_t                 ptr;
651         int             i;
652         struct bge_jpool_entry   *entry;
653
654         /* Grab a big chunk o' storage. */
655         sc->bge_cdata.bge_jumbo_buf = contigmalloc(BGE_JMEM, M_DEVBUF,
656                 M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);
657
658         if (sc->bge_cdata.bge_jumbo_buf == NULL) {
659                 if_printf(&sc->arpcom.ac_if, "no memory for jumbo buffers!\n");
660                 return(ENOBUFS);
661         }
662
663         SLIST_INIT(&sc->bge_jfree_listhead);
664         SLIST_INIT(&sc->bge_jinuse_listhead);
665
666         /*
667          * Now divide it up into 9K pieces and save the addresses
668          * in an array. Note that we play an evil trick here by using
669          * the first few bytes in the buffer to hold the the address
670          * of the softc structure for this interface. This is because
671          * bge_jfree() needs it, but it is called by the mbuf management
672          * code which will not pass it to us explicitly.
673          */
674         ptr = sc->bge_cdata.bge_jumbo_buf;
675         for (i = 0; i < BGE_JSLOTS; i++) {
676                 u_int64_t               **aptr;
677                 aptr = (u_int64_t **)ptr;
678                 aptr[0] = (u_int64_t *)sc;
679                 ptr += sizeof(u_int64_t);
680                 sc->bge_cdata.bge_jslots[i].bge_buf = ptr;
681                 sc->bge_cdata.bge_jslots[i].bge_inuse = 0;
682                 ptr += (BGE_JLEN - sizeof(u_int64_t));
683                 entry = malloc(sizeof(struct bge_jpool_entry), 
684                                M_DEVBUF, M_INTWAIT);
685                 entry->slot = i;
686                 SLIST_INSERT_HEAD(&sc->bge_jfree_listhead,
687                     entry, jpool_entries);
688         }
689
690         return(0);
691 }
692
693 static void
694 bge_free_jumbo_mem(sc)
695         struct bge_softc *sc;
696 {
697         int i;
698         struct bge_jpool_entry *entry;
699  
700         for (i = 0; i < BGE_JSLOTS; i++) {
701                 entry = SLIST_FIRST(&sc->bge_jfree_listhead);
702                 SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jpool_entries);
703                 free(entry, M_DEVBUF);
704         }
705
706         contigfree(sc->bge_cdata.bge_jumbo_buf, BGE_JMEM, M_DEVBUF);
707
708         return;
709 }
710
711 /*
712  * Allocate a jumbo buffer.
713  */
714 static void *
715 bge_jalloc(sc)
716         struct bge_softc                *sc;
717 {
718         struct bge_jpool_entry   *entry;
719         
720         entry = SLIST_FIRST(&sc->bge_jfree_listhead);
721         
722         if (entry == NULL) {
723                 if_printf(&sc->arpcom.ac_if, "no free jumbo buffers\n");
724                 return(NULL);
725         }
726
727         SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jpool_entries);
728         SLIST_INSERT_HEAD(&sc->bge_jinuse_listhead, entry, jpool_entries);
729         sc->bge_cdata.bge_jslots[entry->slot].bge_inuse = 1;
730         return(sc->bge_cdata.bge_jslots[entry->slot].bge_buf);
731 }
732
733 /*
734  * Adjust usage count on a jumbo buffer.
735  */
736 static void
737 bge_jref(buf, size)
738         caddr_t                 buf;
739         u_int                   size;
740 {
741         struct bge_softc                *sc;
742         u_int64_t               **aptr;
743         int             i;
744
745         /* Extract the softc struct pointer. */
746         aptr = (u_int64_t **)(buf - sizeof(u_int64_t));
747         sc = (struct bge_softc *)(aptr[0]);
748
749         if (sc == NULL)
750                 panic("bge_jref: can't find softc pointer!");
751
752         if (size != BGE_JUMBO_FRAMELEN)
753                 panic("bge_jref: adjusting refcount of buf of wrong size!");
754
755         /* calculate the slot this buffer belongs to */
756
757         i = ((vm_offset_t)aptr 
758              - (vm_offset_t)sc->bge_cdata.bge_jumbo_buf) / BGE_JLEN;
759
760         if ((i < 0) || (i >= BGE_JSLOTS))
761                 panic("bge_jref: asked to reference buffer "
762                     "that we don't manage!");
763         else if (sc->bge_cdata.bge_jslots[i].bge_inuse == 0)
764                 panic("bge_jref: buffer already free!");
765         else
766                 sc->bge_cdata.bge_jslots[i].bge_inuse++;
767
768         return;
769 }
770
/*
 * Release a jumbo buffer.  Decrements the slot's usage count; when it
 * reaches zero, a bookkeeping entry is moved from the in-use list back
 * to the free list.  The owning softc is recovered from the hidden
 * word stored just before the buffer by bge_alloc_jumbo_mem().
 */
static void
bge_jfree(buf, size)
	caddr_t			buf;
	u_int			size;
{
	struct bge_softc		*sc;
	u_int64_t		**aptr;
	int			i;
	struct bge_jpool_entry   *entry;

	/* Extract the softc struct pointer. */
	aptr = (u_int64_t **)(buf - sizeof(u_int64_t));
	sc = (struct bge_softc *)(aptr[0]);

	if (sc == NULL)
		panic("bge_jfree: can't find softc pointer!");

	if (size != BGE_JUMBO_FRAMELEN)
		panic("bge_jfree: freeing buffer of wrong size!");

	/* calculate the slot this buffer belongs to */

	i = ((vm_offset_t)aptr 
	     - (vm_offset_t)sc->bge_cdata.bge_jumbo_buf) / BGE_JLEN;

	if ((i < 0) || (i >= BGE_JSLOTS))
		panic("bge_jfree: asked to free buffer that we don't manage!");
	else if (sc->bge_cdata.bge_jslots[i].bge_inuse == 0)
		panic("bge_jfree: buffer already free!");
	else {
		sc->bge_cdata.bge_jslots[i].bge_inuse--;
		if(sc->bge_cdata.bge_jslots[i].bge_inuse == 0) {
			/*
			 * NOTE: the entries do not track which slot they
			 * belong to, so an arbitrary entry is taken off
			 * the head of the in-use list and re-labeled with
			 * this slot before being returned to the free list.
			 */
			entry = SLIST_FIRST(&sc->bge_jinuse_listhead);
			if (entry == NULL)
				panic("bge_jfree: buffer not in use!");
			entry->slot = i;
			SLIST_REMOVE_HEAD(&sc->bge_jinuse_listhead, 
					  jpool_entries);
			SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, 
					  entry, jpool_entries);
		}
	}

	return;
}
819
820
821 /*
822  * Intialize a standard receive ring descriptor.
823  */
824 static int
825 bge_newbuf_std(sc, i, m)
826         struct bge_softc        *sc;
827         int                     i;
828         struct mbuf             *m;
829 {
830         struct mbuf             *m_new = NULL;
831         struct bge_rx_bd        *r;
832
833         if (m == NULL) {
834                 MGETHDR(m_new, MB_DONTWAIT, MT_DATA);
835                 if (m_new == NULL) {
836                         return(ENOBUFS);
837                 }
838
839                 MCLGET(m_new, MB_DONTWAIT);
840                 if (!(m_new->m_flags & M_EXT)) {
841                         m_freem(m_new);
842                         return(ENOBUFS);
843                 }
844                 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
845         } else {
846                 m_new = m;
847                 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
848                 m_new->m_data = m_new->m_ext.ext_buf;
849         }
850
851         if (!sc->bge_rx_alignment_bug)
852                 m_adj(m_new, ETHER_ALIGN);
853         sc->bge_cdata.bge_rx_std_chain[i] = m_new;
854         r = &sc->bge_rdata->bge_rx_std_ring[i];
855         BGE_HOSTADDR(r->bge_addr, vtophys(mtod(m_new, caddr_t)));
856         r->bge_flags = BGE_RXBDFLAG_END;
857         r->bge_len = m_new->m_len;
858         r->bge_idx = i;
859
860         return(0);
861 }
862
863 /*
864  * Initialize a jumbo receive ring descriptor. This allocates
865  * a jumbo buffer from the pool managed internally by the driver.
866  */
static int
bge_newbuf_jumbo(sc, i, m)
	struct bge_softc *sc;
	int i;
	struct mbuf *m;
{
	struct mbuf *m_new = NULL;
	struct bge_rx_bd *r;

	if (m == NULL) {
		caddr_t			*buf = NULL;

		/* Allocate the mbuf. */
		MGETHDR(m_new, MB_DONTWAIT, MT_DATA);
		if (m_new == NULL) {
			return(ENOBUFS);
		}

		/* Allocate the jumbo buffer from the driver's pool. */
		buf = bge_jalloc(sc);
		if (buf == NULL) {
			m_freem(m_new);
			if_printf(&sc->arpcom.ac_if, "jumbo allocation failed "
			    "-- packet dropped!\n");
			return(ENOBUFS);
		}

		/*
		 * Attach the buffer to the mbuf as old-style external
		 * storage; bge_jref/bge_jfree manage its reference count.
		 */
		m_new->m_data = m_new->m_ext.ext_buf = (void *)buf;
		m_new->m_flags |= M_EXT | M_EXT_OLD;
		m_new->m_len = m_new->m_pkthdr.len =
		    m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN;
		m_new->m_ext.ext_nfree.old = bge_jfree;
		m_new->m_ext.ext_nref.old = bge_jref;
	} else {
		/* Recycle the caller's mbuf: rewind data to buffer start. */
		m_new = m;
		m_new->m_data = m_new->m_ext.ext_buf;
		m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN;
	}

	/* Align the payload unless the chip can't DMA unaligned. */
	if (!sc->bge_rx_alignment_bug)
		m_adj(m_new, ETHER_ALIGN);
	/* Set up the descriptor. */
	r = &sc->bge_rdata->bge_rx_jumbo_ring[i];
	sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new;
	BGE_HOSTADDR(r->bge_addr, vtophys(mtod(m_new, caddr_t)));
	r->bge_flags = BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING;
	r->bge_len = m_new->m_len;
	r->bge_idx = i;

	return(0);
}
919
920 /*
921  * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
 * that's 1MB of memory, which is a lot. For now, we fill only the first
923  * 256 ring entries and hope that our CPU is fast enough to keep up with
924  * the NIC.
925  */
926 static int
927 bge_init_rx_ring_std(sc)
928         struct bge_softc *sc;
929 {
930         int i;
931
932         for (i = 0; i < BGE_SSLOTS; i++) {
933                 if (bge_newbuf_std(sc, i, NULL) == ENOBUFS)
934                         return(ENOBUFS);
935         };
936
937         sc->bge_std = i - 1;
938         CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
939
940         return(0);
941 }
942
943 static void
944 bge_free_rx_ring_std(sc)
945         struct bge_softc *sc;
946 {
947         int i;
948
949         for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
950                 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
951                         m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
952                         sc->bge_cdata.bge_rx_std_chain[i] = NULL;
953                 }
954                 bzero((char *)&sc->bge_rdata->bge_rx_std_ring[i],
955                     sizeof(struct bge_rx_bd));
956         }
957
958         return;
959 }
960
961 static int
962 bge_init_rx_ring_jumbo(sc)
963         struct bge_softc *sc;
964 {
965         int i;
966         struct bge_rcb *rcb;
967
968         for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
969                 if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
970                         return(ENOBUFS);
971         };
972
973         sc->bge_jumbo = i - 1;
974
975         rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
976         rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 0);
977         CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
978
979         CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
980
981         return(0);
982 }
983
984 static void
985 bge_free_rx_ring_jumbo(sc)
986         struct bge_softc *sc;
987 {
988         int i;
989
990         for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
991                 if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
992                         m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
993                         sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
994                 }
995                 bzero((char *)&sc->bge_rdata->bge_rx_jumbo_ring[i],
996                     sizeof(struct bge_rx_bd));
997         }
998
999         return;
1000 }
1001
1002 static void
1003 bge_free_tx_ring(sc)
1004         struct bge_softc *sc;
1005 {
1006         int i;
1007
1008         if (sc->bge_rdata->bge_tx_ring == NULL)
1009                 return;
1010
1011         for (i = 0; i < BGE_TX_RING_CNT; i++) {
1012                 if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
1013                         m_freem(sc->bge_cdata.bge_tx_chain[i]);
1014                         sc->bge_cdata.bge_tx_chain[i] = NULL;
1015                 }
1016                 bzero((char *)&sc->bge_rdata->bge_tx_ring[i],
1017                     sizeof(struct bge_tx_bd));
1018         }
1019
1020         return;
1021 }
1022
/*
 * Initialize the transmit ring: reset the software producer/consumer
 * state and zero both the host and NIC transmit producer mailboxes.
 * Always returns 0.
 */
static int
bge_init_tx_ring(sc)
	struct bge_softc *sc;
{
	sc->bge_txcnt = 0;
	sc->bge_tx_saved_considx = 0;

	CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0);
	/* 5700 b2 errata: repeat the mailbox write. */
	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
		CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0);

	CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
	/* 5700 b2 errata: repeat the mailbox write. */
	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
		CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);

	return(0);
}
1042
1043 #define BGE_POLY        0xEDB88320
1044
1045 static u_int32_t
1046 bge_crc(addr)
1047         caddr_t addr;
1048 {
1049         u_int32_t idx, bit, data, crc;
1050
1051         /* Compute CRC for the address value. */
1052         crc = 0xFFFFFFFF; /* initial value */
1053
1054         for (idx = 0; idx < 6; idx++) {
1055                 for (data = *addr++, bit = 0; bit < 8; bit++, data >>= 1)
1056                         crc = (crc >> 1) ^ (((crc ^ data) & 1) ? BGE_POLY : 0);
1057         }
1058
1059         return(crc & 0x7F);
1060 }
1061
1062 static void
1063 bge_setmulti(sc)
1064         struct bge_softc *sc;
1065 {
1066         struct ifnet *ifp;
1067         struct ifmultiaddr *ifma;
1068         u_int32_t hashes[4] = { 0, 0, 0, 0 };
1069         int h, i;
1070
1071         ifp = &sc->arpcom.ac_if;
1072
1073         if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
1074                 for (i = 0; i < 4; i++)
1075                         CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
1076                 return;
1077         }
1078
1079         /* First, zot all the existing filters. */
1080         for (i = 0; i < 4; i++)
1081                 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);
1082
1083         /* Now program new ones. */
1084         for (ifma = ifp->if_multiaddrs.lh_first;
1085             ifma != NULL; ifma = ifma->ifma_link.le_next) {
1086                 if (ifma->ifma_addr->sa_family != AF_LINK)
1087                         continue;
1088                 h = bge_crc(LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
1089                 hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
1090         }
1091
1092         for (i = 0; i < 4; i++)
1093                 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
1094
1095         return;
1096 }
1097
1098 /*
1099  * Do endian, PCI and DMA initialization. Also check the on-board ROM
1100  * self-test results.
1101  */
static int
bge_chipinit(sc)
	struct bge_softc *sc;
{
	int			i;
	u_int32_t		dma_rw_ctl;

	/* Set endianness before we access any non-PCI registers. */
#if BYTE_ORDER == BIG_ENDIAN
	pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL,
	    BGE_BIGENDIAN_INIT, 4);
#else
	pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL,
	    BGE_LITTLEENDIAN_INIT, 4);
#endif

	/*
	 * Check the 'ROM failed' bit on the RX CPU to see if
	 * self-tests passed.
	 */
	if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL) {
		if_printf(&sc->arpcom.ac_if,
			  "RX CPU self-diagnostics failed!\n");
		return(ENODEV);
	}

	/* Clear the MAC control register */
	CSR_WRITE_4(sc, BGE_MAC_MODE, 0);

	/*
	 * Clear the MAC statistics block in the NIC's
	 * internal memory.
	 */
	for (i = BGE_STATS_BLOCK;
	    i < BGE_STATS_BLOCK_END + 1; i += sizeof(u_int32_t))
		BGE_MEMWIN_WRITE(sc, i, 0);

	/* Likewise the status block region. */
	for (i = BGE_STATUS_BLOCK;
	    i < BGE_STATUS_BLOCK_END + 1; i += sizeof(u_int32_t))
		BGE_MEMWIN_WRITE(sc, i, 0);

	/*
	 * Set up the PCI DMA control register.  The read/write
	 * watermark values are presumably per Broadcom's
	 * recommendations -- TODO confirm against the chip docs.
	 */
	if (pci_read_config(sc->bge_dev, BGE_PCI_PCISTATE, 4) &
	    BGE_PCISTATE_PCI_BUSMODE) {
		/* Conventional PCI bus */
		dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
		    (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
		    (0x7 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
		    (0x0F);
	} else {
		/* PCI-X bus */
		/*
		 * The 5704 uses a different encoding of read/write
		 * watermarks.
		 */
		if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
			dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
			    (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
			    (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
		else
			dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
			    (0x3 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
			    (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
			    (0x0F);

		/*
		 * 5703 and 5704 need ONEDMA_AT_ONCE as a workaround
		 * for hardware bugs.
		 */
		if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
		    sc->bge_asicrev == BGE_ASICREV_BCM5704) {
			u_int32_t tmp;

			tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1f;
			if (tmp == 0x6 || tmp == 0x7)
				dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE;
		}
	}

	if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5704 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5705)
		dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
	pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);

	/*
	 * Set up general mode register.
	 */
	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_MODECTL_WORDSWAP_NONFRAME|
	    BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA|
	    BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS|
	    BGE_MODECTL_TX_NO_PHDR_CSUM|BGE_MODECTL_RX_NO_PHDR_CSUM);

	/*
	 * Disable memory write invalidate.  Apparently it is not supported
	 * properly by these devices.
	 */
	PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD, PCIM_CMD_MWIEN, 4);

	/* Set the timer prescaler (always 66Mhz) */
	CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/);

	return(0);
}
1206
/*
 * Initialize the chip's on-board memory pools, ring control blocks,
 * host coalescing parameters and internal state machines.  Called
 * after reset/chipinit.  Returns 0 on success or ENXIO if a hardware
 * block fails to come ready within BGE_TIMEOUT polls.
 */
static int
bge_blockinit(sc)
	struct bge_softc *sc;
{
	struct bge_rcb *rcb;
	volatile struct bge_rcb *vrcb;
	int i;

	/*
	 * Initialize the memory window pointer register so that
	 * we can access the first 32K of internal NIC RAM. This will
	 * allow us to set up the TX send ring RCBs and the RX return
	 * ring RCBs, plus other things which live in NIC memory.
	 */
	CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);

	/* Note: the BCM5704 has a smaller mbuf space than other chips. */

	if (sc->bge_asicrev != BGE_ASICREV_BCM5705) {
		/* Configure mbuf memory pool */
		if (sc->bge_extram) {
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
			    BGE_EXT_SSRAM);
			if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
				CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
			else
				CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
		} else {
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
			    BGE_BUFFPOOL_1);
			if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
				CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
			else
				CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
		}

		/* Configure DMA resource pool */
		CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
		    BGE_DMA_DESCRIPTORS);
		CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
	}

	/*
	 * Configure mbuf pool watermarks.  Values are presumably per
	 * Broadcom's recommendations -- TODO confirm against chip docs.
	 */
	if (sc->bge_asicrev == BGE_ASICREV_BCM5705) {
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
	} else {
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
	}
	CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);

	/* Configure DMA resource watermarks */
	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);

	/* Enable buffer manager */
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705) {
		CSR_WRITE_4(sc, BGE_BMAN_MODE,
		    BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN);

		/* Poll for buffer manager start indication */
		for (i = 0; i < BGE_TIMEOUT; i++) {
			if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
				break;
			DELAY(10);
		}

		if (i == BGE_TIMEOUT) {
			if_printf(&sc->arpcom.ac_if,
				  "buffer manager failed to start\n");
			return(ENXIO);
		}
	}

	/* Enable flow-through queues */
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);

	/* Wait until queue initialization is complete */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
			break;
		DELAY(10);
	}

	if (i == BGE_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if,
			  "flow-through queue init failed\n");
		return(ENXIO);
	}

	/* Initialize the standard RX ring control block */
	rcb = &sc->bge_rdata->bge_info.bge_std_rx_rcb;
	BGE_HOSTADDR(rcb->bge_hostaddr,
	    vtophys(&sc->bge_rdata->bge_rx_std_ring));
	if (sc->bge_asicrev == BGE_ASICREV_BCM5705)
		rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
	else
		rcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
	if (sc->bge_extram)
		rcb->bge_nicaddr = BGE_EXT_STD_RX_RINGS;
	else
		rcb->bge_nicaddr = BGE_STD_RX_RINGS;
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);

	/*
	 * Initialize the jumbo RX ring control block
	 * We set the 'ring disabled' bit in the flags
	 * field until we're actually ready to start
	 * using this ring (i.e. once we set the MTU
	 * high enough to require it).
	 */
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705) {
		rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
		BGE_HOSTADDR(rcb->bge_hostaddr,
		    vtophys(&sc->bge_rdata->bge_rx_jumbo_ring));
		rcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN,
		    BGE_RCB_FLAG_RING_DISABLED);
		if (sc->bge_extram)
			rcb->bge_nicaddr = BGE_EXT_JUMBO_RX_RINGS;
		else
			rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
		    rcb->bge_hostaddr.bge_addr_hi);
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
		    rcb->bge_hostaddr.bge_addr_lo);
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
		    rcb->bge_maxlen_flags);
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);

		/* Set up dummy disabled mini ring RCB */
		rcb = &sc->bge_rdata->bge_info.bge_mini_rx_rcb;
		rcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
		CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
		    rcb->bge_maxlen_flags);
	}

	/*
	 * Set the BD ring replenish thresholds. The recommended
	 * values are 1/8th the number of descriptors allocated to
	 * each ring.
	 */
	CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, BGE_STD_RX_RING_CNT/8);
	CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8);

	/*
	 * Disable all unused send rings by setting the 'ring disabled'
	 * bit in the flags field of all the TX send ring control blocks.
	 * These are located in NIC memory.
	 */
	vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START +
	    BGE_SEND_RING_RCB);
	for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) {
		vrcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
		vrcb->bge_nicaddr = 0;
		vrcb++;
	}

	/* Configure TX RCB 0 (we use only the first ring) */
	vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START +
	    BGE_SEND_RING_RCB);
	vrcb->bge_hostaddr.bge_addr_hi = 0;
	BGE_HOSTADDR(vrcb->bge_hostaddr, vtophys(&sc->bge_rdata->bge_tx_ring));
	vrcb->bge_nicaddr = BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT);
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705)
		vrcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0);

	/* Disable all unused RX return rings */
	vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START +
	    BGE_RX_RETURN_RING_RCB);
	for (i = 0; i < BGE_RX_RINGS_MAX; i++) {
		vrcb->bge_hostaddr.bge_addr_hi = 0;
		vrcb->bge_hostaddr.bge_addr_lo = 0;
		vrcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt,
		    BGE_RCB_FLAG_RING_DISABLED);
		vrcb->bge_nicaddr = 0;
		CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO +
		    (i * (sizeof(u_int64_t))), 0);
		vrcb++;
	}

	/* Initialize RX ring indexes */
	CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, 0);
	CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
	CSR_WRITE_4(sc, BGE_MBX_RX_MINI_PROD_LO, 0);

	/*
	 * Set up RX return ring 0
	 * Note that the NIC address for RX return rings is 0x00000000.
	 * The return rings live entirely within the host, so the
	 * nicaddr field in the RCB isn't used.
	 */
	vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START +
	    BGE_RX_RETURN_RING_RCB);
	vrcb->bge_hostaddr.bge_addr_hi = 0;
	BGE_HOSTADDR(vrcb->bge_hostaddr,
	    vtophys(&sc->bge_rdata->bge_rx_return_ring));
	vrcb->bge_nicaddr = 0x00000000;
	vrcb->bge_maxlen_flags =
	    BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0);

	/* Set random backoff seed for TX */
	CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
	    sc->arpcom.ac_enaddr[0] + sc->arpcom.ac_enaddr[1] +
	    sc->arpcom.ac_enaddr[2] + sc->arpcom.ac_enaddr[3] +
	    sc->arpcom.ac_enaddr[4] + sc->arpcom.ac_enaddr[5] +
	    BGE_TX_BACKOFF_SEED_MASK);

	/* Set inter-packet gap */
	CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);

	/*
	 * Specify which ring to use for packets that don't match
	 * any RX rules.
	 */
	CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);

	/*
	 * Configure number of RX lists. One interrupt distribution
	 * list, sixteen active lists, one bad frames class.
	 */
	CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);

	/* Initialize RX list placement stats mask. */
	CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
	CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);

	/* Disable host coalescing until we get it set up */
	CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);

	/* Poll to make sure it's shut down. */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
			break;
		DELAY(10);
	}

	if (i == BGE_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if,
			  "host coalescing engine failed to idle\n");
		return(ENXIO);
	}

	/* Set up host coalescing defaults */
	CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
	CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705) {
		CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
		CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
	}
	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0);
	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0);

	/* Set up address of statistics block */
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705) {
		CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI, 0);
		CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
		    vtophys(&sc->bge_rdata->bge_info.bge_stats));

		CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
		CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
		CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
	}

	/* Set up address of status block */
	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI, 0);
	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
	    vtophys(&sc->bge_rdata->bge_status_block));

	sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx = 0;
	sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx = 0;

	/* Turn on host coalescing state machine */
	CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);

	/* Turn on RX BD completion state machine and enable attentions */
	CSR_WRITE_4(sc, BGE_RBDC_MODE,
	    BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);

	/* Turn on RX list placement state machine */
	CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);

	/* Turn on RX list selector state machine. */
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705)
		CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);

	/* Turn on DMA, clear stats */
	CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB|
	    BGE_MACMODE_RXDMA_ENB|BGE_MACMODE_RX_STATS_CLEAR|
	    BGE_MACMODE_TX_STATS_CLEAR|BGE_MACMODE_RX_STATS_ENB|
	    BGE_MACMODE_TX_STATS_ENB|BGE_MACMODE_FRMHDR_DMA_ENB|
	    (sc->bge_tbi ? BGE_PORTMODE_TBI : BGE_PORTMODE_MII));

	/* Set misc. local control, enable interrupts on attentions */
	CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);

#ifdef notdef
	/* Assert GPIO pins for PHY reset */
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
	    BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
	    BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
#endif

	/* Turn on DMA completion state machine */
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705)
		CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);

	/* Turn on write DMA state machine */
	CSR_WRITE_4(sc, BGE_WDMA_MODE,
	    BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS);

	/* Turn on read DMA state machine */
	CSR_WRITE_4(sc, BGE_RDMA_MODE,
	    BGE_RDMAMODE_ENABLE|BGE_RDMAMODE_ALL_ATTNS);

	/* Turn on RX data completion state machine */
	CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);

	/* Turn on RX BD initiator state machine */
	CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);

	/* Turn on RX data and RX BD initiator state machine */
	CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);

	/* Turn on Mbuf cluster free state machine */
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705)
		CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);

	/* Turn on send BD completion state machine */
	CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);

	/* Turn on send data completion state machine */
	CSR_WRITE_4(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);

	/* Turn on send data initiator state machine */
	CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);

	/* Turn on send BD initiator state machine */
	CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);

	/* Turn on send BD selector state machine */
	CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);

	/* Enable send data initiator statistics */
	CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
	CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
	    BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);

	/* ack/clear link change events */
	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
	    BGE_MACSTAT_LINK_CHANGED);

	/* Enable PHY auto polling (for MII/GMII only) */
	if (sc->bge_tbi) {
		CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
	} else {
		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL|10<<16);
		if (sc->bge_asicrev == BGE_ASICREV_BCM5700)
			CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
			    BGE_EVTENB_MI_INTERRUPT);
	}

	/* Enable link state change attentions. */
	BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);

	return(0);
}
1587
1588 /*
1589  * Probe for a Broadcom chip. Check the PCI vendor and device IDs
1590  * against our list and return its name if we find a match. Note
1591  * that since the Broadcom controller contains VPD support, we
1592  * can get the device name string from the controller itself instead
1593  * of the compiled-in string. This is a little slow, but it guarantees
1594  * we'll always announce the right product name.
1595  */
static int
bge_probe(dev)
	device_t dev;
{
	struct bge_type *t;
	struct bge_softc *sc;
	char *descbuf;

	t = bge_devs;

	/*
	 * NOTE(review): probe zeroes the softc and stashes the device
	 * handle here; attach appears to rely on this having been done.
	 */
	sc = device_get_softc(dev);
	bzero(sc, sizeof(struct bge_softc));
	sc->bge_dev = dev;

	/* Walk the supported-device table looking for a vid/did match. */
	while(t->bge_name != NULL) {
		if ((pci_get_vendor(dev) == t->bge_vid) &&
		    (pci_get_device(dev) == t->bge_did)) {
#ifdef notdef
			bge_vpd_read(sc);
			device_set_desc(dev, sc->bge_vpd_prodname);
#endif
			/* Build a "<name>, ASIC rev. 0xNNNN" description. */
			descbuf = malloc(BGE_DEVDESC_MAX, M_TEMP, M_INTWAIT);
			snprintf(descbuf, BGE_DEVDESC_MAX,
			    "%s, ASIC rev. %#04x", t->bge_name,
			    pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >> 16);
			device_set_desc_copy(dev, descbuf);
			/* Dell OEM boards apparently need this quirk. */
			if (pci_get_subvendor(dev) == PCI_VENDOR_DELL)
				sc->bge_no_3_led = 1;
			free(descbuf, M_TEMP);
			return(0);
		}
		t++;
	}

	return(ENXIO);
}
1632
1633 static int
1634 bge_attach(dev)
1635         device_t dev;
1636 {
1637         int s;
1638         u_int32_t command;
1639         struct ifnet *ifp;
1640         struct bge_softc *sc;
1641         u_int32_t hwcfg = 0;
1642         u_int32_t mac_addr = 0;
1643         int error = 0, rid;
1644         uint8_t ether_addr[ETHER_ADDR_LEN];
1645
1646         s = splimp();
1647
1648         sc = device_get_softc(dev);
1649         sc->bge_dev = dev;
1650         callout_init(&sc->bge_stat_timer);
1651
1652         /*
1653          * Map control/status registers.
1654          */
1655         command = pci_read_config(dev, PCIR_COMMAND, 4);
1656         command |= (PCIM_CMD_MEMEN|PCIM_CMD_BUSMASTEREN);
1657         pci_write_config(dev, PCIR_COMMAND, command, 4);
1658         command = pci_read_config(dev, PCIR_COMMAND, 4);
1659
1660         if (!(command & PCIM_CMD_MEMEN)) {
1661                 device_printf(dev, "failed to enable memory mapping!\n");
1662                 error = ENXIO;
1663                 goto fail;
1664         }
1665
1666         rid = BGE_PCI_BAR0;
1667         sc->bge_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid,
1668             0, ~0, 1, RF_ACTIVE);
1669
1670         if (sc->bge_res == NULL) {
1671                 device_printf(dev, "couldn't map memory\n");
1672                 error = ENXIO;
1673                 goto fail;
1674         }
1675
1676         sc->bge_btag = rman_get_bustag(sc->bge_res);
1677         sc->bge_bhandle = rman_get_bushandle(sc->bge_res);
1678         sc->bge_vhandle = (vm_offset_t)rman_get_virtual(sc->bge_res);
1679
1680         /* Allocate interrupt */
1681         rid = 0;
1682         
1683         sc->bge_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1,
1684             RF_SHAREABLE | RF_ACTIVE);
1685
1686         if (sc->bge_irq == NULL) {
1687                 device_printf(dev, "couldn't map interrupt\n");
1688                 error = ENXIO;
1689                 goto fail;
1690         }
1691
1692         error = bus_setup_intr(dev, sc->bge_irq, INTR_TYPE_NET,
1693            bge_intr, sc, &sc->bge_intrhand);
1694
1695         if (error) {
1696                 bge_release_resources(sc);
1697                 device_printf(dev, "couldn't set up irq\n");
1698                 goto fail;
1699         }
1700
1701         ifp = &sc->arpcom.ac_if;
1702         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1703
1704         /* Try to reset the chip. */
1705         bge_reset(sc);
1706
1707         if (bge_chipinit(sc)) {
1708                 device_printf(dev, "chip initialization failed\n");
1709                 bge_release_resources(sc);
1710                 error = ENXIO;
1711                 goto fail;
1712         }
1713
1714         /*
1715          * Get station address from the EEPROM.
1716          */
1717         mac_addr = bge_readmem_ind(sc, 0x0c14);
1718         if ((mac_addr >> 16) == 0x484b) {
1719                 ether_addr[0] = (uint8_t)(mac_addr >> 8);
1720                 ether_addr[1] = (uint8_t)mac_addr;
1721                 mac_addr = bge_readmem_ind(sc, 0x0c18);
1722                 ether_addr[2] = (uint8_t)(mac_addr >> 24);
1723                 ether_addr[3] = (uint8_t)(mac_addr >> 16);
1724                 ether_addr[4] = (uint8_t)(mac_addr >> 8);
1725                 ether_addr[5] = (uint8_t)mac_addr;
1726         } else if (bge_read_eeprom(sc, ether_addr,
1727             BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) {
1728                 device_printf(dev, "failed to read station address\n");
1729                 bge_release_resources(sc);
1730                 error = ENXIO;
1731                 goto fail;
1732         }
1733
1734         /* Allocate the general information block and ring buffers. */
1735         sc->bge_rdata = contigmalloc(sizeof(struct bge_ring_data), M_DEVBUF,
1736             M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);
1737
1738         if (sc->bge_rdata == NULL) {
1739                 bge_release_resources(sc);
1740                 error = ENXIO;
1741                 device_printf(dev, "no memory for list buffers!\n");
1742                 goto fail;
1743         }
1744
1745         bzero(sc->bge_rdata, sizeof(struct bge_ring_data));
1746
1747         /* Save ASIC rev. */
1748
1749         sc->bge_chipid =
1750             pci_read_config(dev, BGE_PCI_MISC_CTL, 4) &
1751             BGE_PCIMISCCTL_ASICREV;
1752         sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid);
1753         sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid);
1754
1755         /*
1756          * Try to allocate memory for jumbo buffers.
1757          * The 5705 does not appear to support jumbo frames.
1758          */
1759         if (sc->bge_asicrev != BGE_ASICREV_BCM5705) {
1760                 if (bge_alloc_jumbo_mem(sc)) {
1761                         device_printf(dev, "jumbo buffer allocation failed\n");
1762                         bge_release_resources(sc);
1763                         error = ENXIO;
1764                         goto fail;
1765                 }
1766         }
1767
1768         /* Set default tuneable values. */
1769         sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
1770         sc->bge_rx_coal_ticks = 150;
1771         sc->bge_tx_coal_ticks = 150;
1772         sc->bge_rx_max_coal_bds = 64;
1773         sc->bge_tx_max_coal_bds = 128;
1774
1775         /* 5705 limits RX return ring to 512 entries. */
1776         if (sc->bge_asicrev == BGE_ASICREV_BCM5705)
1777                 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
1778         else
1779                 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
1780
1781         /* Set up ifnet structure */
1782         ifp->if_softc = sc;
1783         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1784         ifp->if_ioctl = bge_ioctl;
1785         ifp->if_start = bge_start;
1786         ifp->if_watchdog = bge_watchdog;
1787         ifp->if_init = bge_init;
1788         ifp->if_mtu = ETHERMTU;
1789         ifq_set_maxlen(&ifp->if_snd, BGE_TX_RING_CNT - 1);
1790         ifq_set_ready(&ifp->if_snd);
1791         ifp->if_hwassist = BGE_CSUM_FEATURES;
1792         ifp->if_capabilities = IFCAP_HWCSUM;
1793         ifp->if_capenable = ifp->if_capabilities;
1794
1795         /*
1796          * Figure out what sort of media we have by checking the
1797          * hardware config word in the first 32k of NIC internal memory,
1798          * or fall back to examining the EEPROM if necessary.
1799          * Note: on some BCM5700 cards, this value appears to be unset.
1800          * If that's the case, we have to rely on identifying the NIC
1801          * by its PCI subsystem ID, as we do below for the SysKonnect
1802          * SK-9D41.
1803          */
1804         if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER)
1805                 hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
1806         else {
1807                 bge_read_eeprom(sc, (caddr_t)&hwcfg,
1808                                 BGE_EE_HWCFG_OFFSET, sizeof(hwcfg));
1809                 hwcfg = ntohl(hwcfg);
1810         }
1811
1812         if ((hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER)
1813                 sc->bge_tbi = 1;
1814
1815         /* The SysKonnect SK-9D41 is a 1000baseSX card. */
1816         if ((pci_read_config(dev, BGE_PCI_SUBSYS, 4) >> 16) ==
1817              PCI_PRODUCT_SCHNEIDERKOCH_SK_9D41)
1818                 sc->bge_tbi = 1;
1819
1820         if (sc->bge_tbi) {
1821                 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK,
1822                     bge_ifmedia_upd, bge_ifmedia_sts);
1823                 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
1824                 ifmedia_add(&sc->bge_ifmedia,
1825                     IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL);
1826                 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
1827                 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO);
1828         } else {
1829                 /*
1830                  * Do transceiver setup.
1831                  */
1832                 if (mii_phy_probe(dev, &sc->bge_miibus,
1833                     bge_ifmedia_upd, bge_ifmedia_sts)) {
1834                         device_printf(dev, "MII without any PHY!\n");
1835                         bge_release_resources(sc);
1836                         bge_free_jumbo_mem(sc);
1837                         error = ENXIO;
1838                         goto fail;
1839                 }
1840         }
1841
1842         /*
1843          * When using the BCM5701 in PCI-X mode, data corruption has
1844          * been observed in the first few bytes of some received packets.
1845          * Aligning the packet buffer in memory eliminates the corruption.
1846          * Unfortunately, this misaligns the packet payloads.  On platforms
1847          * which do not support unaligned accesses, we will realign the
1848          * payloads by copying the received packets.
1849          */
1850         switch (sc->bge_chipid) {
1851         case BGE_CHIPID_BCM5701_A0:
1852         case BGE_CHIPID_BCM5701_B0:
1853         case BGE_CHIPID_BCM5701_B2:
1854         case BGE_CHIPID_BCM5701_B5:
1855                 /* If in PCI-X mode, work around the alignment bug. */
1856                 if ((pci_read_config(dev, BGE_PCI_PCISTATE, 4) &
1857                     (BGE_PCISTATE_PCI_BUSMODE | BGE_PCISTATE_PCI_BUSSPEED)) ==
1858                     BGE_PCISTATE_PCI_BUSSPEED)
1859                         sc->bge_rx_alignment_bug = 1;
1860                 break;
1861         }
1862
1863         /*
1864          * Call MI attach routine.
1865          */
1866         ether_ifattach(ifp, ether_addr);
1867
1868 fail:
1869         splx(s);
1870
1871         return(error);
1872 }
1873
1874 static int
1875 bge_detach(dev)
1876         device_t dev;
1877 {
1878         struct bge_softc *sc;
1879         struct ifnet *ifp;
1880         int s;
1881
1882         s = splimp();
1883
1884         sc = device_get_softc(dev);
1885         ifp = &sc->arpcom.ac_if;
1886
1887         ether_ifdetach(ifp);
1888         bge_stop(sc);
1889         bge_reset(sc);
1890
1891         if (sc->bge_tbi) {
1892                 ifmedia_removeall(&sc->bge_ifmedia);
1893         } else {
1894                 bus_generic_detach(dev);
1895                 device_delete_child(dev, sc->bge_miibus);
1896         }
1897
1898         bge_release_resources(sc);
1899         if (sc->bge_asicrev != BGE_ASICREV_BCM5705)
1900                 bge_free_jumbo_mem(sc);
1901
1902         splx(s);
1903
1904         return(0);
1905 }
1906
1907 static void
1908 bge_release_resources(sc)
1909         struct bge_softc *sc;
1910 {
1911         device_t dev;
1912
1913         dev = sc->bge_dev;
1914
1915         if (sc->bge_vpd_prodname != NULL)
1916                 free(sc->bge_vpd_prodname, M_DEVBUF);
1917
1918         if (sc->bge_vpd_readonly != NULL)
1919                 free(sc->bge_vpd_readonly, M_DEVBUF);
1920
1921         if (sc->bge_intrhand != NULL)
1922                 bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand);
1923
1924         if (sc->bge_irq != NULL)
1925                 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->bge_irq);
1926
1927         if (sc->bge_res != NULL)
1928                 bus_release_resource(dev, SYS_RES_MEMORY,
1929                     BGE_PCI_BAR0, sc->bge_res);
1930
1931         if (sc->bge_rdata != NULL)
1932                 contigfree(sc->bge_rdata,
1933                     sizeof(struct bge_ring_data), M_DEVBUF);
1934
1935         return;
1936 }
1937
/*
 * Issue a global reset of the controller, then restore enough PCI
 * configuration state and wait for the on-chip firmware handshake so
 * the device is usable again afterwards.
 */
static void
bge_reset(sc)
	struct bge_softc *sc;
{
	device_t dev;
	u_int32_t cachesize, command, pcistate;
	int i, val = 0;

	dev = sc->bge_dev;

	/* Save some important PCI state. */
	cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
	command = pci_read_config(dev, BGE_PCI_CMD, 4);
	pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);

	pci_write_config(dev, BGE_PCI_MISC_CTL,
	    BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
	    BGE_PCIMISCCTL_ENDIAN_WORDSWAP|BGE_PCIMISCCTL_PCISTATE_RW, 4);

	/* Issue global reset */
	/* NOTE(review): (65 << 1) presumably encodes a clock-control
	 * field in BGE_MISC_CFG -- confirm against Broadcom docs. */
	bge_writereg_ind(sc, BGE_MISC_CFG,
	    BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1));

	DELAY(1000);

	/* Reset some of the PCI state that got zapped by reset */
	pci_write_config(dev, BGE_PCI_MISC_CTL,
	    BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
	    BGE_PCIMISCCTL_ENDIAN_WORDSWAP|BGE_PCIMISCCTL_PCISTATE_RW, 4);
	pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
	pci_write_config(dev, BGE_PCI_CMD, command, 4);
	bge_writereg_ind(sc, BGE_MISC_CFG, (65 << 1));

	/*
	 * Prevent PXE restart: write a magic number to the
	 * general communications memory at 0xB50.
	 */
	bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
	/*
	 * Poll the value location we just wrote until
	 * we see the 1's complement of the magic number.
	 * This indicates that the firmware initialization
	 * is complete.
	 */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
		if (val == ~BGE_MAGIC_NUMBER)
			break;
		DELAY(10);
	}

	/* Handshake never completed; give up (device state is suspect). */
	if (i == BGE_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if, "firmware handshake timed out\n");
		return;
	}

	/*
	 * XXX Wait for the value of the PCISTATE register to
	 * return to its original pre-reset state. This is a
	 * fairly good indicator of reset completion. If we don't
	 * wait for the reset to fully complete, trying to read
	 * from the device's non-PCI registers may yield garbage
	 * results.
	 */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate)
			break;
		DELAY(10);
	}

	/* Enable memory arbiter. */
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705)
		CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);

	/* Fix up byte swapping */
	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_MODECTL_BYTESWAP_NONFRAME|
	    BGE_MODECTL_BYTESWAP_DATA);

	CSR_WRITE_4(sc, BGE_MAC_MODE, 0);

	/* Let the chip settle before anyone touches it again. */
	DELAY(10000);

	return;
}
2022
2023 /*
2024  * Frame reception handling. This is called if there's a frame
2025  * on the receive return list.
2026  *
2027  * Note: we have to be able to handle two possibilities here:
 * 1) the frame is from the jumbo receive ring
2029  * 2) the frame is from the standard receive ring
2030  */
2031
/*
 * Drain the RX return ring: for each completed descriptor, detach the
 * received mbuf, replenish the (standard or jumbo) receive ring, and
 * hand the frame to the stack.  Finally tell the hardware how far we
 * got by updating the consumer/producer mailboxes.
 */
static void
bge_rxeof(sc)
	struct bge_softc *sc;
{
	struct ifnet *ifp;
	int stdcnt = 0, jumbocnt = 0;

	ifp = &sc->arpcom.ac_if;

	/* Run until our consumer index catches the chip's producer index. */
	while(sc->bge_rx_saved_considx !=
	    sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx) {
		struct bge_rx_bd	*cur_rx;
		u_int32_t		rxidx;
		struct mbuf		*m = NULL;
		u_int16_t		vlan_tag = 0;
		int			have_tag = 0;

		cur_rx =
	    &sc->bge_rdata->bge_rx_return_ring[sc->bge_rx_saved_considx];

		rxidx = cur_rx->bge_idx;
		BGE_INC(sc->bge_rx_saved_considx, sc->bge_return_ring_cnt);

		/* Remember a hardware-extracted vlan tag, if any. */
		if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
			have_tag = 1;
			vlan_tag = cur_rx->bge_vlan_tag;
		}

		if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
			/* Frame came from the jumbo receive ring. */
			BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
			m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
			sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL;
			jumbocnt++;
			/* On RX error, recycle the mbuf back into the ring. */
			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
				ifp->if_ierrors++;
				bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
				continue;
			}
			/* Couldn't get a fresh mbuf: recycle this one. */
			if (bge_newbuf_jumbo(sc,
			    sc->bge_jumbo, NULL) == ENOBUFS) {
				ifp->if_ierrors++;
				bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
				continue;
			}
		} else {
			/* Frame came from the standard receive ring. */
			BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
			m = sc->bge_cdata.bge_rx_std_chain[rxidx];
			sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL;
			stdcnt++;
			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
				ifp->if_ierrors++;
				bge_newbuf_std(sc, sc->bge_std, m);
				continue;
			}
			if (bge_newbuf_std(sc, sc->bge_std,
			    NULL) == ENOBUFS) {
				ifp->if_ierrors++;
				bge_newbuf_std(sc, sc->bge_std, m);
				continue;
			}
		}

		ifp->if_ipackets++;
#ifndef __i386__
		/*
		 * The i386 allows unaligned accesses, but for other
		 * platforms we must make sure the payload is aligned.
		 */
		if (sc->bge_rx_alignment_bug) {
			bcopy(m->m_data, m->m_data + ETHER_ALIGN,
			    cur_rx->bge_len);
			m->m_data += ETHER_ALIGN;
		}
#endif
		/* Strip the CRC the chip leaves on the end of the frame. */
		m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
		m->m_pkthdr.rcvif = ifp;

#if 0 /* currently broken for some packets, possibly related to TCP options */
		if (ifp->if_hwassist) {
			m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
			if ((cur_rx->bge_ip_csum ^ 0xffff) == 0)
				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
			if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) {
				m->m_pkthdr.csum_data =
				    cur_rx->bge_tcp_udp_csum;
				m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
			}
		}
#endif

		/*
		 * If we received a packet with a vlan tag, pass it
		 * to vlan_input() instead of ether_input().
		 */
		if (have_tag) {
			VLAN_INPUT_TAG(m, vlan_tag);
			have_tag = vlan_tag = 0;
			continue;
		}

		(*ifp->if_input)(ifp, m);
	}

	/* Tell the chip how far we got, and what we replenished. */
	CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
	if (stdcnt)
		CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
	if (jumbocnt)
		CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);

	return;
}
2143
2144 static void
2145 bge_txeof(sc)
2146         struct bge_softc *sc;
2147 {
2148         struct bge_tx_bd *cur_tx = NULL;
2149         struct ifnet *ifp;
2150
2151         ifp = &sc->arpcom.ac_if;
2152
2153         /*
2154          * Go through our tx ring and free mbufs for those
2155          * frames that have been sent.
2156          */
2157         while (sc->bge_tx_saved_considx !=
2158             sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx) {
2159                 u_int32_t               idx = 0;
2160
2161                 idx = sc->bge_tx_saved_considx;
2162                 cur_tx = &sc->bge_rdata->bge_tx_ring[idx];
2163                 if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
2164                         ifp->if_opackets++;
2165                 if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
2166                         m_freem(sc->bge_cdata.bge_tx_chain[idx]);
2167                         sc->bge_cdata.bge_tx_chain[idx] = NULL;
2168                 }
2169                 sc->bge_txcnt--;
2170                 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
2171                 ifp->if_timer = 0;
2172         }
2173
2174         if (cur_tx != NULL)
2175                 ifp->if_flags &= ~IFF_OACTIVE;
2176
2177         return;
2178 }
2179
2180 static void
2181 bge_intr(xsc)
2182         void *xsc;
2183 {
2184         struct bge_softc *sc;
2185         struct ifnet *ifp;
2186         u_int32_t status;
2187
2188         sc = xsc;
2189         ifp = &sc->arpcom.ac_if;
2190
2191 #ifdef notdef
2192         /* Avoid this for now -- checking this register is expensive. */
2193         /* Make sure this is really our interrupt. */
2194         if (!(CSR_READ_4(sc, BGE_MISC_LOCAL_CTL) & BGE_MLC_INTR_STATE))
2195                 return;
2196 #endif
2197         /* Ack interrupt and stop others from occuring. */
2198         CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
2199
2200         /*
2201          * Process link state changes.
2202          * Grrr. The link status word in the status block does
2203          * not work correctly on the BCM5700 rev AX and BX chips,
2204          * according to all available information. Hence, we have
2205          * to enable MII interrupts in order to properly obtain
2206          * async link changes. Unfortunately, this also means that
2207          * we have to read the MAC status register to detect link
2208          * changes, thereby adding an additional register access to
2209          * the interrupt handler.
2210          */
2211
2212         if (sc->bge_asicrev == BGE_ASICREV_BCM5700) {
2213                 status = CSR_READ_4(sc, BGE_MAC_STS);
2214                 if (status & BGE_MACSTAT_MI_INTERRUPT) {
2215                         sc->bge_link = 0;
2216                         callout_stop(&sc->bge_stat_timer);
2217                         bge_tick(sc);
2218                         /* Clear the interrupt */
2219                         CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
2220                             BGE_EVTENB_MI_INTERRUPT);
2221                         bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR);
2222                         bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR,
2223                             BRGPHY_INTRS);
2224                 }
2225         } else {
2226                 if ((sc->bge_rdata->bge_status_block.bge_status &
2227                     BGE_STATFLAG_UPDATED) &&
2228                     (sc->bge_rdata->bge_status_block.bge_status &
2229                     BGE_STATFLAG_LINKSTATE_CHANGED)) {
2230                         sc->bge_rdata->bge_status_block.bge_status &=
2231                                 ~(BGE_STATFLAG_UPDATED|
2232                                 BGE_STATFLAG_LINKSTATE_CHANGED);
2233                         /*
2234                          * Sometimes PCS encoding errors are detected in
2235                          * TBI mode (on fiber NICs), and for some reason
2236                          * the chip will signal them as link changes.
2237                          * If we get a link change event, but the 'PCS
2238                          * encoding error' bit in the MAC status register
2239                          * is set, don't bother doing a link check.
2240                          * This avoids spurious "gigabit link up" messages
2241                          * that sometimes appear on fiber NICs during
2242                          * periods of heavy traffic. (There should be no
2243                          * effect on copper NICs.)
2244                          */
2245                         status = CSR_READ_4(sc, BGE_MAC_STS);
2246                         if (!(status & (BGE_MACSTAT_PORT_DECODE_ERROR|
2247                             BGE_MACSTAT_MI_COMPLETE))) {
2248                                 sc->bge_link = 0;
2249                                 callout_stop(&sc->bge_stat_timer);
2250                                 bge_tick(sc);
2251                         }
2252                         sc->bge_link = 0;
2253                         callout_stop(&sc->bge_stat_timer);
2254                         bge_tick(sc);
2255                         /* Clear the interrupt */
2256                         CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
2257                             BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
2258                             BGE_MACSTAT_LINK_CHANGED);
2259
2260                         /* Force flush the status block cached by PCI bridge */
2261                         CSR_READ_4(sc, BGE_MBX_IRQ0_LO);
2262                 }
2263         }
2264
2265         if (ifp->if_flags & IFF_RUNNING) {
2266                 /* Check RX return ring producer/consumer */
2267                 bge_rxeof(sc);
2268
2269                 /* Check TX ring producer/consumer */
2270                 bge_txeof(sc);
2271         }
2272
2273         bge_handle_events(sc);
2274
2275         /* Re-enable interrupts. */
2276         CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
2277
2278         if ((ifp->if_flags & IFF_RUNNING) && !ifq_is_empty(&ifp->if_snd))
2279                 bge_start(ifp);
2280
2281         return;
2282 }
2283
2284 static void
2285 bge_tick(xsc)
2286         void *xsc;
2287 {
2288         struct bge_softc *sc;
2289         struct mii_data *mii = NULL;
2290         struct ifmedia *ifm = NULL;
2291         struct ifnet *ifp;
2292         int s;
2293
2294         sc = xsc;
2295         ifp = &sc->arpcom.ac_if;
2296
2297         s = splimp();
2298
2299         if (sc->bge_asicrev == BGE_ASICREV_BCM5705)
2300                 bge_stats_update_regs(sc);
2301         else
2302                 bge_stats_update(sc);
2303         callout_reset(&sc->bge_stat_timer, hz, bge_tick, sc);
2304         if (sc->bge_link) {
2305                 splx(s);
2306                 return;
2307         }
2308
2309         if (sc->bge_tbi) {
2310                 ifm = &sc->bge_ifmedia;
2311                 if (CSR_READ_4(sc, BGE_MAC_STS) &
2312                     BGE_MACSTAT_TBI_PCS_SYNCHED) {
2313                         sc->bge_link++;
2314                         CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
2315                         if_printf(ifp, "gigabit link up\n");
2316                         if (!ifq_is_empty(&ifp->if_snd))
2317                                 bge_start(ifp);
2318                 }
2319                 splx(s);
2320                 return;
2321         }
2322
2323         mii = device_get_softc(sc->bge_miibus);
2324         mii_tick(mii);
2325  
2326         if (!sc->bge_link) {
2327                 mii_pollstat(mii);
2328                 if (mii->mii_media_status & IFM_ACTIVE &&
2329                     IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
2330                         sc->bge_link++;
2331                         if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
2332                             IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
2333                                 if_printf(ifp, "gigabit link up\n");
2334                         if (!ifq_is_empty(&ifp->if_snd))
2335                                 bge_start(ifp);
2336                 }
2337         }
2338
2339         splx(s);
2340
2341         return;
2342 }
2343
2344 static void
2345 bge_stats_update_regs(sc)
2346         struct bge_softc *sc;
2347 {
2348         struct ifnet *ifp;
2349         struct bge_mac_stats_regs stats;
2350         u_int32_t *s;
2351         int i;
2352
2353         ifp = &sc->arpcom.ac_if;
2354
2355         s = (u_int32_t *)&stats;
2356         for (i = 0; i < sizeof(struct bge_mac_stats_regs); i += 4) {
2357                 *s = CSR_READ_4(sc, BGE_RX_STATS + i);
2358                 s++;
2359         }
2360
2361         ifp->if_collisions +=
2362            (stats.dot3StatsSingleCollisionFrames +
2363            stats.dot3StatsMultipleCollisionFrames +
2364            stats.dot3StatsExcessiveCollisions +
2365            stats.dot3StatsLateCollisions) -
2366            ifp->if_collisions;
2367
2368         return;
2369 }
2370
2371 static void
2372 bge_stats_update(sc)
2373         struct bge_softc *sc;
2374 {
2375         struct ifnet *ifp;
2376         struct bge_stats *stats;
2377
2378         ifp = &sc->arpcom.ac_if;
2379
2380         stats = (struct bge_stats *)(sc->bge_vhandle +
2381             BGE_MEMWIN_START + BGE_STATS_BLOCK);
2382
2383         ifp->if_collisions +=
2384            (stats->txstats.dot3StatsSingleCollisionFrames.bge_addr_lo +
2385            stats->txstats.dot3StatsMultipleCollisionFrames.bge_addr_lo +
2386            stats->txstats.dot3StatsExcessiveCollisions.bge_addr_lo +
2387            stats->txstats.dot3StatsLateCollisions.bge_addr_lo) -
2388            ifp->if_collisions;
2389
2390 #ifdef notdef
2391         ifp->if_collisions +=
2392            (sc->bge_rdata->bge_info.bge_stats.dot3StatsSingleCollisionFrames +
2393            sc->bge_rdata->bge_info.bge_stats.dot3StatsMultipleCollisionFrames +
2394            sc->bge_rdata->bge_info.bge_stats.dot3StatsExcessiveCollisions +
2395            sc->bge_rdata->bge_info.bge_stats.dot3StatsLateCollisions) -
2396            ifp->if_collisions;
2397 #endif
2398
2399         return;
2400 }
2401
2402 /*
2403  * Encapsulate an mbuf chain in the tx ring  by coupling the mbuf data
2404  * pointers to descriptors.
2405  */
2406 static int
2407 bge_encap(sc, m_head, txidx)
2408         struct bge_softc *sc;
2409         struct mbuf *m_head;
2410         u_int32_t *txidx;
2411 {
2412         struct bge_tx_bd        *f = NULL;
2413         struct mbuf             *m;
2414         u_int32_t               frag, cur, cnt = 0;
2415         u_int16_t               csum_flags = 0;
2416         struct ifvlan           *ifv = NULL;
2417
2418         if ((m_head->m_flags & (M_PROTO1|M_PKTHDR)) == (M_PROTO1|M_PKTHDR) &&
2419             m_head->m_pkthdr.rcvif != NULL &&
2420             m_head->m_pkthdr.rcvif->if_type == IFT_L2VLAN)
2421                 ifv = m_head->m_pkthdr.rcvif->if_softc;
2422
2423         m = m_head;
2424         cur = frag = *txidx;
2425
2426         if (m_head->m_pkthdr.csum_flags) {
2427                 if (m_head->m_pkthdr.csum_flags & CSUM_IP)
2428                         csum_flags |= BGE_TXBDFLAG_IP_CSUM;
2429                 if (m_head->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
2430                         csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
2431                 if (m_head->m_flags & M_LASTFRAG)
2432                         csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
2433                 else if (m_head->m_flags & M_FRAG)
2434                         csum_flags |= BGE_TXBDFLAG_IP_FRAG;
2435         }
2436         /*
2437          * Start packing the mbufs in this chain into
2438          * the fragment pointers. Stop when we run out
2439          * of fragments or hit the end of the mbuf chain.
2440          */
2441         for (m = m_head; m != NULL; m = m->m_next) {
2442                 if (m->m_len != 0) {
2443                         f = &sc->bge_rdata->bge_tx_ring[frag];
2444                         if (sc->bge_cdata.bge_tx_chain[frag] != NULL)
2445                                 break;
2446                         BGE_HOSTADDR(f->bge_addr,
2447                             vtophys(mtod(m, vm_offset_t)));
2448                         f->bge_len = m->m_len;
2449                         f->bge_flags = csum_flags;
2450                         if (ifv != NULL) {
2451                                 f->bge_flags |= BGE_TXBDFLAG_VLAN_TAG;
2452                                 f->bge_vlan_tag = ifv->ifv_tag;
2453                         } else {
2454                                 f->bge_vlan_tag = 0;
2455                         }
2456                         /*
2457                          * Sanity check: avoid coming within 16 descriptors
2458                          * of the end of the ring.
2459                          */
2460                         if ((BGE_TX_RING_CNT - (sc->bge_txcnt + cnt)) < 16)
2461                                 return(ENOBUFS);
2462                         cur = frag;
2463                         BGE_INC(frag, BGE_TX_RING_CNT);
2464                         cnt++;
2465                 }
2466         }
2467
2468         if (m != NULL)
2469                 return(ENOBUFS);
2470
2471         if (frag == sc->bge_tx_saved_considx)
2472                 return(ENOBUFS);
2473
2474         sc->bge_rdata->bge_tx_ring[cur].bge_flags |= BGE_TXBDFLAG_END;
2475         sc->bge_cdata.bge_tx_chain[cur] = m_head;
2476         sc->bge_txcnt += cnt;
2477
2478         *txidx = frag;
2479
2480         return(0);
2481 }
2482
/*
 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit descriptors.
 */
static void
bge_start(ifp)
	struct ifnet *ifp;
{
	struct bge_softc *sc;
	struct mbuf *m_head = NULL;
	u_int32_t prodidx = 0;

	sc = ifp->if_softc;

	/* Don't queue anything until we have a link. */
	if (!sc->bge_link)
		return;

	/* Resume filling descriptors at the current host producer index. */
	prodidx = CSR_READ_4(sc, BGE_MBX_TX_HOST_PROD0_LO);

	/* Loop until the ring is full or the send queue runs dry. */
	while(sc->bge_cdata.bge_tx_chain[prodidx] == NULL) {
		/*
		 * Peek at the packet without removing it, so that it
		 * stays on the queue if encapsulation fails below.
		 */
		m_head = ifq_poll(&ifp->if_snd);
		if (m_head == NULL)
			break;

		/*
		 * XXX
		 * safety overkill.  If this is a fragmented packet chain
		 * with delayed TCP/UDP checksums, then only encapsulate
		 * it if we have enough descriptors to handle the entire
		 * chain at once.
		 * (paranoia -- may not actually be needed)
		 */
		if (m_head->m_flags & M_FIRSTFRAG &&
		    m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
			if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
			    m_head->m_pkthdr.csum_data + 16) {
				ifp->if_flags |= IFF_OACTIVE;
				break;
			}
		}

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (bge_encap(sc, m_head, &prodidx)) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		/* Encapsulation succeeded: now actually dequeue it. */
		m_head = ifq_dequeue(&ifp->if_snd);

		/* Feed a copy to any active BPF listeners. */
		BPF_MTAP(ifp, m_head);
	}

	/* Transmit */
	CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
	/* 5700 b2 errata */
	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
		CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;

	return;
}
2551
2552 static void
2553 bge_init(xsc)
2554         void *xsc;
2555 {
2556         struct bge_softc *sc = xsc;
2557         struct ifnet *ifp;
2558         u_int16_t *m;
2559         int s;
2560
2561         s = splimp();
2562
2563         ifp = &sc->arpcom.ac_if;
2564
2565         if (ifp->if_flags & IFF_RUNNING) {
2566                 splx(s);
2567                 return;
2568         }
2569
2570         /* Cancel pending I/O and flush buffers. */
2571         bge_stop(sc);
2572         bge_reset(sc);
2573         bge_chipinit(sc);
2574
2575         /*
2576          * Init the various state machines, ring
2577          * control blocks and firmware.
2578          */
2579         if (bge_blockinit(sc)) {
2580                 if_printf(ifp, "initialization failure\n");
2581                 splx(s);
2582                 return;
2583         }
2584
2585         ifp = &sc->arpcom.ac_if;
2586
2587         /* Specify MTU. */
2588         CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
2589             ETHER_HDR_LEN + ETHER_CRC_LEN);
2590
2591         /* Load our MAC address. */
2592         m = (u_int16_t *)&sc->arpcom.ac_enaddr[0];
2593         CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
2594         CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
2595
2596         /* Enable or disable promiscuous mode as needed. */
2597         if (ifp->if_flags & IFF_PROMISC) {
2598                 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
2599         } else {
2600                 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
2601         }
2602
2603         /* Program multicast filter. */
2604         bge_setmulti(sc);
2605
2606         /* Init RX ring. */
2607         bge_init_rx_ring_std(sc);
2608
2609         /*
2610          * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
2611          * memory to insure that the chip has in fact read the first
2612          * entry of the ring.
2613          */
2614         if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
2615                 u_int32_t               v, i;
2616                 for (i = 0; i < 10; i++) {
2617                         DELAY(20);
2618                         v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
2619                         if (v == (MCLBYTES - ETHER_ALIGN))
2620                                 break;
2621                 }
2622                 if (i == 10)
2623                         if_printf(ifp, "5705 A0 chip failed to load RX ring\n");
2624         }
2625
2626         /* Init jumbo RX ring. */
2627         if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
2628                 bge_init_rx_ring_jumbo(sc);
2629
2630         /* Init our RX return ring index */
2631         sc->bge_rx_saved_considx = 0;
2632
2633         /* Init TX ring. */
2634         bge_init_tx_ring(sc);
2635
2636         /* Turn on transmitter */
2637         BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE);
2638
2639         /* Turn on receiver */
2640         BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
2641
2642         /* Tell firmware we're alive. */
2643         BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
2644
2645         /* Enable host interrupts. */
2646         BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
2647         BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
2648         CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
2649
2650         bge_ifmedia_upd(ifp);
2651
2652         ifp->if_flags |= IFF_RUNNING;
2653         ifp->if_flags &= ~IFF_OACTIVE;
2654
2655         splx(s);
2656
2657         callout_reset(&sc->bge_stat_timer, hz, bge_tick, sc);
2658 }
2659
2660 /*
2661  * Set media options.
2662  */
2663 static int
2664 bge_ifmedia_upd(ifp)
2665         struct ifnet *ifp;
2666 {
2667         struct bge_softc *sc;
2668         struct mii_data *mii;
2669         struct ifmedia *ifm;
2670
2671         sc = ifp->if_softc;
2672         ifm = &sc->bge_ifmedia;
2673
2674         /* If this is a 1000baseX NIC, enable the TBI port. */
2675         if (sc->bge_tbi) {
2676                 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2677                         return(EINVAL);
2678                 switch(IFM_SUBTYPE(ifm->ifm_media)) {
2679                 case IFM_AUTO:
2680                         break;
2681                 case IFM_1000_SX:
2682                         if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
2683                                 BGE_CLRBIT(sc, BGE_MAC_MODE,
2684                                     BGE_MACMODE_HALF_DUPLEX);
2685                         } else {
2686                                 BGE_SETBIT(sc, BGE_MAC_MODE,
2687                                     BGE_MACMODE_HALF_DUPLEX);
2688                         }
2689                         break;
2690                 default:
2691                         return(EINVAL);
2692                 }
2693                 return(0);
2694         }
2695
2696         mii = device_get_softc(sc->bge_miibus);
2697         sc->bge_link = 0;
2698         if (mii->mii_instance) {
2699                 struct mii_softc *miisc;
2700                 for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
2701                     miisc = LIST_NEXT(miisc, mii_list))
2702                         mii_phy_reset(miisc);
2703         }
2704         mii_mediachg(mii);
2705
2706         return(0);
2707 }
2708
2709 /*
2710  * Report current media status.
2711  */
2712 static void
2713 bge_ifmedia_sts(ifp, ifmr)
2714         struct ifnet *ifp;
2715         struct ifmediareq *ifmr;
2716 {
2717         struct bge_softc *sc;
2718         struct mii_data *mii;
2719
2720         sc = ifp->if_softc;
2721
2722         if (sc->bge_tbi) {
2723                 ifmr->ifm_status = IFM_AVALID;
2724                 ifmr->ifm_active = IFM_ETHER;
2725                 if (CSR_READ_4(sc, BGE_MAC_STS) &
2726                     BGE_MACSTAT_TBI_PCS_SYNCHED)
2727                         ifmr->ifm_status |= IFM_ACTIVE;
2728                 ifmr->ifm_active |= IFM_1000_SX;
2729                 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
2730                         ifmr->ifm_active |= IFM_HDX;    
2731                 else
2732                         ifmr->ifm_active |= IFM_FDX;
2733                 return;
2734         }
2735
2736         mii = device_get_softc(sc->bge_miibus);
2737         mii_pollstat(mii);
2738         ifmr->ifm_active = mii->mii_media_active;
2739         ifmr->ifm_status = mii->mii_media_status;
2740
2741         return;
2742 }
2743
/*
 * Handle interface ioctls.  Runs at splimp() to keep the interrupt
 * handler out while interface state is updated.  Returns 0 on
 * success or an errno value.  (The ucred argument is supplied by the
 * framework and unused here.)
 */
static int
bge_ioctl(ifp, command, data, cr)
	struct ifnet *ifp;
	u_long command;
	caddr_t data;
	struct ucred *cr;
{
	struct bge_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	int s, mask, error = 0;
	struct mii_data *mii;

	s = splimp();

	switch(command) {
	case SIOCSIFADDR:
	case SIOCGIFADDR:
		error = ether_ioctl(ifp, command, data);
		break;
	case SIOCSIFMTU:
		/* Disallow jumbo frames on 5705. */
		if ((sc->bge_asicrev == BGE_ASICREV_BCM5705 &&
		    ifr->ifr_mtu > ETHERMTU) || ifr->ifr_mtu > BGE_JUMBO_MTU)
			error = EINVAL;
		else {
			ifp->if_mtu = ifr->ifr_mtu;
			/* Force a reinit so the chip picks up the new MTU. */
			ifp->if_flags &= ~IFF_RUNNING;
			bge_init(sc);
		}
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the state of the PROMISC flag changed,
			 * then just use the 'set promisc mode' command
			 * instead of reinitializing the entire NIC. Doing
			 * a full re-init means reloading the firmware and
			 * waiting for it to start up, which may take a
			 * second or two.
			 */
			if (ifp->if_flags & IFF_RUNNING &&
			    ifp->if_flags & IFF_PROMISC &&
			    !(sc->bge_if_flags & IFF_PROMISC)) {
				BGE_SETBIT(sc, BGE_RX_MODE,
				    BGE_RXMODE_RX_PROMISC);
			} else if (ifp->if_flags & IFF_RUNNING &&
			    !(ifp->if_flags & IFF_PROMISC) &&
			    sc->bge_if_flags & IFF_PROMISC) {
				BGE_CLRBIT(sc, BGE_RX_MODE,
				    BGE_RXMODE_RX_PROMISC);
			} else
				bge_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING) {
				bge_stop(sc);
			}
		}
		/* Remember the flags for the next PROMISC comparison. */
		sc->bge_if_flags = ifp->if_flags;
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING) {
			/* Reprogram the hardware multicast filter. */
			bge_setmulti(sc);
			error = 0;
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		/* TBI (fiber) media is handled by us, copper by the PHY. */
		if (sc->bge_tbi) {
			error = ifmedia_ioctl(ifp, ifr,
			    &sc->bge_ifmedia, command);
		} else {
			mii = device_get_softc(sc->bge_miibus);
			error = ifmedia_ioctl(ifp, ifr,
			    &mii->mii_media, command);
		}
		break;
	case SIOCSIFCAP:
		/* Toggle hardware checksum offload as one unit. */
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_HWCSUM) {
			if (IFCAP_HWCSUM & ifp->if_capenable)
				ifp->if_capenable &= ~IFCAP_HWCSUM;
			else
				ifp->if_capenable |= IFCAP_HWCSUM;
		}
		error = 0;
		break;
	default:
		error = EINVAL;
		break;
	}

	(void)splx(s);

	return(error);
}
2841
2842 static void
2843 bge_watchdog(ifp)
2844         struct ifnet *ifp;
2845 {
2846         struct bge_softc *sc;
2847
2848         sc = ifp->if_softc;
2849
2850         if_printf(ifp, "watchdog timeout -- resetting\n");
2851
2852         ifp->if_flags &= ~IFF_RUNNING;
2853         bge_init(sc);
2854
2855         ifp->if_oerrors++;
2856
2857         return;
2858 }
2859
/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.  The disable sequence below follows the
 * enable order in reverse: receiver blocks, transmitter blocks,
 * memory managers, then interrupts and firmware notification.
 */
static void
bge_stop(sc)
	struct bge_softc *sc;
{
	struct ifnet *ifp;
	struct ifmedia_entry *ifm;
	struct mii_data *mii = NULL;
	int mtmp, itmp;

	ifp = &sc->arpcom.ac_if;

	/* Copper cards have a PHY to isolate; TBI (fiber) cards don't. */
	if (!sc->bge_tbi)
		mii = device_get_softc(sc->bge_miibus);

	/* Stop the periodic bge_tick() callout. */
	callout_stop(&sc->bge_stat_timer);

	/*
	 * Disable all of the receiver blocks
	 */
	BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
	/* The 5705 lacks some of the blocks found on older chips. */
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705)
		BGE_CLRBIT(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);

	/*
	 * Disable all of the transmit blocks
	 */
	BGE_CLRBIT(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705)
		BGE_CLRBIT(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);

	/*
	 * Shut down all of the memory managers and related
	 * state machines.
	 */
	BGE_CLRBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705)
		BGE_CLRBIT(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
	/* Pulse the flow-through queue reset. */
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705) {
		BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
		BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
	}

	/* Disable host interrupts. */
	BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);

	/*
	 * Tell firmware we're shutting down.
	 */
	BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);

	/* Free the RX lists. */
	bge_free_rx_ring_std(sc);

	/* Free jumbo RX list. */
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705)
		bge_free_rx_ring_jumbo(sc);

	/* Free TX buffers. */
	bge_free_tx_ring(sc);

	/*
	 * Isolate/power down the PHY, but leave the media selection
	 * unchanged so that things will be put back to normal when
	 * we bring the interface back up.
	 */
	if (!sc->bge_tbi) {
		/*
		 * Temporarily force IFF_UP and media IFM_NONE so that
		 * mii_mediachg() isolates the PHY, then restore both.
		 */
		itmp = ifp->if_flags;
		ifp->if_flags |= IFF_UP;
		ifm = mii->mii_media.ifm_cur;
		mtmp = ifm->ifm_media;
		ifm->ifm_media = IFM_ETHER|IFM_NONE;
		mii_mediachg(mii);
		ifm->ifm_media = mtmp;
		ifp->if_flags = itmp;
	}

	sc->bge_link = 0;

	/* Invalidate the saved TX consumer index for the next init. */
	sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	return;
}
2962
2963 /*
2964  * Stop all chip I/O so that the kernel's probe routines don't
2965  * get confused by errant DMAs when rebooting.
2966  */
2967 static void
2968 bge_shutdown(dev)
2969         device_t dev;
2970 {
2971         struct bge_softc *sc;
2972
2973         sc = device_get_softc(dev);
2974
2975         bge_stop(sc); 
2976         bge_reset(sc);
2977
2978         return;
2979 }