/*
 * Copyright (c) 2001 Wind River Systems
 * Copyright (c) 1997, 1998, 1999, 2001
 *      Bill Paul <wpaul@windriver.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/bge/if_bge.c,v 1.3.2.29 2003/12/01 21:06:59 ambrisko Exp $
 * $DragonFly: src/sys/dev/netif/bge/if_bge.c,v 1.30 2005/05/21 07:38:41 joerg Exp $
 *
 */

/*
 * Broadcom BCM570x family gigabit ethernet driver for FreeBSD.
 *
 * Written by Bill Paul <wpaul@windriver.com>
 * Senior Engineer, Wind River Systems
 */

/*
 * The Broadcom BCM5700 is based on technology originally developed by
 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
 * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
 * frames, highly configurable RX filtering, and 16 RX and TX queues
 * (which, along with RX filter rules, can be used for QoS applications).
 * Other features, such as TCP segmentation, may be available as part
 * of value-added firmware updates. Unlike the Tigon I and Tigon II,
 * firmware images can be stored in hardware and need not be compiled
 * into the driver.
 *
 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
 *
 * The BCM5701 is a single-chip solution incorporating both the BCM5700
 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
 * does not support external SSRAM.
 *
 * Broadcom also produces a variation of the BCM5700 under the "Altima"
 * brand name, which is functionally similar but lacks PCI-X support.
 *
 * Without external SSRAM, you can only have at most 4 TX rings,
 * and the use of the mini RX ring is disabled. This seems to imply
 * that these features are simply not available on the BCM5701. As a
 * result, this driver does not implement any support for the mini RX
 * ring.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/queue.h>

#include <net/if.h>
#include <net/ifq_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <net/bpf.h>

#include <net/if_types.h>
#include <net/vlan/if_vlan_var.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>

#include <vm/vm.h>              /* for vtophys */
#include <vm/pmap.h>            /* for vtophys */
#include <machine/clock.h>      /* for DELAY */
#include <machine/bus_memio.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/netif/mii_layer/mii.h>
#include <dev/netif/mii_layer/miivar.h>
#include <dev/netif/mii_layer/miidevs.h>
#include <dev/netif/mii_layer/brgphyreg.h>

#include <bus/pci/pcidevs.h>
#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>

#include "if_bgereg.h"

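/* Checksum offload features advertised to the network stack. */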
#define BGE_CSUM_FEATURES       (CSUM_IP | CSUM_TCP | CSUM_UDP)

/* "controller miibus0" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

/*
 * Various supported device vendors/types and their names. Note: the
 * spec seems to indicate that the hardware still has Alteon's vendor
 * ID burned into it, though it will always be overridden by the vendor
 * ID in the EEPROM. Just to be safe, we cover all possibilities.
 */
#define BGE_DEVDESC_MAX         64      /* Maximum device description length */

static struct bge_type bge_devs[] = {
        { PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_BCM5700,
                "Broadcom BCM5700 Gigabit Ethernet" },
        { PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_BCM5701,
                "Broadcom BCM5701 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5700,
                "Broadcom BCM5700 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5701,
                "Broadcom BCM5701 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702X,
                "Broadcom BCM5702X Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, BCOM_DEVICEID_BCM5702X,
                "Broadcom BCM5702X Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703X,
                "Broadcom BCM5703X Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, BCOM_DEVICEID_BCM5703X,
                "Broadcom BCM5703X Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704C,
                "Broadcom BCM5704C Dual Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704S,
                "Broadcom BCM5704S Dual Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705,
                "Broadcom BCM5705 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705M,
                "Broadcom BCM5705M Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705_ALT,
                "Broadcom BCM5705M Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5782,
                "Broadcom BCM5782 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, BCOM_DEVICEID_BCM5788,
                "Broadcom BCM5788 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5901,
                "Broadcom BCM5901 Fast Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5901A2,
                "Broadcom BCM5901A2 Fast Ethernet" },
        { PCI_VENDOR_SCHNEIDERKOCH, PCI_PRODUCT_SCHNEIDERKOCH_SK_9DX1,
                "SysKonnect Gigabit Ethernet" },
        { PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1000,
                "Altima AC1000 Gigabit Ethernet" },
        { PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1001,
                "Altima AC1002 Gigabit Ethernet" },
        { PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC9100,
                "Altima AC9100 Gigabit Ethernet" },
        { 0, 0, NULL }
};

static int bge_probe            (device_t);
static int bge_attach           (device_t);
static int bge_detach           (device_t);
static void bge_release_resources
                                (struct bge_softc *);
static void bge_txeof           (struct bge_softc *);
static void bge_rxeof           (struct bge_softc *);

static void bge_tick            (void *);
static void bge_stats_update    (struct bge_softc *);
static void bge_stats_update_regs
                                (struct bge_softc *);
static int bge_encap            (struct bge_softc *, struct mbuf *,
                                        u_int32_t *);

static void bge_intr            (void *);
static void bge_start           (struct ifnet *);
static int bge_ioctl            (struct ifnet *, u_long, caddr_t,
                                        struct ucred *);
static void bge_init            (void *);
static void bge_stop            (struct bge_softc *);
static void bge_watchdog                (struct ifnet *);
static void bge_shutdown                (device_t);
static int bge_ifmedia_upd      (struct ifnet *);
static void bge_ifmedia_sts     (struct ifnet *, struct ifmediareq *);

static u_int8_t bge_eeprom_getbyte      (struct bge_softc *,
                                                int, u_int8_t *);
static int bge_read_eeprom      (struct bge_softc *, caddr_t, int, int);

static u_int32_t bge_crc        (caddr_t);
static void bge_setmulti        (struct bge_softc *);

static void bge_handle_events   (struct bge_softc *);
static int bge_alloc_jumbo_mem  (struct bge_softc *);
static void bge_free_jumbo_mem  (struct bge_softc *);
static void *bge_jalloc         (struct bge_softc *);
static void bge_jfree           (caddr_t, u_int);
static void bge_jref            (caddr_t, u_int);
static int bge_newbuf_std       (struct bge_softc *, int, struct mbuf *);
static int bge_newbuf_jumbo     (struct bge_softc *, int, struct mbuf *);
static int bge_init_rx_ring_std (struct bge_softc *);
static void bge_free_rx_ring_std        (struct bge_softc *);
static int bge_init_rx_ring_jumbo       (struct bge_softc *);
static void bge_free_rx_ring_jumbo      (struct bge_softc *);
static void bge_free_tx_ring    (struct bge_softc *);
static int bge_init_tx_ring     (struct bge_softc *);

static int bge_chipinit         (struct bge_softc *);
static int bge_blockinit        (struct bge_softc *);

#ifdef notdef
static u_int8_t bge_vpd_readbyte (struct bge_softc *, int);
static void bge_vpd_read_res    (struct bge_softc *,
                                        struct vpd_res *, int);
static void bge_vpd_read        (struct bge_softc *);
#endif

static u_int32_t bge_readmem_ind
                                (struct bge_softc *, int);
static void bge_writemem_ind    (struct bge_softc *, int, int);
#ifdef notdef
static u_int32_t bge_readreg_ind
                                (struct bge_softc *, int);
#endif
static void bge_writereg_ind    (struct bge_softc *, int, int);

static int bge_miibus_readreg   (device_t, int, int);
static int bge_miibus_writereg  (device_t, int, int, int);
static void bge_miibus_statchg  (device_t);

static void bge_reset           (struct bge_softc *);

static device_method_t bge_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe,         bge_probe),
        DEVMETHOD(device_attach,        bge_attach),
        DEVMETHOD(device_detach,        bge_detach),
        DEVMETHOD(device_shutdown,      bge_shutdown),

        /* bus interface */
        DEVMETHOD(bus_print_child,      bus_generic_print_child),
        DEVMETHOD(bus_driver_added,     bus_generic_driver_added),

        /* MII interface */
        DEVMETHOD(miibus_readreg,       bge_miibus_readreg),
        DEVMETHOD(miibus_writereg,      bge_miibus_writereg),
        DEVMETHOD(miibus_statchg,       bge_miibus_statchg),

        { 0, 0 }
};

static driver_t bge_driver = {
        "bge",
        bge_methods,
        sizeof(struct bge_softc)
};

static devclass_t bge_devclass;

DECLARE_DUMMY_MODULE(if_bge);
DRIVER_MODULE(if_bge, pci, bge_driver, bge_devclass, 0, 0);
DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0);

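/*
 * Indirect access to the NIC's internal memory: point the PCI memory
 * window base register at the target offset, then move the data
 * through the memory window data register.
 */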
static u_int32_t
bge_readmem_ind(sc, off)
        struct bge_softc *sc;
        int off;
{
        device_t dev;

        dev = sc->bge_dev;

        pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
        return(pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4));
}

static void
bge_writemem_ind(sc, off, val)
        struct bge_softc *sc;
        int off, val;
{
        device_t dev;

        dev = sc->bge_dev;

        pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
        pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);

        return;
}

#ifdef notdef
static u_int32_t
bge_readreg_ind(sc, off)
        struct bge_softc *sc;
        int off;
{
        device_t dev;

        dev = sc->bge_dev;

        pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
        return(pci_read_config(dev, BGE_PCI_REG_DATA, 4));
}
#endif

static void
bge_writereg_ind(sc, off, val)
        struct bge_softc *sc;
        int off, val;
{
        device_t dev;

        dev = sc->bge_dev;

        pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
        pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);

        return;
}

#ifdef notdef
static u_int8_t
bge_vpd_readbyte(sc, addr)
        struct bge_softc *sc;
        int addr;
{
        int i;
        device_t dev;
        u_int32_t val;

        dev = sc->bge_dev;
        pci_write_config(dev, BGE_PCI_VPD_ADDR, addr, 2);
        for (i = 0; i < BGE_TIMEOUT * 10; i++) {
                DELAY(10);
                if (pci_read_config(dev, BGE_PCI_VPD_ADDR, 2) & BGE_VPD_FLAG)
                        break;
        }

        if (i >= BGE_TIMEOUT * 10) {
                device_printf(sc->bge_dev, "VPD read timed out\n");
                return(0);
        }

        val = pci_read_config(dev, BGE_PCI_VPD_DATA, 4);

        return((val >> ((addr % 4) * 8)) & 0xFF);
}

static void
bge_vpd_read_res(sc, res, addr)
        struct bge_softc *sc;
        struct vpd_res *res;
        int addr;
{
        int i;
        u_int8_t *ptr;

        ptr = (u_int8_t *)res;
        for (i = 0; i < sizeof(struct vpd_res); i++)
                ptr[i] = bge_vpd_readbyte(sc, i + addr);

        return;
}

static void
bge_vpd_read(sc)
        struct bge_softc *sc;
{
        int pos = 0, i;
        struct vpd_res res;

        if (sc->bge_vpd_prodname != NULL)
                free(sc->bge_vpd_prodname, M_DEVBUF);
        if (sc->bge_vpd_readonly != NULL)
                free(sc->bge_vpd_readonly, M_DEVBUF);
        sc->bge_vpd_prodname = NULL;
        sc->bge_vpd_readonly = NULL;

        bge_vpd_read_res(sc, &res, pos);

        if (res.vr_id != VPD_RES_ID) {
                device_printf(sc->bge_dev,
                              "bad VPD resource id: expected %x got %x\n",
                              VPD_RES_ID, res.vr_id);
                return;
        }

        pos += sizeof(res);
        sc->bge_vpd_prodname = malloc(res.vr_len + 1, M_DEVBUF, M_INTWAIT);
        for (i = 0; i < res.vr_len; i++)
                sc->bge_vpd_prodname[i] = bge_vpd_readbyte(sc, i + pos);
        sc->bge_vpd_prodname[i] = '\0';
        pos += i;

        bge_vpd_read_res(sc, &res, pos);

        if (res.vr_id != VPD_RES_READ) {
                device_printf(sc->bge_dev,
                              "bad VPD resource id: expected %x got %x\n",
                              VPD_RES_READ, res.vr_id);
                return;
        }

        pos += sizeof(res);
        sc->bge_vpd_readonly = malloc(res.vr_len, M_DEVBUF, M_INTWAIT);
        for (i = 0; i < res.vr_len; i++)
                sc->bge_vpd_readonly[i] = bge_vpd_readbyte(sc, i + pos);

        return;
}
#endif

/*
 * Read a byte of data stored in the EEPROM at address 'addr.' The
 * BCM570x supports both the traditional bitbang interface and an
 * auto access interface for reading the EEPROM. We use the auto
 * access method.
 */
static u_int8_t
bge_eeprom_getbyte(sc, addr, dest)
        struct bge_softc *sc;
        int addr;
        u_int8_t *dest;
{
        int i;
        u_int32_t byte = 0;

        /*
         * Enable use of auto EEPROM access so we can avoid
         * having to use the bitbang method.
         */
        BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);

        /* Reset the EEPROM, load the clock period. */
        CSR_WRITE_4(sc, BGE_EE_ADDR,
            BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
        DELAY(20);

        /* Issue the read EEPROM command. */
        CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);

        /* Wait for completion */
        for (i = 0; i < BGE_TIMEOUT * 10; i++) {
                DELAY(10);
                if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
                        break;
        }

        if (i >= BGE_TIMEOUT * 10) {
                if_printf(&sc->arpcom.ac_if, "eeprom read timed out\n");
                return(0);
        }

        /* Get result. */
        byte = CSR_READ_4(sc, BGE_EE_DATA);

        *dest = (byte >> ((addr % 4) * 8)) & 0xFF;

        return(0);
}

/*
 * Read a sequence of bytes from the EEPROM.
 */
static int
bge_read_eeprom(sc, dest, off, cnt)
        struct bge_softc *sc;
        caddr_t dest;
        int off;
        int cnt;
{
        int err = 0, i;
        u_int8_t byte = 0;

        for (i = 0; i < cnt; i++) {
                err = bge_eeprom_getbyte(sc, off + i, &byte);
                if (err)
                        break;
                *(dest + i) = byte;
        }

        return(err ? 1 : 0);
}

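/*
 * Read a PHY register through the chip's MI communication interface.
 */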
static int
bge_miibus_readreg(dev, phy, reg)
        device_t dev;
        int phy, reg;
{
        struct bge_softc *sc;
        struct ifnet *ifp;
        u_int32_t val, autopoll;
        int i;

        sc = device_get_softc(dev);
        ifp = &sc->arpcom.ac_if;

        /*
         * Broadcom's own driver always assumes the internal
         * PHY is at GMII address 1. On some chips, the PHY responds
         * to accesses at all addresses, which could cause us to
         * bogusly attach the PHY 32 times at probe time. Always
         * restricting the lookup to address 1 is simpler than
         * trying to figure out which chip revisions should be
         * special-cased.
         */
        if (phy != 1)
                return(0);

        /* Reading with autopolling on may trigger PCI errors */
        autopoll = CSR_READ_4(sc, BGE_MI_MODE);
        if (autopoll & BGE_MIMODE_AUTOPOLL) {
                BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
                DELAY(40);
        }

        CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY|
            BGE_MIPHY(phy)|BGE_MIREG(reg));

        for (i = 0; i < BGE_TIMEOUT; i++) {
                val = CSR_READ_4(sc, BGE_MI_COMM);
                if (!(val & BGE_MICOMM_BUSY))
                        break;
        }

        if (i == BGE_TIMEOUT) {
                if_printf(ifp, "PHY read timed out\n");
                val = 0;
                goto done;
        }

        val = CSR_READ_4(sc, BGE_MI_COMM);

done:
        if (autopoll & BGE_MIMODE_AUTOPOLL) {
                BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
                DELAY(40);
        }

        if (val & BGE_MICOMM_READFAIL)
                return(0);

        return(val & 0xFFFF);
}

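/*
 * Write a PHY register through the MI communication interface.
 */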
static int
bge_miibus_writereg(dev, phy, reg, val)
        device_t dev;
        int phy, reg, val;
{
        struct bge_softc *sc;
        u_int32_t autopoll;
        int i;

        sc = device_get_softc(dev);

        /* Writing with autopolling on may trigger PCI errors */
        autopoll = CSR_READ_4(sc, BGE_MI_MODE);
        if (autopoll & BGE_MIMODE_AUTOPOLL) {
                BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
                DELAY(40);
        }

        CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY|
            BGE_MIPHY(phy)|BGE_MIREG(reg)|val);

        for (i = 0; i < BGE_TIMEOUT; i++) {
                if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY))
                        break;
        }

        if (autopoll & BGE_MIMODE_AUTOPOLL) {
                BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
                DELAY(40);
        }

        if (i == BGE_TIMEOUT) {
                if_printf(&sc->arpcom.ac_if, "PHY write timed out\n");
                return(0);
        }

        return(0);
}

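/*
 * Called by the MII layer when the negotiated media changes: program
 * the MAC's port mode (GMII vs. MII) and duplex setting to match the PHY.
 */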
static void
bge_miibus_statchg(dev)
        device_t dev;
{
        struct bge_softc *sc;
        struct mii_data *mii;

        sc = device_get_softc(dev);
        mii = device_get_softc(sc->bge_miibus);

        BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
        if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
                BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
        } else {
                BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
        }

        if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
                BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
        } else {
                BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
        }

        return;
}

/*
 * Handle events that have triggered interrupts.
 */
static void
bge_handle_events(sc)
        struct bge_softc                *sc;
{

        return;
}

/*
 * Memory management for jumbo frames.
 */

static int
bge_alloc_jumbo_mem(sc)
        struct bge_softc                *sc;
{
        caddr_t                 ptr;
        int             i;
        struct bge_jpool_entry   *entry;

        /* Grab a big chunk o' storage. */
        sc->bge_cdata.bge_jumbo_buf = contigmalloc(BGE_JMEM, M_DEVBUF,
                M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);

        if (sc->bge_cdata.bge_jumbo_buf == NULL) {
                if_printf(&sc->arpcom.ac_if, "no memory for jumbo buffers!\n");
                return(ENOBUFS);
        }

        SLIST_INIT(&sc->bge_jfree_listhead);
        SLIST_INIT(&sc->bge_jinuse_listhead);

        /*
         * Now divide it up into 9K pieces and save the addresses
         * in an array. Note that we play an evil trick here by using
         * the first few bytes in the buffer to hold the address
         * of the softc structure for this interface. This is because
         * bge_jfree() needs it, but it is called by the mbuf management
         * code which will not pass it to us explicitly.
         */
        ptr = sc->bge_cdata.bge_jumbo_buf;
        for (i = 0; i < BGE_JSLOTS; i++) {
                u_int64_t               **aptr;
                aptr = (u_int64_t **)ptr;
                aptr[0] = (u_int64_t *)sc;
                ptr += sizeof(u_int64_t);
                sc->bge_cdata.bge_jslots[i].bge_buf = ptr;
                sc->bge_cdata.bge_jslots[i].bge_inuse = 0;
                ptr += (BGE_JLEN - sizeof(u_int64_t));
                entry = malloc(sizeof(struct bge_jpool_entry),
                               M_DEVBUF, M_INTWAIT);
                entry->slot = i;
                SLIST_INSERT_HEAD(&sc->bge_jfree_listhead,
                    entry, jpool_entries);
        }

        return(0);
}

static void
bge_free_jumbo_mem(sc)
        struct bge_softc *sc;
{
        int i;
        struct bge_jpool_entry *entry;

        for (i = 0; i < BGE_JSLOTS; i++) {
                entry = SLIST_FIRST(&sc->bge_jfree_listhead);
                SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jpool_entries);
                free(entry, M_DEVBUF);
        }

        contigfree(sc->bge_cdata.bge_jumbo_buf, BGE_JMEM, M_DEVBUF);

        return;
}

/*
 * Allocate a jumbo buffer.
 */
static void *
bge_jalloc(sc)
        struct bge_softc                *sc;
{
        struct bge_jpool_entry   *entry;

        entry = SLIST_FIRST(&sc->bge_jfree_listhead);

        if (entry == NULL) {
                if_printf(&sc->arpcom.ac_if, "no free jumbo buffers\n");
                return(NULL);
        }

        SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jpool_entries);
        SLIST_INSERT_HEAD(&sc->bge_jinuse_listhead, entry, jpool_entries);
        sc->bge_cdata.bge_jslots[entry->slot].bge_inuse = 1;
        return(sc->bge_cdata.bge_jslots[entry->slot].bge_buf);
}

/*
 * Adjust usage count on a jumbo buffer.
 */
static void
bge_jref(buf, size)
        caddr_t                 buf;
        u_int                   size;
{
        struct bge_softc                *sc;
        u_int64_t               **aptr;
        int             i;

        /* Extract the softc struct pointer. */
        aptr = (u_int64_t **)(buf - sizeof(u_int64_t));
        sc = (struct bge_softc *)(aptr[0]);

        if (sc == NULL)
                panic("bge_jref: can't find softc pointer!");

        if (size != BGE_JUMBO_FRAMELEN)
                panic("bge_jref: adjusting refcount of buf of wrong size!");

        /* calculate the slot this buffer belongs to */

        i = ((vm_offset_t)aptr
             - (vm_offset_t)sc->bge_cdata.bge_jumbo_buf) / BGE_JLEN;

        if ((i < 0) || (i >= BGE_JSLOTS))
                panic("bge_jref: asked to reference buffer "
                    "that we don't manage!");
        else if (sc->bge_cdata.bge_jslots[i].bge_inuse == 0)
                panic("bge_jref: buffer already free!");
        else
                sc->bge_cdata.bge_jslots[i].bge_inuse++;

        return;
}

/*
 * Release a jumbo buffer.
 */
static void
bge_jfree(buf, size)
        caddr_t                 buf;
        u_int                   size;
{
        struct bge_softc                *sc;
        u_int64_t               **aptr;
        int                     i;
        struct bge_jpool_entry   *entry;

        /* Extract the softc struct pointer. */
        aptr = (u_int64_t **)(buf - sizeof(u_int64_t));
        sc = (struct bge_softc *)(aptr[0]);

        if (sc == NULL)
                panic("bge_jfree: can't find softc pointer!");

        if (size != BGE_JUMBO_FRAMELEN)
                panic("bge_jfree: freeing buffer of wrong size!");

        /* calculate the slot this buffer belongs to */

        i = ((vm_offset_t)aptr
             - (vm_offset_t)sc->bge_cdata.bge_jumbo_buf) / BGE_JLEN;

        if ((i < 0) || (i >= BGE_JSLOTS))
                panic("bge_jfree: asked to free buffer that we don't manage!");
        else if (sc->bge_cdata.bge_jslots[i].bge_inuse == 0)
                panic("bge_jfree: buffer already free!");
        else {
                sc->bge_cdata.bge_jslots[i].bge_inuse--;
                if (sc->bge_cdata.bge_jslots[i].bge_inuse == 0) {
                        entry = SLIST_FIRST(&sc->bge_jinuse_listhead);
                        if (entry == NULL)
                                panic("bge_jfree: buffer not in use!");
                        entry->slot = i;
                        SLIST_REMOVE_HEAD(&sc->bge_jinuse_listhead,
                                          jpool_entries);
                        SLIST_INSERT_HEAD(&sc->bge_jfree_listhead,
                                          entry, jpool_entries);
                }
        }

        return;
}

/*
 * Initialize a standard receive ring descriptor.
 */
static int
bge_newbuf_std(sc, i, m)
        struct bge_softc        *sc;
        int                     i;
        struct mbuf             *m;
{
        struct mbuf             *m_new = NULL;
        struct bge_rx_bd        *r;

        if (m == NULL) {
                MGETHDR(m_new, MB_DONTWAIT, MT_DATA);
                if (m_new == NULL) {
                        return(ENOBUFS);
                }

                MCLGET(m_new, MB_DONTWAIT);
                if (!(m_new->m_flags & M_EXT)) {
                        m_freem(m_new);
                        return(ENOBUFS);
                }
                m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
        } else {
                m_new = m;
                m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
                m_new->m_data = m_new->m_ext.ext_buf;
        }

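        /*
         * Shift the payload up by two bytes so the IP header is
         * longword-aligned, unless this chip rev has the RX
         * alignment bug.
         */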
        if (!sc->bge_rx_alignment_bug)
                m_adj(m_new, ETHER_ALIGN);
        sc->bge_cdata.bge_rx_std_chain[i] = m_new;
        r = &sc->bge_rdata->bge_rx_std_ring[i];
        BGE_HOSTADDR(r->bge_addr, vtophys(mtod(m_new, caddr_t)));
        r->bge_flags = BGE_RXBDFLAG_END;
        r->bge_len = m_new->m_len;
        r->bge_idx = i;

        return(0);
}

/*
 * Initialize a jumbo receive ring descriptor. This allocates
 * a jumbo buffer from the pool managed internally by the driver.
 */
static int
bge_newbuf_jumbo(sc, i, m)
        struct bge_softc *sc;
        int i;
        struct mbuf *m;
{
        struct mbuf *m_new = NULL;
        struct bge_rx_bd *r;

        if (m == NULL) {
                caddr_t                 *buf = NULL;

                /* Allocate the mbuf. */
                MGETHDR(m_new, MB_DONTWAIT, MT_DATA);
                if (m_new == NULL) {
                        return(ENOBUFS);
                }

                /* Allocate the jumbo buffer */
                buf = bge_jalloc(sc);
                if (buf == NULL) {
                        m_freem(m_new);
                        if_printf(&sc->arpcom.ac_if, "jumbo allocation failed "
                            "-- packet dropped!\n");
                        return(ENOBUFS);
                }

                /* Attach the buffer to the mbuf. */
                m_new->m_data = m_new->m_ext.ext_buf = (void *)buf;
                m_new->m_flags |= M_EXT | M_EXT_OLD;
                m_new->m_len = m_new->m_pkthdr.len =
                    m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN;
                m_new->m_ext.ext_nfree.old = bge_jfree;
                m_new->m_ext.ext_nref.old = bge_jref;
        } else {
                m_new = m;
                m_new->m_data = m_new->m_ext.ext_buf;
                m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN;
        }

        if (!sc->bge_rx_alignment_bug)
                m_adj(m_new, ETHER_ALIGN);
        /* Set up the descriptor. */
        r = &sc->bge_rdata->bge_rx_jumbo_ring[i];
        sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new;
        BGE_HOSTADDR(r->bge_addr, vtophys(mtod(m_new, caddr_t)));
        r->bge_flags = BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING;
        r->bge_len = m_new->m_len;
        r->bge_idx = i;

        return(0);
}

/*
 * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
 * that's 1MB of memory, which is a lot. For now, we fill only the first
 * 256 ring entries and hope that our CPU is fast enough to keep up with
 * the NIC.
 */
static int
bge_init_rx_ring_std(sc)
        struct bge_softc *sc;
{
        int i;

        for (i = 0; i < BGE_SSLOTS; i++) {
                if (bge_newbuf_std(sc, i, NULL) == ENOBUFS)
                        return(ENOBUFS);
        }

        sc->bge_std = i - 1;
        CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);

        return(0);
}

static void
bge_free_rx_ring_std(sc)
        struct bge_softc *sc;
{
        int i;

        for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
                if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
                        m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
                        sc->bge_cdata.bge_rx_std_chain[i] = NULL;
                }
                bzero((char *)&sc->bge_rdata->bge_rx_std_ring[i],
                    sizeof(struct bge_rx_bd));
        }

        return;
}

static int
bge_init_rx_ring_jumbo(sc)
        struct bge_softc *sc;
{
        int i;
        struct bge_rcb *rcb;

        for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
                if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
                        return(ENOBUFS);
        }

        sc->bge_jumbo = i - 1;

        rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
        rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 0);
        CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);

        CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);

        return(0);
}

static void
bge_free_rx_ring_jumbo(sc)
        struct bge_softc *sc;
{
        int i;

        for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
                if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
                        m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
                        sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
                }
                bzero((char *)&sc->bge_rdata->bge_rx_jumbo_ring[i],
                    sizeof(struct bge_rx_bd));
        }

        return;
}

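/* Release any TX mbufs still in flight and clear the descriptors. */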
static void
bge_free_tx_ring(sc)
        struct bge_softc *sc;
{
        int i;

        if (sc->bge_rdata->bge_tx_ring == NULL)
                return;

        for (i = 0; i < BGE_TX_RING_CNT; i++) {
                if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
                        m_freem(sc->bge_cdata.bge_tx_chain[i]);
                        sc->bge_cdata.bge_tx_chain[i] = NULL;
                }
                bzero((char *)&sc->bge_rdata->bge_tx_ring[i],
                    sizeof(struct bge_tx_bd));
        }

        return;
}

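/* Reset TX ring bookkeeping and both host and NIC producer mailboxes. */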
static int
bge_init_tx_ring(sc)
        struct bge_softc *sc;
{
        sc->bge_txcnt = 0;
        sc->bge_tx_saved_considx = 0;

        CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0);
        /* 5700 b2 errata */
        if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
                CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0);

        CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
        /* 5700 b2 errata */
        if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
                CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);

        return(0);
}

#define BGE_POLY        0xEDB88320

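/*
 * Compute the Ethernet CRC-32 (reflected polynomial 0xEDB88320) of a
 * 6-byte MAC address; the low 7 bits of the result index the chip's
 * 128-bit multicast hash filter.
 */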
static u_int32_t
bge_crc(addr)
        caddr_t addr;
{
        u_int32_t idx, bit, data, crc;

        /* Compute CRC for the address value. */
        crc = 0xFFFFFFFF; /* initial value */

        for (idx = 0; idx < 6; idx++) {
                for (data = *addr++, bit = 0; bit < 8; bit++, data >>= 1)
                        crc = (crc >> 1) ^ (((crc ^ data) & 1) ? BGE_POLY : 0);
        }

        return(crc & 0x7F);
}

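/*
 * Program the 128-bit multicast hash filter from the interface's
 * multicast address list.
 */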
static void
bge_setmulti(sc)
        struct bge_softc *sc;
{
        struct ifnet *ifp;
        struct ifmultiaddr *ifma;
        u_int32_t hashes[4] = { 0, 0, 0, 0 };
        int h, i;

        ifp = &sc->arpcom.ac_if;

        if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
                for (i = 0; i < 4; i++)
                        CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
                return;
        }

        /* First, zot all the existing filters. */
        for (i = 0; i < 4; i++)
                CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);

        /* Now program new ones. */
        for (ifma = ifp->if_multiaddrs.lh_first;
            ifma != NULL; ifma = ifma->ifma_link.le_next) {
                if (ifma->ifma_addr->sa_family != AF_LINK)
                        continue;
                h = bge_crc(LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
                hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
        }

        for (i = 0; i < 4; i++)
                CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);

        return;
}

/*
 * Do endian, PCI and DMA initialization. Also check the on-board ROM
 * self-test results.
 */
static int
bge_chipinit(sc)
        struct bge_softc *sc;
{
        int                     i;
        u_int32_t               dma_rw_ctl;

        /* Set endianness before we access any non-PCI registers. */
#if BYTE_ORDER == BIG_ENDIAN
        pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL,
            BGE_BIGENDIAN_INIT, 4);
#else
        pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL,
            BGE_LITTLEENDIAN_INIT, 4);
#endif

        /*
         * Check the 'ROM failed' bit on the RX CPU to see if
         * self-tests passed.
         */
        if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL) {
                if_printf(&sc->arpcom.ac_if,
                          "RX CPU self-diagnostics failed!\n");
                return(ENODEV);
        }

        /* Clear the MAC control register */
        CSR_WRITE_4(sc, BGE_MAC_MODE, 0);

        /*
         * Clear the MAC statistics block in the NIC's
         * internal memory.
         */
        for (i = BGE_STATS_BLOCK;
            i < BGE_STATS_BLOCK_END + 1; i += sizeof(u_int32_t))
                BGE_MEMWIN_WRITE(sc, i, 0);

        for (i = BGE_STATUS_BLOCK;
            i < BGE_STATUS_BLOCK_END + 1; i += sizeof(u_int32_t))
                BGE_MEMWIN_WRITE(sc, i, 0);

        /* Set up the PCI DMA control register. */
        if (pci_read_config(sc->bge_dev, BGE_PCI_PCISTATE, 4) &
            BGE_PCISTATE_PCI_BUSMODE) {
                /* Conventional PCI bus */
                dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
                    (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
                    (0x7 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
                    (0x0F);
        } else {
                /* PCI-X bus */
                /*
                 * The 5704 uses a different encoding of read/write
                 * watermarks.
                 */
                if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
                        dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
                            (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
                            (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
                else
                        dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
                            (0x3 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
                            (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
                            (0x0F);

                /*
                 * 5703 and 5704 need ONEDMA_AT_ONCE as a workaround
                 * for hardware bugs.
                 */
                if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
                    sc->bge_asicrev == BGE_ASICREV_BCM5704) {
                        u_int32_t tmp;

                        tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1f;
                        if (tmp == 0x6 || tmp == 0x7)
                                dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE;
                }
        }

        if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
            sc->bge_asicrev == BGE_ASICREV_BCM5704 ||
            sc->bge_asicrev == BGE_ASICREV_BCM5705)
                dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
        pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);

        /*
         * Set up general mode register.
         */
        CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_MODECTL_WORDSWAP_NONFRAME|
            BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA|
            BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS|
            BGE_MODECTL_TX_NO_PHDR_CSUM|BGE_MODECTL_RX_NO_PHDR_CSUM);

        /*
         * Disable memory write invalidate.  Apparently it is not supported
         * properly by these devices.
         */
        PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD, PCIM_CMD_MWIEN, 4);

        /* Set the timer prescaler (always 66MHz) */
        CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/);

        return(0);
}

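/*
 * Bring the chip's internal blocks on line: buffer manager, RX/TX
 * rings, host coalescing and the various DMA state machines.
 */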
static int
bge_blockinit(sc)
        struct bge_softc *sc;
{
        struct bge_rcb *rcb;
        volatile struct bge_rcb *vrcb;
        int i;

        /*
         * Initialize the memory window pointer register so that
         * we can access the first 32K of internal NIC RAM. This will
         * allow us to set up the TX send ring RCBs and the RX return
         * ring RCBs, plus other things which live in NIC memory.
         */
        CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);

        /* Note: the BCM5704 has a smaller mbuf space than other chips. */

        if (sc->bge_asicrev != BGE_ASICREV_BCM5705) {
                /* Configure mbuf memory pool */
                if (sc->bge_extram) {
                        CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
                            BGE_EXT_SSRAM);
                        if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
                                CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
                        else
                                CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
                } else {
                        CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
                            BGE_BUFFPOOL_1);
                        if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
                                CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
                        else
                                CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
                }

                /* Configure DMA resource pool */
                CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
                    BGE_DMA_DESCRIPTORS);
                CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
        }

        /* Configure mbuf pool watermarks */
        if (sc->bge_asicrev == BGE_ASICREV_BCM5705) {
                CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
                CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
        } else {
                CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
                CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
        }
        CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);

        /* Configure DMA resource watermarks */
        CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
        CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);

        /* Enable buffer manager */
        if (sc->bge_asicrev != BGE_ASICREV_BCM5705) {
                CSR_WRITE_4(sc, BGE_BMAN_MODE,
                    BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN);

                /* Poll for buffer manager start indication */
                for (i = 0; i < BGE_TIMEOUT; i++) {
                        if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
                                break;
                        DELAY(10);
                }

                if (i == BGE_TIMEOUT) {
                        if_printf(&sc->arpcom.ac_if,
                                  "buffer manager failed to start\n");
                        return(ENXIO);
                }
        }

        /* Enable flow-through queues */
        CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
        CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);

        /* Wait until queue initialization is complete */
        for (i = 0; i < BGE_TIMEOUT; i++) {
                if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
                        break;
                DELAY(10);
        }

        if (i == BGE_TIMEOUT) {
                if_printf(&sc->arpcom.ac_if,
                          "flow-through queue init failed\n");
                return(ENXIO);
        }

        /* Initialize the standard RX ring control block */
        rcb = &sc->bge_rdata->bge_info.bge_std_rx_rcb;
        BGE_HOSTADDR(rcb->bge_hostaddr,
            vtophys(&sc->bge_rdata->bge_rx_std_ring));
        if (sc->bge_asicrev == BGE_ASICREV_BCM5705)
                rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
        else
                rcb->bge_maxlen_flags =
                    BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
        if (sc->bge_extram)
                rcb->bge_nicaddr = BGE_EXT_STD_RX_RINGS;
        else
                rcb->bge_nicaddr = BGE_STD_RX_RINGS;
        CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
        CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
        CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
        CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);

        /*
         * Initialize the jumbo RX ring control block
         * We set the 'ring disabled' bit in the flags
         * field until we're actually ready to start
         * using this ring (i.e. once we set the MTU
         * high enough to require it).
         */
        if (sc->bge_asicrev != BGE_ASICREV_BCM5705) {
                rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
                BGE_HOSTADDR(rcb->bge_hostaddr,
                    vtophys(&sc->bge_rdata->bge_rx_jumbo_ring));
                rcb->bge_maxlen_flags =
                    BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN,
                    BGE_RCB_FLAG_RING_DISABLED);
                if (sc->bge_extram)
                        rcb->bge_nicaddr = BGE_EXT_JUMBO_RX_RINGS;
                else
                        rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
                CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
                    rcb->bge_hostaddr.bge_addr_hi);
                CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
                    rcb->bge_hostaddr.bge_addr_lo);
                CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
                    rcb->bge_maxlen_flags);
                CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);

                /* Set up dummy disabled mini ring RCB */
                rcb = &sc->bge_rdata->bge_info.bge_mini_rx_rcb;
                rcb->bge_maxlen_flags =
                    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
                CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
                    rcb->bge_maxlen_flags);
        }

        /*
         * Set the BD ring replenish thresholds. The recommended
         * values are 1/8th the number of descriptors allocated to
         * each ring.
         */
        CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, BGE_STD_RX_RING_CNT/8);
        CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8);

        /*
         * Disable all unused send rings by setting the 'ring disabled'
         * bit in the flags field of all the TX send ring control blocks.
         * These are located in NIC memory.
         */
        vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START +
            BGE_SEND_RING_RCB);
        for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) {
                vrcb->bge_maxlen_flags =
                    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
                vrcb->bge_nicaddr = 0;
                vrcb++;
        }

        /* Configure TX RCB 0 (we use only the first ring) */
        vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START +
            BGE_SEND_RING_RCB);
        vrcb->bge_hostaddr.bge_addr_hi = 0;
        BGE_HOSTADDR(vrcb->bge_hostaddr, vtophys(&sc->bge_rdata->bge_tx_ring));
        vrcb->bge_nicaddr = BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT);
        if (sc->bge_asicrev != BGE_ASICREV_BCM5705)
                vrcb->bge_maxlen_flags =
                    BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0);

        /* Disable all unused RX return rings */
        vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START +
            BGE_RX_RETURN_RING_RCB);
        for (i = 0; i < BGE_RX_RINGS_MAX; i++) {
                vrcb->bge_hostaddr.bge_addr_hi = 0;
                vrcb->bge_hostaddr.bge_addr_lo = 0;
                vrcb->bge_maxlen_flags =
                    BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt,
                    BGE_RCB_FLAG_RING_DISABLED);
                vrcb->bge_nicaddr = 0;
                CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO +
                    (i * (sizeof(u_int64_t))), 0);
                vrcb++;
        }

        /* Initialize RX ring indexes */
        CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, 0);
        CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
        CSR_WRITE_4(sc, BGE_MBX_RX_MINI_PROD_LO, 0);

        /*
         * Set up RX return ring 0
         * Note that the NIC address for RX return rings is 0x00000000.
         * The return rings live entirely within the host, so the
         * nicaddr field in the RCB isn't used.
         */
        vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START +
            BGE_RX_RETURN_RING_RCB);
        vrcb->bge_hostaddr.bge_addr_hi = 0;
        BGE_HOSTADDR(vrcb->bge_hostaddr,
            vtophys(&sc->bge_rdata->bge_rx_return_ring));
        vrcb->bge_nicaddr = 0x00000000;
        vrcb->bge_maxlen_flags =
            BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0);

        /* Set random backoff seed for TX */
        CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
            sc->arpcom.ac_enaddr[0] + sc->arpcom.ac_enaddr[1] +
            sc->arpcom.ac_enaddr[2] + sc->arpcom.ac_enaddr[3] +
            sc->arpcom.ac_enaddr[4] + sc->arpcom.ac_enaddr[5] +
            BGE_TX_BACKOFF_SEED_MASK);

        /* Set inter-packet gap */
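        /*
         * (0x2620 is assumed here to encode Broadcom's recommended
         * slot time and inter-packet gap values.)
         */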
        CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);

        /*
         * Specify which ring to use for packets that don't match
         * any RX rules.
         */
        CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);

        /*
         * Configure number of RX lists. One interrupt distribution
         * list, sixteen active lists, one bad frames class.
         */
        CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);

        /* Initialize RX list placement stats mask. */
        CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
        CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);

        /* Disable host coalescing until we get it set up */
        CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);

        /* Poll to make sure it's shut down. */
        for (i = 0; i < BGE_TIMEOUT; i++) {
                if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
                        break;
                DELAY(10);
        }

        if (i == BGE_TIMEOUT) {
                if_printf(&sc->arpcom.ac_if,
                          "host coalescing engine failed to idle\n");
                return(ENXIO);
        }

        /* Set up host coalescing defaults */
        CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
        CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
        CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
        CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
        if (sc->bge_asicrev != BGE_ASICREV_BCM5705) {
                CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
                CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
        }
        CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0);
        CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0);

        /* Set up address of statistics block */
        if (sc->bge_asicrev != BGE_ASICREV_BCM5705) {
                CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI, 0);
                CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
                    vtophys(&sc->bge_rdata->bge_info.bge_stats));

                CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
                CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
                CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
        }

        /* Set up address of status block */
        CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI, 0);
        CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
            vtophys(&sc->bge_rdata->bge_status_block));

        sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx = 0;
        sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx = 0;

        /* Turn on host coalescing state machine */
        CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);

        /* Turn on RX BD completion state machine and enable attentions */
        CSR_WRITE_4(sc, BGE_RBDC_MODE,
            BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);

        /* Turn on RX list placement state machine */
        CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);

        /* Turn on RX list selector state machine. */
        if (sc->bge_asicrev != BGE_ASICREV_BCM5705)
                CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);

        /* Turn on DMA, clear stats */
        CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB|
            BGE_MACMODE_RXDMA_ENB|BGE_MACMODE_RX_STATS_CLEAR|
            BGE_MACMODE_TX_STATS_CLEAR|BGE_MACMODE_RX_STATS_ENB|
            BGE_MACMODE_TX_STATS_ENB|BGE_MACMODE_FRMHDR_DMA_ENB|
            (sc->bge_tbi ? BGE_PORTMODE_TBI : BGE_PORTMODE_MII));

        /* Set misc. local control, enable interrupts on attentions */
        CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);

#ifdef notdef
        /* Assert GPIO pins for PHY reset */
        BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
            BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
        BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
            BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
#endif

        /* Turn on DMA completion state machine */
        if (sc->bge_asicrev != BGE_ASICREV_BCM5705)
                CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);

        /* Turn on write DMA state machine */
        CSR_WRITE_4(sc, BGE_WDMA_MODE,
            BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS);

        /* Turn on read DMA state machine */
        CSR_WRITE_4(sc, BGE_RDMA_MODE,
            BGE_RDMAMODE_ENABLE|BGE_RDMAMODE_ALL_ATTNS);

        /* Turn on RX data completion state machine */
        CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);

        /* Turn on RX BD initiator state machine */
        CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);

        /* Turn on RX data and RX BD initiator state machine */
        CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);

        /* Turn on Mbuf cluster free state machine */
        if (sc->bge_asicrev != BGE_ASICREV_BCM5705)
                CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);

        /* Turn on send BD completion state machine */
        CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);

        /* Turn on send data completion state machine */
        CSR_WRITE_4(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);

        /* Turn on send data initiator state machine */
        CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);

        /* Turn on send BD initiator state machine */
        CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);

        /* Turn on send BD selector state machine */
        CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
1562
1563         CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
1564         CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
1565             BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);
1566
1567         /* ack/clear link change events */
1568         CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
1569             BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
1570             BGE_MACSTAT_LINK_CHANGED);
1571
1572         /* Enable PHY auto polling (for MII/GMII only) */
1573         if (sc->bge_tbi) {
1574                 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
1575         } else {
1576                 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL | (10 << 16));
1577                 if (sc->bge_asicrev == BGE_ASICREV_BCM5700)
1578                         CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
1579                             BGE_EVTENB_MI_INTERRUPT);
1580         }
1581
1582         /* Enable link state change attentions. */
1583         BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
1584
1585         return(0);
1586 }
1587
1588 /*
1589  * Probe for a Broadcom chip. Check the PCI vendor and device IDs
1590  * against our list and return its name if we find a match. The
1591  * controller also contains VPD support, which would let us read the
1592  * product name string from the chip itself, but that code path is
1593  * currently disabled (see the "notdef" block below), so we announce
1594  * the compiled-in name plus the ASIC revision instead.
1595  */
1596 static int
1597 bge_probe(dev)
1598         device_t dev;
1599 {
1600         struct bge_type *t;
1601         struct bge_softc *sc;
1602         char *descbuf;
1603
1604         t = bge_devs;
1605
1606         sc = device_get_softc(dev);
1607         bzero(sc, sizeof(struct bge_softc));
1608         sc->bge_dev = dev;
1609
1610         while(t->bge_name != NULL) {
1611                 if ((pci_get_vendor(dev) == t->bge_vid) &&
1612                     (pci_get_device(dev) == t->bge_did)) {
1613 #ifdef notdef
1614                         bge_vpd_read(sc);
1615                         device_set_desc(dev, sc->bge_vpd_prodname);
1616 #endif
1617                         descbuf = malloc(BGE_DEVDESC_MAX, M_TEMP, M_INTWAIT);
1618                         snprintf(descbuf, BGE_DEVDESC_MAX,
1619                             "%s, ASIC rev. %#04x", t->bge_name,
1620                             pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >> 16);
1621                         device_set_desc_copy(dev, descbuf);
1622                         if (pci_get_subvendor(dev) == PCI_VENDOR_DELL)
1623                                 sc->bge_no_3_led = 1;
1624                         free(descbuf, M_TEMP);
1625                         return(0);
1626                 }
1627                 t++;
1628         }
1629
1630         return(ENXIO);
1631 }
1632
1633 static int
1634 bge_attach(dev)
1635         device_t dev;
1636 {
1637         int s;
1638         u_int32_t command;
1639         struct ifnet *ifp;
1640         struct bge_softc *sc;
1641         u_int32_t hwcfg = 0;
1642         u_int32_t mac_addr = 0;
1643         int error = 0, rid;
1644         uint8_t ether_addr[ETHER_ADDR_LEN];
1645
1646         s = splimp();
1647
1648         sc = device_get_softc(dev);
1649         sc->bge_dev = dev;
1650         callout_init(&sc->bge_stat_timer);
1651
1652         /*
1653          * Map control/status registers.
1654          */
1655         pci_enable_busmaster(dev);
1656         pci_enable_io(dev, SYS_RES_MEMORY);
1657         command = pci_read_config(dev, PCIR_COMMAND, 4);
1658
1659         if (!(command & PCIM_CMD_MEMEN)) {
1660                 device_printf(dev, "failed to enable memory mapping!\n");
1661                 error = ENXIO;
1662                 goto fail;
1663         }
1664
1665         rid = BGE_PCI_BAR0;
1666         sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
1667             RF_ACTIVE);
1668
1669         if (sc->bge_res == NULL) {
1670                 device_printf(dev, "couldn't map memory\n");
1671                 error = ENXIO;
1672                 goto fail;
1673         }
1674
1675         sc->bge_btag = rman_get_bustag(sc->bge_res);
1676         sc->bge_bhandle = rman_get_bushandle(sc->bge_res);
1677         sc->bge_vhandle = (vm_offset_t)rman_get_virtual(sc->bge_res);
1678
1679         /* Allocate interrupt */
1680         rid = 0;
1681         
1682         sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1683             RF_SHAREABLE | RF_ACTIVE);
1684
1685         if (sc->bge_irq == NULL) {
1686                 device_printf(dev, "couldn't map interrupt\n");
1687                 error = ENXIO;
1688                 goto fail;
1689         }
1690
1691         error = bus_setup_intr(dev, sc->bge_irq, INTR_TYPE_NET,
1692            bge_intr, sc, &sc->bge_intrhand);
1693
1694         if (error) {
1695                 bge_release_resources(sc);
1696                 device_printf(dev, "couldn't set up irq\n");
1697                 goto fail;
1698         }
1699
1700         ifp = &sc->arpcom.ac_if;
1701         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1702
1703         /* Try to reset the chip. */
1704         bge_reset(sc);
1705
1706         if (bge_chipinit(sc)) {
1707                 device_printf(dev, "chip initialization failed\n");
1708                 bge_release_resources(sc);
1709                 error = ENXIO;
1710                 goto fail;
1711         }
1712
1713         /*
1714          * Get station address from the EEPROM.
1715          */
1716         mac_addr = bge_readmem_ind(sc, 0x0c14);
1717         if ((mac_addr >> 16) == 0x484b) {
1718                 ether_addr[0] = (uint8_t)(mac_addr >> 8);
1719                 ether_addr[1] = (uint8_t)mac_addr;
1720                 mac_addr = bge_readmem_ind(sc, 0x0c18);
1721                 ether_addr[2] = (uint8_t)(mac_addr >> 24);
1722                 ether_addr[3] = (uint8_t)(mac_addr >> 16);
1723                 ether_addr[4] = (uint8_t)(mac_addr >> 8);
1724                 ether_addr[5] = (uint8_t)mac_addr;
1725         } else if (bge_read_eeprom(sc, ether_addr,
1726             BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) {
1727                 device_printf(dev, "failed to read station address\n");
1728                 bge_release_resources(sc);
1729                 error = ENXIO;
1730                 goto fail;
1731         }
1732
1733         /* Allocate the general information block and ring buffers. */
1734         sc->bge_rdata = contigmalloc(sizeof(struct bge_ring_data), M_DEVBUF,
1735             M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);
1736
1737         if (sc->bge_rdata == NULL) {
1738                 bge_release_resources(sc);
1739                 error = ENXIO;
1740                 device_printf(dev, "no memory for list buffers!\n");
1741                 goto fail;
1742         }
1743
1744         bzero(sc->bge_rdata, sizeof(struct bge_ring_data));
1745
1746         /* Save ASIC rev. */
1747
1748         sc->bge_chipid =
1749             pci_read_config(dev, BGE_PCI_MISC_CTL, 4) &
1750             BGE_PCIMISCCTL_ASICREV;
1751         sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid);
1752         sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid);
1753
1754         /*
1755          * Try to allocate memory for jumbo buffers.
1756          * The 5705 does not appear to support jumbo frames.
1757          */
1758         if (sc->bge_asicrev != BGE_ASICREV_BCM5705) {
1759                 if (bge_alloc_jumbo_mem(sc)) {
1760                         device_printf(dev, "jumbo buffer allocation failed\n");
1761                         bge_release_resources(sc);
1762                         error = ENXIO;
1763                         goto fail;
1764                 }
1765         }
1766
1767         /* Set default tuneable values. */
1768         sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
1769         sc->bge_rx_coal_ticks = 150;
1770         sc->bge_tx_coal_ticks = 150;
1771         sc->bge_rx_max_coal_bds = 64;
1772         sc->bge_tx_max_coal_bds = 128;
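        /*
         * Coalescing trade-off: larger tick/BD thresholds mean fewer
         * interrupts per second at the cost of added receive latency.
         */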
1773
1774         /* 5705 limits RX return ring to 512 entries. */
1775         if (sc->bge_asicrev == BGE_ASICREV_BCM5705)
1776                 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
1777         else
1778                 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
1779
1780         /* Set up ifnet structure */
1781         ifp->if_softc = sc;
1782         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1783         ifp->if_ioctl = bge_ioctl;
1784         ifp->if_start = bge_start;
1785         ifp->if_watchdog = bge_watchdog;
1786         ifp->if_init = bge_init;
1787         ifp->if_mtu = ETHERMTU;
1788         ifq_set_maxlen(&ifp->if_snd, BGE_TX_RING_CNT - 1);
1789         ifq_set_ready(&ifp->if_snd);
1790         ifp->if_hwassist = BGE_CSUM_FEATURES;
1791         ifp->if_capabilities = IFCAP_HWCSUM;
1792         ifp->if_capenable = ifp->if_capabilities;
1793
1794         /*
1795          * Figure out what sort of media we have by checking the
1796          * hardware config word in the first 32k of NIC internal memory,
1797          * or fall back to examining the EEPROM if necessary.
1798          * Note: on some BCM5700 cards, this value appears to be unset.
1799          * If that's the case, we have to rely on identifying the NIC
1800          * by its PCI subsystem ID, as we do below for the SysKonnect
1801          * SK-9D41.
1802          */
1803         if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER)
1804                 hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
1805         else {
1806                 bge_read_eeprom(sc, (caddr_t)&hwcfg,
1807                                 BGE_EE_HWCFG_OFFSET, sizeof(hwcfg));
1808                 hwcfg = ntohl(hwcfg);
1809         }
1810
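        /*
         * TBI (ten-bit interface) means a fiber NIC whose SerDes is
         * driven directly by the MAC, with no MII/GMII PHY attached.
         */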
1811         if ((hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER)
1812                 sc->bge_tbi = 1;
1813
1814         /* The SysKonnect SK-9D41 is a 1000baseSX card. */
1815         if (pci_get_subvendor(dev) == PCI_PRODUCT_SCHNEIDERKOCH_SK_9D41)
1816                 sc->bge_tbi = 1;
1817
1818         if (sc->bge_tbi) {
1819                 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK,
1820                     bge_ifmedia_upd, bge_ifmedia_sts);
1821                 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
1822                 ifmedia_add(&sc->bge_ifmedia,
1823                     IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL);
1824                 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
1825                 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO);
1826         } else {
1827                 /*
1828                  * Do transceiver setup.
1829                  */
1830                 if (mii_phy_probe(dev, &sc->bge_miibus,
1831                     bge_ifmedia_upd, bge_ifmedia_sts)) {
1832                         device_printf(dev, "MII without any PHY!\n");
1833                         bge_release_resources(sc);
1834                         if (sc->bge_asicrev != BGE_ASICREV_BCM5705)
                                bge_free_jumbo_mem(sc);
1835                         error = ENXIO;
1836                         goto fail;
1837                 }
1838         }
1839
1840         /*
1841          * When using the BCM5701 in PCI-X mode, data corruption has
1842          * been observed in the first few bytes of some received packets.
1843          * Aligning the packet buffer in memory eliminates the corruption.
1844          * Unfortunately, this misaligns the packet payloads.  On platforms
1845          * which do not support unaligned accesses, we will realign the
1846          * payloads by copying the received packets.
1847          */
1848         switch (sc->bge_chipid) {
1849         case BGE_CHIPID_BCM5701_A0:
1850         case BGE_CHIPID_BCM5701_B0:
1851         case BGE_CHIPID_BCM5701_B2:
1852         case BGE_CHIPID_BCM5701_B5:
1853                 /* If in PCI-X mode, work around the alignment bug. */
1854                 if ((pci_read_config(dev, BGE_PCI_PCISTATE, 4) &
1855                     (BGE_PCISTATE_PCI_BUSMODE | BGE_PCISTATE_PCI_BUSSPEED)) ==
1856                     BGE_PCISTATE_PCI_BUSSPEED)
1857                         sc->bge_rx_alignment_bug = 1;
1858                 break;
1859         }
1860
1861         /*
1862          * Call MI attach routine.
1863          */
1864         ether_ifattach(ifp, ether_addr);
1865
1866 fail:
1867         splx(s);
1868
1869         return(error);
1870 }
1871
1872 static int
1873 bge_detach(dev)
1874         device_t dev;
1875 {
1876         struct bge_softc *sc;
1877         struct ifnet *ifp;
1878         int s;
1879
1880         s = splimp();
1881
1882         sc = device_get_softc(dev);
1883         ifp = &sc->arpcom.ac_if;
1884
1885         ether_ifdetach(ifp);
1886         bge_stop(sc);
1887         bge_reset(sc);
1888
1889         if (sc->bge_tbi) {
1890                 ifmedia_removeall(&sc->bge_ifmedia);
1891         } else {
1892                 bus_generic_detach(dev);
1893                 device_delete_child(dev, sc->bge_miibus);
1894         }
1895
1896         bge_release_resources(sc);
1897         if (sc->bge_asicrev != BGE_ASICREV_BCM5705)
1898                 bge_free_jumbo_mem(sc);
1899
1900         splx(s);
1901
1902         return(0);
1903 }
1904
1905 static void
1906 bge_release_resources(sc)
1907         struct bge_softc *sc;
1908 {
1909         device_t dev;
1910
1911         dev = sc->bge_dev;
1912
1913         if (sc->bge_vpd_prodname != NULL)
1914                 free(sc->bge_vpd_prodname, M_DEVBUF);
1915
1916         if (sc->bge_vpd_readonly != NULL)
1917                 free(sc->bge_vpd_readonly, M_DEVBUF);
1918
1919         if (sc->bge_intrhand != NULL)
1920                 bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand);
1921
1922         if (sc->bge_irq != NULL)
1923                 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->bge_irq);
1924
1925         if (sc->bge_res != NULL)
1926                 bus_release_resource(dev, SYS_RES_MEMORY,
1927                     BGE_PCI_BAR0, sc->bge_res);
1928
1929         if (sc->bge_rdata != NULL)
1930                 contigfree(sc->bge_rdata,
1931                     sizeof(struct bge_ring_data), M_DEVBUF);
1932
1933         return;
1934 }
1935
1936 static void
1937 bge_reset(sc)
1938         struct bge_softc *sc;
1939 {
1940         device_t dev;
1941         u_int32_t cachesize, command, pcistate;
1942         int i, val = 0;
1943
1944         dev = sc->bge_dev;
1945
1946         /* Save some important PCI state. */
1947         cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
1948         command = pci_read_config(dev, BGE_PCI_CMD, 4);
1949         pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);
1950
1951         pci_write_config(dev, BGE_PCI_MISC_CTL,
1952             BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
1953             BGE_PCIMISCCTL_ENDIAN_WORDSWAP|BGE_PCIMISCCTL_PCISTATE_RW, 4);
1954
1955         /* Issue global reset */
1956         bge_writereg_ind(sc, BGE_MISC_CFG,
1957             BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1));
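        /*
         * (The (65 << 1) above appears to program the core-clock
         * prescaler for a 66MHz clock; later drivers name this value
         * BGE_32BITTIME_66MHZ.)
         */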
1958
1959         DELAY(1000);
1960
1961         /* Reset some of the PCI state that got zapped by reset */
1962         pci_write_config(dev, BGE_PCI_MISC_CTL,
1963             BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
1964             BGE_PCIMISCCTL_ENDIAN_WORDSWAP|BGE_PCIMISCCTL_PCISTATE_RW, 4);
1965         pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
1966         pci_write_config(dev, BGE_PCI_CMD, command, 4);
1967         bge_writereg_ind(sc, BGE_MISC_CFG, (65 << 1));
1968
1969         /*
1970          * Prevent PXE restart: write a magic number to the
1971          * general communications memory at 0xB50.
1972          */
1973         bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
1974         /*
1975          * Poll the value location we just wrote until
1976          * we see the 1's complement of the magic number.
1977          * This indicates that the firmware initialization
1978          * is complete.
1979          */
1980         for (i = 0; i < BGE_TIMEOUT; i++) {
1981                 val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
1982                 if (val == ~BGE_MAGIC_NUMBER)
1983                         break;
1984                 DELAY(10);
1985         }
1986         
1987         if (i == BGE_TIMEOUT) {
1988                 if_printf(&sc->arpcom.ac_if, "firmware handshake timed out\n");
1989                 return;
1990         }
1991
1992         /*
1993          * XXX Wait for the value of the PCISTATE register to
1994          * return to its original pre-reset state. This is a
1995          * fairly good indicator of reset completion. If we don't
1996          * wait for the reset to fully complete, trying to read
1997          * from the device's non-PCI registers may yield garbage
1998          * results.
1999          */
2000         for (i = 0; i < BGE_TIMEOUT; i++) {
2001                 if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate)
2002                         break;
2003                 DELAY(10);
2004         }
2005
2006         /* Enable memory arbiter. */
2007         if (sc->bge_asicrev != BGE_ASICREV_BCM5705)
2008                 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
2009
2010         /* Fix up byte swapping */
2011         CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_MODECTL_BYTESWAP_NONFRAME|
2012             BGE_MODECTL_BYTESWAP_DATA);
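        /*
         * (The 570x core is big-endian internally; these swap bits
         * make non-frame data and frame data read correctly on
         * little-endian hosts.)
         */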
2013
2014         CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
2015
2016         DELAY(10000);
2017
2018         return;
2019 }
2020
2021 /*
2022  * Frame reception handling. This is called if there's a frame
2023  * on the receive return list.
2024  *
2025  * Note: we have to be able to handle two possibilities here:
2026  * 1) the frame is from the jumbo receive ring
2027  * 2) the frame is from the standard receive ring
2028  */
2029
2030 static void
2031 bge_rxeof(sc)
2032         struct bge_softc *sc;
2033 {
2034         struct ifnet *ifp;
2035         int stdcnt = 0, jumbocnt = 0;
2036
2037         ifp = &sc->arpcom.ac_if;
2038
2039         while(sc->bge_rx_saved_considx !=
2040             sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx) {
2041                 struct bge_rx_bd        *cur_rx;
2042                 u_int32_t               rxidx;
2043                 struct mbuf             *m = NULL;
2044                 u_int16_t               vlan_tag = 0;
2045                 int                     have_tag = 0;
2046
2047                 cur_rx =
2048                     &sc->bge_rdata->bge_rx_return_ring[sc->bge_rx_saved_considx];
2049
2050                 rxidx = cur_rx->bge_idx;
2051                 BGE_INC(sc->bge_rx_saved_considx, sc->bge_return_ring_cnt);
2052
2053                 if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
2054                         have_tag = 1;
2055                         vlan_tag = cur_rx->bge_vlan_tag;
2056                 }
2057
2058                 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
2059                         BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
2060                         m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
2061                         sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL;
2062                         jumbocnt++;
2063                         if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2064                                 ifp->if_ierrors++;
2065                                 bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
2066                                 continue;
2067                         }
2068                         if (bge_newbuf_jumbo(sc,
2069                             sc->bge_jumbo, NULL) == ENOBUFS) {
2070                                 ifp->if_ierrors++;
2071                                 bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
2072                                 continue;
2073                         }
2074                 } else {
2075                         BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
2076                         m = sc->bge_cdata.bge_rx_std_chain[rxidx];
2077                         sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL;
2078                         stdcnt++;
2079                         if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2080                                 ifp->if_ierrors++;
2081                                 bge_newbuf_std(sc, sc->bge_std, m);
2082                                 continue;
2083                         }
2084                         if (bge_newbuf_std(sc, sc->bge_std,
2085                             NULL) == ENOBUFS) {
2086                                 ifp->if_ierrors++;
2087                                 bge_newbuf_std(sc, sc->bge_std, m);
2088                                 continue;
2089                         }
2090                 }
2091
2092                 ifp->if_ipackets++;
2093 #ifndef __i386__
2094                 /*
2095                  * The i386 allows unaligned accesses, but for other
2096                  * platforms we must make sure the payload is aligned.
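         * ETHER_ALIGN is 2 bytes, so shifting the 14-byte Ethernet
         * header by that much leaves the IP header 32-bit aligned.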
2097                  */
2098                 if (sc->bge_rx_alignment_bug) {
2099                         bcopy(m->m_data, m->m_data + ETHER_ALIGN,
2100                             cur_rx->bge_len);
2101                         m->m_data += ETHER_ALIGN;
2102                 }
2103 #endif
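                /* The chip includes the FCS in bge_len; strip it. */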
2104                 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
2105                 m->m_pkthdr.rcvif = ifp;
2106
2107 #if 0 /* currently broken for some packets, possibly related to TCP options */
2108                 if (ifp->if_hwassist) {
2109                         m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2110                         if ((cur_rx->bge_ip_csum ^ 0xffff) == 0)
2111                                 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2112                         if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) {
2113                                 m->m_pkthdr.csum_data =
2114                                     cur_rx->bge_tcp_udp_csum;
2115                                 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
2116                         }
2117                 }
2118 #endif
2119
2120                 /*
2121                  * If we received a packet with a vlan tag, pass it
2122                  * to vlan_input() instead of ether_input().
2123                  */
2124                 if (have_tag) {
2125                         VLAN_INPUT_TAG(m, vlan_tag);
2126                         have_tag = vlan_tag = 0;
2127                         continue;
2128                 }
2129
2130                 (*ifp->if_input)(ifp, m);
2131         }
2132
2133         CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
2134         if (stdcnt)
2135                 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
2136         if (jumbocnt)
2137                 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
2138
2139         return;
2140 }
2141
2142 static void
2143 bge_txeof(sc)
2144         struct bge_softc *sc;
2145 {
2146         struct bge_tx_bd *cur_tx = NULL;
2147         struct ifnet *ifp;
2148
2149         ifp = &sc->arpcom.ac_if;
2150
2151         /*
2152          * Go through our tx ring and free mbufs for those
2153          * frames that have been sent.
2154          */
2155         while (sc->bge_tx_saved_considx !=
2156             sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx) {
2157                 u_int32_t               idx = 0;
2158
2159                 idx = sc->bge_tx_saved_considx;
2160                 cur_tx = &sc->bge_rdata->bge_tx_ring[idx];
2161                 if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
2162                         ifp->if_opackets++;
2163                 if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
2164                         m_freem(sc->bge_cdata.bge_tx_chain[idx]);
2165                         sc->bge_cdata.bge_tx_chain[idx] = NULL;
2166                 }
2167                 sc->bge_txcnt--;
2168                 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
2169                 ifp->if_timer = 0;
2170         }
2171
2172         if (cur_tx != NULL)
2173                 ifp->if_flags &= ~IFF_OACTIVE;
2174
2175         return;
2176 }
2177
2178 static void
2179 bge_intr(xsc)
2180         void *xsc;
2181 {
2182         struct bge_softc *sc;
2183         struct ifnet *ifp;
2184         u_int32_t status;
2185
2186         sc = xsc;
2187         ifp = &sc->arpcom.ac_if;
2188
2189 #ifdef notdef
2190         /* Avoid this for now -- checking this register is expensive. */
2191         /* Make sure this is really our interrupt. */
2192         if (!(CSR_READ_4(sc, BGE_MISC_LOCAL_CTL) & BGE_MLC_INTR_STATE))
2193                 return;
2194 #endif
2195         /* Ack interrupt and stop others from occurring. */
2196         CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
2197
2198         /*
2199          * Process link state changes.
2200          * Grrr. The link status word in the status block does
2201          * not work correctly on the BCM5700 rev AX and BX chips,
2202          * according to all available information. Hence, we have
2203          * to enable MII interrupts in order to properly obtain
2204          * async link changes. Unfortunately, this also means that
2205          * we have to read the MAC status register to detect link
2206          * changes, thereby adding an additional register access to
2207          * the interrupt handler.
2208          */
2209
2210         if (sc->bge_asicrev == BGE_ASICREV_BCM5700) {
2211                 status = CSR_READ_4(sc, BGE_MAC_STS);
2212                 if (status & BGE_MACSTAT_MI_INTERRUPT) {
2213                         sc->bge_link = 0;
2214                         callout_stop(&sc->bge_stat_timer);
2215                         bge_tick(sc);
2216                         /* Clear the interrupt */
2217                         CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
2218                             BGE_EVTENB_MI_INTERRUPT);
2219                         bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR);
2220                         bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR,
2221                             BRGPHY_INTRS);
2222                 }
2223         } else {
2224                 if ((sc->bge_rdata->bge_status_block.bge_status &
2225                     BGE_STATFLAG_UPDATED) &&
2226                     (sc->bge_rdata->bge_status_block.bge_status &
2227                     BGE_STATFLAG_LINKSTATE_CHANGED)) {
2228                         sc->bge_rdata->bge_status_block.bge_status &=
2229                                 ~(BGE_STATFLAG_UPDATED|
2230                                 BGE_STATFLAG_LINKSTATE_CHANGED);
2231                         /*
2232                          * Sometimes PCS encoding errors are detected in
2233                          * TBI mode (on fiber NICs), and for some reason
2234                          * the chip will signal them as link changes.
2235                          * If we get a link change event, but the 'PCS
2236                          * encoding error' bit in the MAC status register
2237                          * is set, don't bother doing a link check.
2238                          * This avoids spurious "gigabit link up" messages
2239                          * that sometimes appear on fiber NICs during
2240                          * periods of heavy traffic. (There should be no
2241                          * effect on copper NICs.)
2242                          */
2243                         status = CSR_READ_4(sc, BGE_MAC_STS);
2244                         if (!(status & (BGE_MACSTAT_PORT_DECODE_ERROR|
2245                             BGE_MACSTAT_MI_COMPLETE))) {
2246                                 sc->bge_link = 0;
2247                                 callout_stop(&sc->bge_stat_timer);
2248                                 bge_tick(sc);
2249                         }
2253                         /* Clear the interrupt */
2254                         CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
2255                             BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
2256                             BGE_MACSTAT_LINK_CHANGED);
2257
2258                         /* Force flush the status block cached by PCI bridge */
2259                         CSR_READ_4(sc, BGE_MBX_IRQ0_LO);
2260                 }
2261         }
2262
2263         if (ifp->if_flags & IFF_RUNNING) {
2264                 /* Check RX return ring producer/consumer */
2265                 bge_rxeof(sc);
2266
2267                 /* Check TX ring producer/consumer */
2268                 bge_txeof(sc);
2269         }
2270
2271         bge_handle_events(sc);
2272
2273         /* Re-enable interrupts. */
2274         CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
2275
2276         if ((ifp->if_flags & IFF_RUNNING) && !ifq_is_empty(&ifp->if_snd))
2277                 bge_start(ifp);
2278
2279         return;
2280 }
2281
2282 static void
2283 bge_tick(xsc)
2284         void *xsc;
2285 {
2286         struct bge_softc *sc;
2287         struct mii_data *mii = NULL;
2288         struct ifmedia *ifm = NULL;
2289         struct ifnet *ifp;
2290         int s;
2291
2292         sc = xsc;
2293         ifp = &sc->arpcom.ac_if;
2294
2295         s = splimp();
2296
2297         if (sc->bge_asicrev == BGE_ASICREV_BCM5705)
2298                 bge_stats_update_regs(sc);
2299         else
2300                 bge_stats_update(sc);
2301         callout_reset(&sc->bge_stat_timer, hz, bge_tick, sc);
2302         if (sc->bge_link) {
2303                 splx(s);
2304                 return;
2305         }
2306
2307         if (sc->bge_tbi) {
2308                 ifm = &sc->bge_ifmedia;
2309                 if (CSR_READ_4(sc, BGE_MAC_STS) &
2310                     BGE_MACSTAT_TBI_PCS_SYNCHED) {
2311                         sc->bge_link++;
2312                         CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
2313                         if_printf(ifp, "gigabit link up\n");
2314                         if (!ifq_is_empty(&ifp->if_snd))
2315                                 bge_start(ifp);
2316                 }
2317                 splx(s);
2318                 return;
2319         }
2320
2321         mii = device_get_softc(sc->bge_miibus);
2322         mii_tick(mii);
2323  
2324         if (!sc->bge_link) {
2325                 mii_pollstat(mii);
2326                 if (mii->mii_media_status & IFM_ACTIVE &&
2327                     IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
2328                         sc->bge_link++;
2329                         if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
2330                             IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
2331                                 if_printf(ifp, "gigabit link up\n");
2332                         if (!ifq_is_empty(&ifp->if_snd))
2333                                 bge_start(ifp);
2334                 }
2335         }
2336
2337         splx(s);
2338
2339         return;
2340 }
2341
2342 static void
2343 bge_stats_update_regs(sc)
2344         struct bge_softc *sc;
2345 {
2346         struct ifnet *ifp;
2347         struct bge_mac_stats_regs stats;
2348         u_int32_t *s;
2349         int i;
2350
2351         ifp = &sc->arpcom.ac_if;
2352
2353         s = (u_int32_t *)&stats;
2354         for (i = 0; i < sizeof(struct bge_mac_stats_regs); i += 4) {
2355                 *s = CSR_READ_4(sc, BGE_RX_STATS + i);
2356                 s++;
2357         }
2358
2359         ifp->if_collisions +=
2360            (stats.dot3StatsSingleCollisionFrames +
2361            stats.dot3StatsMultipleCollisionFrames +
2362            stats.dot3StatsExcessiveCollisions +
2363            stats.dot3StatsLateCollisions) -
2364            ifp->if_collisions;
2365
2366         return;
2367 }
2368
2369 static void
2370 bge_stats_update(sc)
2371         struct bge_softc *sc;
2372 {
2373         struct ifnet *ifp;
2374         struct bge_stats *stats;
2375
2376         ifp = &sc->arpcom.ac_if;
2377
2378         stats = (struct bge_stats *)(sc->bge_vhandle +
2379             BGE_MEMWIN_START + BGE_STATS_BLOCK);
2380
2381         ifp->if_collisions +=
2382            (stats->txstats.dot3StatsSingleCollisionFrames.bge_addr_lo +
2383            stats->txstats.dot3StatsMultipleCollisionFrames.bge_addr_lo +
2384            stats->txstats.dot3StatsExcessiveCollisions.bge_addr_lo +
2385            stats->txstats.dot3StatsLateCollisions.bge_addr_lo) -
2386            ifp->if_collisions;
2387
2388 #ifdef notdef
2389         ifp->if_collisions +=
2390            (sc->bge_rdata->bge_info.bge_stats.dot3StatsSingleCollisionFrames +
2391            sc->bge_rdata->bge_info.bge_stats.dot3StatsMultipleCollisionFrames +
2392            sc->bge_rdata->bge_info.bge_stats.dot3StatsExcessiveCollisions +
2393            sc->bge_rdata->bge_info.bge_stats.dot3StatsLateCollisions) -
2394            ifp->if_collisions;
2395 #endif
2396
2397         return;
2398 }
2399
2400 /*
2401  * Encapsulate an mbuf chain in the tx ring  by coupling the mbuf data
2402  * pointers to descriptors.
2403  */
2404 static int
2405 bge_encap(sc, m_head, txidx)
2406         struct bge_softc *sc;
2407         struct mbuf *m_head;
2408         u_int32_t *txidx;
2409 {
2410         struct bge_tx_bd        *f = NULL;
2411         struct mbuf             *m;
2412         u_int32_t               frag, cur, cnt = 0;
2413         u_int16_t               csum_flags = 0;
2414         struct ifvlan           *ifv = NULL;
2415
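        /*
         * An outbound frame from a vlan(4) interface carries M_PROTO1
         * and points m_pkthdr.rcvif at the vlan ifnet; that is how we
         * recover the tag for insertion into the TX descriptor.
         */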
2416         if ((m_head->m_flags & (M_PROTO1|M_PKTHDR)) == (M_PROTO1|M_PKTHDR) &&
2417             m_head->m_pkthdr.rcvif != NULL &&
2418             m_head->m_pkthdr.rcvif->if_type == IFT_L2VLAN)
2419                 ifv = m_head->m_pkthdr.rcvif->if_softc;
2420
2421         m = m_head;
2422         cur = frag = *txidx;
2423
2424         if (m_head->m_pkthdr.csum_flags) {
2425                 if (m_head->m_pkthdr.csum_flags & CSUM_IP)
2426                         csum_flags |= BGE_TXBDFLAG_IP_CSUM;
2427                 if (m_head->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
2428                         csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
2429                 if (m_head->m_flags & M_LASTFRAG)
2430                         csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
2431                 else if (m_head->m_flags & M_FRAG)
2432                         csum_flags |= BGE_TXBDFLAG_IP_FRAG;
2433         }
2434         /*
2435          * Start packing the mbufs in this chain into
2436          * the fragment pointers. Stop when we run out
2437          * of fragments or hit the end of the mbuf chain.
2438          */
2439         for (m = m_head; m != NULL; m = m->m_next) {
2440                 if (m->m_len != 0) {
2441                         f = &sc->bge_rdata->bge_tx_ring[frag];
2442                         if (sc->bge_cdata.bge_tx_chain[frag] != NULL)
2443                                 break;
2444                         BGE_HOSTADDR(f->bge_addr,
2445                             vtophys(mtod(m, vm_offset_t)));
2446                         f->bge_len = m->m_len;
2447                         f->bge_flags = csum_flags;
2448                         if (ifv != NULL) {
2449                                 f->bge_flags |= BGE_TXBDFLAG_VLAN_TAG;
2450                                 f->bge_vlan_tag = ifv->ifv_tag;
2451                         } else {
2452                                 f->bge_vlan_tag = 0;
2453                         }
2454                         /*
2455                          * Sanity check: avoid coming within 16 descriptors
2456                          * of the end of the ring.
2457                          */
2458                         if ((BGE_TX_RING_CNT - (sc->bge_txcnt + cnt)) < 16)
2459                                 return(ENOBUFS);
2460                         cur = frag;
2461                         BGE_INC(frag, BGE_TX_RING_CNT);
2462                         cnt++;
2463                 }
2464         }
2465
2466         if (m != NULL)
2467                 return(ENOBUFS);
2468
2469         if (frag == sc->bge_tx_saved_considx)
2470                 return(ENOBUFS);
2471
2472         sc->bge_rdata->bge_tx_ring[cur].bge_flags |= BGE_TXBDFLAG_END;
2473         sc->bge_cdata.bge_tx_chain[cur] = m_head;
2474         sc->bge_txcnt += cnt;
2475
2476         *txidx = frag;
2477
2478         return(0);
2479 }
2480
2481 /*
2482  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
2483  * to the mbuf data regions directly in the transmit descriptors.
2484  */
2485 static void
2486 bge_start(ifp)
2487         struct ifnet *ifp;
2488 {
2489         struct bge_softc *sc;
2490         struct mbuf *m_head = NULL;
2491         u_int32_t prodidx = 0;
2492
2493         sc = ifp->if_softc;
2494
2495         if (!sc->bge_link)
2496                 return;
2497
2498         prodidx = CSR_READ_4(sc, BGE_MBX_TX_HOST_PROD0_LO);
2499
2500         while(sc->bge_cdata.bge_tx_chain[prodidx] == NULL) {
2501                 m_head = ifq_poll(&ifp->if_snd);
2502                 if (m_head == NULL)
2503                         break;
2504
2505                 /*
2506                  * XXX
2507                  * safety overkill.  If this is a fragmented packet chain
2508                  * with delayed TCP/UDP checksums, then only encapsulate
2509                  * it if we have enough descriptors to handle the entire
2510                  * chain at once.
2511                  * (paranoia -- may not actually be needed)
2512                  */
2513                 if (m_head->m_flags & M_FIRSTFRAG &&
2514                     m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
2515                         if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
2516                             m_head->m_pkthdr.csum_data + 16) {
2517                                 ifp->if_flags |= IFF_OACTIVE;
2518                                 break;
2519                         }
2520                 }
2521
2522                 /*
2523                  * Pack the data into the transmit ring. If we
2524                  * don't have room, set the OACTIVE flag and wait
2525                  * for the NIC to drain the ring.
2526                  */
2527                 if (bge_encap(sc, m_head, &prodidx)) {
2528                         ifp->if_flags |= IFF_OACTIVE;
2529                         break;
2530                 }
2531                 m_head = ifq_dequeue(&ifp->if_snd);
2532
2533                 BPF_MTAP(ifp, m_head);
2534         }
2535
2536         /* Transmit */
2537         CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
2538         /* 5700 b2 errata */
2539         if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
2540                 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
2541
2542         /*
2543          * Set a timeout in case the chip goes out to lunch.
2544          */
2545         ifp->if_timer = 5;
2546
2547         return;
2548 }
2549
2550 static void
2551 bge_init(xsc)
2552         void *xsc;
2553 {
2554         struct bge_softc *sc = xsc;
2555         struct ifnet *ifp;
2556         u_int16_t *m;
2557         int s;
2558
2559         s = splimp();
2560
2561         ifp = &sc->arpcom.ac_if;
2562
2563         if (ifp->if_flags & IFF_RUNNING) {
2564                 splx(s);
2565                 return;
2566         }
2567
2568         /* Cancel pending I/O and flush buffers. */
2569         bge_stop(sc);
2570         bge_reset(sc);
2571         bge_chipinit(sc);
2572
2573         /*
2574          * Init the various state machines, ring
2575          * control blocks and firmware.
2576          */
2577         if (bge_blockinit(sc)) {
2578                 if_printf(ifp, "initialization failure\n");
2579                 splx(s);
2580                 return;
2581         }
2582
2585         /* Specify MTU. */
2586         CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
2587             ETHER_HDR_LEN + ETHER_CRC_LEN);
2588
2589         /* Load our MAC address. */
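        /*
         * The address is programmed as three big-endian 16-bit words:
         * the first word into BGE_MAC_ADDR1_LO, the remaining two
         * packed into BGE_MAC_ADDR1_HI.
         */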
2590         m = (u_int16_t *)&sc->arpcom.ac_enaddr[0];
2591         CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
2592         CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
2593
2594         /* Enable or disable promiscuous mode as needed. */
2595         if (ifp->if_flags & IFF_PROMISC) {
2596                 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
2597         } else {
2598                 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
2599         }
2600
2601         /* Program multicast filter. */
2602         bge_setmulti(sc);
2603
2604         /* Init RX ring. */
2605         bge_init_rx_ring_std(sc);
2606
2607         /*
2608          * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
2609          * memory to ensure that the chip has in fact read the first
2610          * entry of the ring.
2611          */
2612         if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
2613                 u_int32_t               v, i;
2614                 for (i = 0; i < 10; i++) {
2615                         DELAY(20);
2616                         v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
2617                         if (v == (MCLBYTES - ETHER_ALIGN))
2618                                 break;
2619                 }
2620                 if (i == 10)
2621                         if_printf(ifp, "5705 A0 chip failed to load RX ring\n");
2622         }
2623
2624         /* Init jumbo RX ring. */
2625         if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
2626                 bge_init_rx_ring_jumbo(sc);
2627
2628         /* Init our RX return ring index */
2629         sc->bge_rx_saved_considx = 0;
2630
2631         /* Init TX ring. */
2632         bge_init_tx_ring(sc);
2633
2634         /* Turn on transmitter */
2635         BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE);
2636
2637         /* Turn on receiver */
2638         BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
2639
2640         /* Tell firmware we're alive. */
2641         BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
2642
2643         /* Enable host interrupts. */
2644         BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
2645         BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
2646         CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
2647
2648         bge_ifmedia_upd(ifp);
2649
2650         ifp->if_flags |= IFF_RUNNING;
2651         ifp->if_flags &= ~IFF_OACTIVE;
2652
2653         splx(s);
2654
2655         callout_reset(&sc->bge_stat_timer, hz, bge_tick, sc);
2656 }
2657
2658 /*
2659  * Set media options.
2660  */
2661 static int
2662 bge_ifmedia_upd(ifp)
2663         struct ifnet *ifp;
2664 {
2665         struct bge_softc *sc;
2666         struct mii_data *mii;
2667         struct ifmedia *ifm;
2668
2669         sc = ifp->if_softc;
2670         ifm = &sc->bge_ifmedia;
2671
2672         /* If this is a 1000baseX NIC, enable the TBI port. */
2673         if (sc->bge_tbi) {
2674                 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2675                         return(EINVAL);
2676                 switch(IFM_SUBTYPE(ifm->ifm_media)) {
2677                 case IFM_AUTO:
2678                         break;
2679                 case IFM_1000_SX:
2680                         if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
2681                                 BGE_CLRBIT(sc, BGE_MAC_MODE,
2682                                     BGE_MACMODE_HALF_DUPLEX);
2683                         } else {
2684                                 BGE_SETBIT(sc, BGE_MAC_MODE,
2685                                     BGE_MACMODE_HALF_DUPLEX);
2686                         }
2687                         break;
2688                 default:
2689                         return(EINVAL);
2690                 }
2691                 return(0);
2692         }
2693
2694         mii = device_get_softc(sc->bge_miibus);
2695         sc->bge_link = 0;
2696         if (mii->mii_instance) {
2697                 struct mii_softc *miisc;
2698                 for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
2699                     miisc = LIST_NEXT(miisc, mii_list))
2700                         mii_phy_reset(miisc);
2701         }
2702         mii_mediachg(mii);
2703
2704         return(0);
2705 }
2706
2707 /*
2708  * Report current media status.
2709  */
2710 static void
2711 bge_ifmedia_sts(ifp, ifmr)
2712         struct ifnet *ifp;
2713         struct ifmediareq *ifmr;
2714 {
2715         struct bge_softc *sc;
2716         struct mii_data *mii;
2717
2718         sc = ifp->if_softc;
2719
2720         if (sc->bge_tbi) {
2721                 ifmr->ifm_status = IFM_AVALID;
2722                 ifmr->ifm_active = IFM_ETHER;
2723                 if (CSR_READ_4(sc, BGE_MAC_STS) &
2724                     BGE_MACSTAT_TBI_PCS_SYNCHED)
2725                         ifmr->ifm_status |= IFM_ACTIVE;
2726                 ifmr->ifm_active |= IFM_1000_SX;
2727                 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
2728                         ifmr->ifm_active |= IFM_HDX;    
2729                 else
2730                         ifmr->ifm_active |= IFM_FDX;
2731                 return;
2732         }
2733
2734         mii = device_get_softc(sc->bge_miibus);
2735         mii_pollstat(mii);
2736         ifmr->ifm_active = mii->mii_media_active;
2737         ifmr->ifm_status = mii->mii_media_status;
2738
2739         return;
2740 }
2741
2742 static int
2743 bge_ioctl(ifp, command, data, cr)
2744         struct ifnet *ifp;
2745         u_long command;
2746         caddr_t data;
2747         struct ucred *cr;
2748 {
2749         struct bge_softc *sc = ifp->if_softc;
2750         struct ifreq *ifr = (struct ifreq *) data;
2751         int s, mask, error = 0;
2752         struct mii_data *mii;
2753
2754         s = splimp();
2755
2756         switch(command) {
2757         case SIOCSIFADDR:
2758         case SIOCGIFADDR:
2759                 error = ether_ioctl(ifp, command, data);
2760                 break;
2761         case SIOCSIFMTU:
2762                 /* Disallow jumbo frames on 5705. */
2763                 if ((sc->bge_asicrev == BGE_ASICREV_BCM5705 &&
2764                     ifr->ifr_mtu > ETHERMTU) || ifr->ifr_mtu > BGE_JUMBO_MTU)
2765                         error = EINVAL;
2766                 else {
2767                         ifp->if_mtu = ifr->ifr_mtu;
2768                         ifp->if_flags &= ~IFF_RUNNING;
2769                         bge_init(sc);
2770                 }
2771                 break;
2772         case SIOCSIFFLAGS:
2773                 if (ifp->if_flags & IFF_UP) {
2774                         /*
2775                          * If only the state of the PROMISC flag changed,
2776                          * then just use the 'set promisc mode' command
2777                          * instead of reinitializing the entire NIC. Doing
2778                          * a full re-init means reloading the firmware and
2779                          * waiting for it to start up, which may take a
2780                          * second or two.
2781                          */
2782                         if (ifp->if_flags & IFF_RUNNING &&
2783                             ifp->if_flags & IFF_PROMISC &&
2784                             !(sc->bge_if_flags & IFF_PROMISC)) {
2785                                 BGE_SETBIT(sc, BGE_RX_MODE,
2786                                     BGE_RXMODE_RX_PROMISC);
2787                         } else if (ifp->if_flags & IFF_RUNNING &&
2788                             !(ifp->if_flags & IFF_PROMISC) &&
2789                             sc->bge_if_flags & IFF_PROMISC) {
2790                                 BGE_CLRBIT(sc, BGE_RX_MODE,
2791                                     BGE_RXMODE_RX_PROMISC);
2792                         } else
2793                                 bge_init(sc);
2794                 } else {
2795                         if (ifp->if_flags & IFF_RUNNING) {
2796                                 bge_stop(sc);
2797                         }
2798                 }
2799                 sc->bge_if_flags = ifp->if_flags;
2800                 error = 0;
2801                 break;
2802         case SIOCADDMULTI:
2803         case SIOCDELMULTI:
2804                 if (ifp->if_flags & IFF_RUNNING) {
2805                         bge_setmulti(sc);
2806                         error = 0;
2807                 }
2808                 break;
2809         case SIOCSIFMEDIA:
2810         case SIOCGIFMEDIA:
2811                 if (sc->bge_tbi) {
2812                         error = ifmedia_ioctl(ifp, ifr,
2813                             &sc->bge_ifmedia, command);
2814                 } else {
2815                         mii = device_get_softc(sc->bge_miibus);
2816                         error = ifmedia_ioctl(ifp, ifr,
2817                             &mii->mii_media, command);
2818                 }
2819                 break;
2820         case SIOCSIFCAP:
2821                 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
2822                 if (mask & IFCAP_HWCSUM) {
2823                         if (IFCAP_HWCSUM & ifp->if_capenable)
2824                                 ifp->if_capenable &= ~IFCAP_HWCSUM;
2825                         else
2826                                 ifp->if_capenable |= IFCAP_HWCSUM;
2827                 }
2828                 error = 0;
2829                 break;
2830         default:
2831                 error = EINVAL;
2832                 break;
2833         }
2834
2835         (void)splx(s);
2836
2837         return(error);
2838 }
2839
2840 static void
2841 bge_watchdog(ifp)
2842         struct ifnet *ifp;
2843 {
2844         struct bge_softc *sc;
2845
2846         sc = ifp->if_softc;
2847
2848         if_printf(ifp, "watchdog timeout -- resetting\n");
2849
2850         ifp->if_flags &= ~IFF_RUNNING;
2851         bge_init(sc);
2852
2853         ifp->if_oerrors++;
2854
2855         return;
2856 }
2857
2858 /*
2859  * Stop the adapter and free any mbufs allocated to the
2860  * RX and TX lists.
2861  */
2862 static void
2863 bge_stop(sc)
2864         struct bge_softc *sc;
2865 {
2866         struct ifnet *ifp;
2867         struct ifmedia_entry *ifm;
2868         struct mii_data *mii = NULL;
2869         int mtmp, itmp;
2870
2871         ifp = &sc->arpcom.ac_if;
2872
2873         if (!sc->bge_tbi)
2874                 mii = device_get_softc(sc->bge_miibus);
2875
2876         callout_stop(&sc->bge_stat_timer);
2877
2878         /*
2879          * Disable all of the receiver blocks
2880          */
2881         BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
2882         BGE_CLRBIT(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
2883         BGE_CLRBIT(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
2884         if (sc->bge_asicrev != BGE_ASICREV_BCM5705)
2885                 BGE_CLRBIT(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
2886         BGE_CLRBIT(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
2887         BGE_CLRBIT(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
2888         BGE_CLRBIT(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
2889
2890         /*
2891          * Disable all of the transmit blocks
2892          */
2893         BGE_CLRBIT(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
2894         BGE_CLRBIT(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
2895         BGE_CLRBIT(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
2896         BGE_CLRBIT(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
2897         BGE_CLRBIT(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
2898         if (sc->bge_asicrev != BGE_ASICREV_BCM5705)
2899                 BGE_CLRBIT(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
2900         BGE_CLRBIT(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
2901
2902         /*
2903          * Shut down all of the memory managers and related
2904          * state machines.
2905          */
2906         BGE_CLRBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
2907         BGE_CLRBIT(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
2908         if (sc->bge_asicrev != BGE_ASICREV_BCM5705)
2909                 BGE_CLRBIT(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
2910         CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
2911         CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
2912         if (sc->bge_asicrev != BGE_ASICREV_BCM5705) {
2913                 BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
2914                 BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
2915         }
2916
2917         /* Disable host interrupts. */
2918         BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
2919         CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
2920
2921         /*
2922          * Tell firmware we're shutting down.
2923          */
2924         BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
2925
2926         /* Free the RX lists. */
2927         bge_free_rx_ring_std(sc);
2928
2929         /* Free jumbo RX list. */
2930         if (sc->bge_asicrev != BGE_ASICREV_BCM5705)
2931                 bge_free_rx_ring_jumbo(sc);
2932
2933         /* Free TX buffers. */
2934         bge_free_tx_ring(sc);
2935
2936         /*
2937          * Isolate/power down the PHY, but leave the media selection
2938          * unchanged so that things will be put back to normal when
2939          * we bring the interface back up.
2940          */
2941         if (!sc->bge_tbi) {
2942                 itmp = ifp->if_flags;
2943                 ifp->if_flags |= IFF_UP;
2944                 ifm = mii->mii_media.ifm_cur;
2945                 mtmp = ifm->ifm_media;
2946                 ifm->ifm_media = IFM_ETHER|IFM_NONE;
2947                 mii_mediachg(mii);
2948                 ifm->ifm_media = mtmp;
2949                 ifp->if_flags = itmp;
2950         }
2951
2952         sc->bge_link = 0;
2953
2954         sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
2955
2956         ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2957
2958         return;
2959 }
2960
2961 /*
2962  * Stop all chip I/O so that the kernel's probe routines don't
2963  * get confused by errant DMAs when rebooting.
2964  */
2965 static void
2966 bge_shutdown(dev)
2967         device_t dev;
2968 {
2969         struct bge_softc *sc;
2970
2971         sc = device_get_softc(dev);
2972
2973         bge_stop(sc); 
2974         bge_reset(sc);
2975
2976         return;
2977 }