/*
 * Copyright (c) 2001 Wind River Systems
 * Copyright (c) 1997, 1998, 1999, 2001
 *      Bill Paul <wpaul@windriver.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/bge/if_bge.c,v 1.3.2.39 2005/07/03 03:41:18 silby Exp $
 * $DragonFly: src/sys/dev/netif/bge/if_bge.c,v 1.71 2007/04/26 11:58:10 sephe Exp $
 *
 */

/*
 * Broadcom BCM570x family gigabit ethernet driver for FreeBSD.
 *
 * Written by Bill Paul <wpaul@windriver.com>
 * Senior Engineer, Wind River Systems
 */

/*
 * The Broadcom BCM5700 is based on technology originally developed by
 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
 * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
 * frames, highly configurable RX filtering, and 16 RX and TX queues
 * (which, along with RX filter rules, can be used for QOS applications).
 * Other features, such as TCP segmentation, may be available as part
 * of value-added firmware updates. Unlike the Tigon I and Tigon II,
 * firmware images can be stored in hardware and need not be compiled
 * into the driver.
 *
 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
 *
 * The BCM5701 is a single-chip solution incorporating both the BCM5700
 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
 * does not support external SSRAM.
 *
 * Broadcom also produces a variation of the BCM5700 under the "Altima"
 * brand name, which is functionally similar but lacks PCI-X support.
 *
 * Without external SSRAM, you can have at most 4 TX rings, and the use
 * of the mini RX ring is disabled. This seems to imply that these
 * features are simply not available on the BCM5701. As a result, this
 * driver does not implement any support for the mini RX ring.
 */

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/rman.h>
#include <sys/serialize.h>
#include <sys/socket.h>
#include <sys/sockio.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/ifq_var.h>
#include <net/vlan/if_vlan_var.h>

#include <dev/netif/mii_layer/mii.h>
#include <dev/netif/mii_layer/miivar.h>
#include <dev/netif/mii_layer/brgphyreg.h>

#include <bus/pci/pcidevs.h>
#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>

#include <dev/netif/bge/if_bgereg.h>

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

#define BGE_CSUM_FEATURES       (CSUM_IP | CSUM_TCP | CSUM_UDP)
#define BGE_MIN_FRAME           60
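/*
 * BGE_MIN_FRAME is the 64-byte Ethernet minimum frame length less the
 * 4-byte FCS, which the MAC appends in hardware.
 */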

/*
 * Various supported device vendors/types and their names. Note: the
 * spec seems to indicate that the hardware still has Alteon's vendor
 * ID burned into it, though it will always be overridden by the vendor
 * ID in the EEPROM. Just to be safe, we cover all possibilities.
 */
#define BGE_DEVDESC_MAX         64      /* Maximum device description length */

static struct bge_type bge_devs[] = {
        { PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_BCM5700,
                "Alteon BCM5700 Gigabit Ethernet" },
        { PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_BCM5701,
                "Alteon BCM5701 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5700,
                "Broadcom BCM5700 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5701,
                "Broadcom BCM5701 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702X,
                "Broadcom BCM5702X Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702_ALT,
                "Broadcom BCM5702 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703X,
                "Broadcom BCM5703X Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703A3,
                "Broadcom BCM5703 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704C,
                "Broadcom BCM5704C Dual Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704S,
                "Broadcom BCM5704S Dual Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705,
                "Broadcom BCM5705 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705K,
                "Broadcom BCM5705K Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705M,
                "Broadcom BCM5705M Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705M_ALT,
                "Broadcom BCM5705M Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5714,
                "Broadcom BCM5714C Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5721,
                "Broadcom BCM5721 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5750,
                "Broadcom BCM5750 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5750M,
                "Broadcom BCM5750M Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751,
                "Broadcom BCM5751 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751M,
                "Broadcom BCM5751M Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5752,
                "Broadcom BCM5752 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5782,
                "Broadcom BCM5782 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5788,
                "Broadcom BCM5788 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5789,
                "Broadcom BCM5789 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5901,
                "Broadcom BCM5901 Fast Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5901A2,
                "Broadcom BCM5901A2 Fast Ethernet" },
        { PCI_VENDOR_SCHNEIDERKOCH, PCI_PRODUCT_SCHNEIDERKOCH_SK_9DX1,
                "SysKonnect Gigabit Ethernet" },
        { PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1000,
                "Altima AC1000 Gigabit Ethernet" },
        { PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1001,
                "Altima AC1002 Gigabit Ethernet" },
        { PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC9100,
                "Altima AC9100 Gigabit Ethernet" },
        { 0, 0, NULL }
};

static int      bge_probe(device_t);
static int      bge_attach(device_t);
static int      bge_detach(device_t);
static void     bge_release_resources(struct bge_softc *);
static void     bge_txeof(struct bge_softc *);
static void     bge_rxeof(struct bge_softc *);

static void     bge_tick(void *);
static void     bge_tick_serialized(void *);
static void     bge_stats_update(struct bge_softc *);
static void     bge_stats_update_regs(struct bge_softc *);
static int      bge_encap(struct bge_softc *, struct mbuf *, uint32_t *);

static void     bge_intr(void *);
static void     bge_start(struct ifnet *);
static int      bge_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void     bge_init(void *);
static void     bge_stop(struct bge_softc *);
static void     bge_watchdog(struct ifnet *);
static void     bge_shutdown(device_t);
static int      bge_suspend(device_t);
static int      bge_resume(device_t);
static int      bge_ifmedia_upd(struct ifnet *);
static void     bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);

static uint8_t  bge_eeprom_getbyte(struct bge_softc *, uint32_t, uint8_t *);
static int      bge_read_eeprom(struct bge_softc *, caddr_t, uint32_t, size_t);

static void     bge_setmulti(struct bge_softc *);
static void     bge_setpromisc(struct bge_softc *);

static int      bge_alloc_jumbo_mem(struct bge_softc *);
static void     bge_free_jumbo_mem(struct bge_softc *);
static struct bge_jslot
                *bge_jalloc(struct bge_softc *);
static void     bge_jfree(void *);
static void     bge_jref(void *);
static int      bge_newbuf_std(struct bge_softc *, int, struct mbuf *);
static int      bge_newbuf_jumbo(struct bge_softc *, int, struct mbuf *);
static int      bge_init_rx_ring_std(struct bge_softc *);
static void     bge_free_rx_ring_std(struct bge_softc *);
static int      bge_init_rx_ring_jumbo(struct bge_softc *);
static void     bge_free_rx_ring_jumbo(struct bge_softc *);
static void     bge_free_tx_ring(struct bge_softc *);
static int      bge_init_tx_ring(struct bge_softc *);

static int      bge_chipinit(struct bge_softc *);
static int      bge_blockinit(struct bge_softc *);

static uint32_t bge_readmem_ind(struct bge_softc *, uint32_t);
static void     bge_writemem_ind(struct bge_softc *, uint32_t, uint32_t);
#ifdef notdef
static uint32_t bge_readreg_ind(struct bge_softc *, uint32_t);
#endif
static void     bge_writereg_ind(struct bge_softc *, uint32_t, uint32_t);

static int      bge_miibus_readreg(device_t, int, int);
static int      bge_miibus_writereg(device_t, int, int, int);
static void     bge_miibus_statchg(device_t);

static void     bge_reset(struct bge_softc *);

static void     bge_dma_map_addr(void *, bus_dma_segment_t *, int, int);
static void     bge_dma_map_mbuf(void *, bus_dma_segment_t *, int,
                                 bus_size_t, int);
static int      bge_dma_alloc(struct bge_softc *);
static void     bge_dma_free(struct bge_softc *);
static int      bge_dma_block_alloc(struct bge_softc *, bus_size_t,
                                    bus_dma_tag_t *, bus_dmamap_t *,
                                    void **, bus_addr_t *);
static void     bge_dma_block_free(bus_dma_tag_t, bus_dmamap_t, void *);

/*
 * Set the following tunable to 1 for some IBM blade servers with the
 * DNLK switch module. Auto negotiation is broken for those configurations.
 */
static int      bge_fake_autoneg = 0;
TUNABLE_INT("hw.bge.fake_autoneg", &bge_fake_autoneg);
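/* The tunable can be set from loader.conf(5), e.g. hw.bge.fake_autoneg="1". */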

static device_method_t bge_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe,         bge_probe),
        DEVMETHOD(device_attach,        bge_attach),
        DEVMETHOD(device_detach,        bge_detach),
        DEVMETHOD(device_shutdown,      bge_shutdown),
        DEVMETHOD(device_suspend,       bge_suspend),
        DEVMETHOD(device_resume,        bge_resume),

        /* bus interface */
        DEVMETHOD(bus_print_child,      bus_generic_print_child),
        DEVMETHOD(bus_driver_added,     bus_generic_driver_added),

        /* MII interface */
        DEVMETHOD(miibus_readreg,       bge_miibus_readreg),
        DEVMETHOD(miibus_writereg,      bge_miibus_writereg),
        DEVMETHOD(miibus_statchg,       bge_miibus_statchg),

        { 0, 0 }
};

static DEFINE_CLASS_0(bge, bge_driver, bge_methods, sizeof(struct bge_softc));
static devclass_t bge_devclass;

DECLARE_DUMMY_MODULE(if_bge);
DRIVER_MODULE(if_bge, pci, bge_driver, bge_devclass, 0, 0);
DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0);

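/*
 * Indirect access to NIC internal memory goes through a sliding window
 * in PCI config space: program the window base address register, then
 * read or write the data register.  Concurrent use of the window is
 * presumably serialized by the callers.
 */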
static uint32_t
bge_readmem_ind(struct bge_softc *sc, uint32_t off)
{
        device_t dev = sc->bge_dev;

        pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
        return(pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4));
}

static void
bge_writemem_ind(struct bge_softc *sc, uint32_t off, uint32_t val)
{
        device_t dev = sc->bge_dev;

        pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
        pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
}

#ifdef notdef
static uint32_t
bge_readreg_ind(struct bge_softc *sc, uint32_t off)
{
        device_t dev = sc->bge_dev;

        pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
        return(pci_read_config(dev, BGE_PCI_REG_DATA, 4));
}
#endif

static void
bge_writereg_ind(struct bge_softc *sc, uint32_t off, uint32_t val)
{
        device_t dev = sc->bge_dev;

        pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
        pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);
}

/*
 * Read a byte of data stored in the EEPROM at address 'addr.' The
 * BCM570x supports both the traditional bitbang interface and an
 * auto access interface for reading the EEPROM. We use the auto
 * access method.
 */
static uint8_t
bge_eeprom_getbyte(struct bge_softc *sc, uint32_t addr, uint8_t *dest)
{
        int i;
        uint32_t byte = 0;

        /*
         * Enable use of auto EEPROM access so we can avoid
         * having to use the bitbang method.
         */
        BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);

        /* Reset the EEPROM, load the clock period. */
        CSR_WRITE_4(sc, BGE_EE_ADDR,
            BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
        DELAY(20);

        /* Issue the read EEPROM command. */
        CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);

        /* Wait for completion */
        for (i = 0; i < BGE_TIMEOUT * 10; i++) {
                DELAY(10);
                if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
                        break;
        }

        if (i == BGE_TIMEOUT * 10) {
                if_printf(&sc->arpcom.ac_if, "eeprom read timed out\n");
                return(1);
        }

        /* Get result. */
        byte = CSR_READ_4(sc, BGE_EE_DATA);

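        /*
         * The auto access interface returns a whole 32-bit word;
         * addr % 4 selects the byte lane within it that holds the
         * byte we asked for.
         */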
        *dest = (byte >> ((addr % 4) * 8)) & 0xFF;

        return(0);
}

/*
 * Read a sequence of bytes from the EEPROM.
 */
static int
bge_read_eeprom(struct bge_softc *sc, caddr_t dest, uint32_t off, size_t len)
{
        size_t i;
        int err;
        uint8_t byte;

        for (byte = 0, err = 0, i = 0; i < len; i++) {
                err = bge_eeprom_getbyte(sc, off + i, &byte);
                if (err)
                        break;
                *(dest + i) = byte;
        }

        return(err ? 1 : 0);
}

static int
bge_miibus_readreg(device_t dev, int phy, int reg)
{
        struct bge_softc *sc;
        struct ifnet *ifp;
        uint32_t val, autopoll;
        int i;

        sc = device_get_softc(dev);
        ifp = &sc->arpcom.ac_if;

        /*
         * Broadcom's own driver always assumes the internal
         * PHY is at GMII address 1. On some chips, the PHY responds
         * to accesses at all addresses, which could cause us to
         * bogusly attach the PHY 32 times at probe time. Always
         * restricting the lookup to address 1 is simpler than
         * trying to figure out which chip revisions should be
         * special-cased.
         */
        if (phy != 1)
                return(0);

        /* Reading with autopolling on may trigger PCI errors */
        autopoll = CSR_READ_4(sc, BGE_MI_MODE);
        if (autopoll & BGE_MIMODE_AUTOPOLL) {
                BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
                DELAY(40);
        }

        CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY|
            BGE_MIPHY(phy)|BGE_MIREG(reg));

        for (i = 0; i < BGE_TIMEOUT; i++) {
                val = CSR_READ_4(sc, BGE_MI_COMM);
                if (!(val & BGE_MICOMM_BUSY))
                        break;
        }

        if (i == BGE_TIMEOUT) {
                if_printf(ifp, "PHY read timed out\n");
                val = 0;
                goto done;
        }

        val = CSR_READ_4(sc, BGE_MI_COMM);

done:
        if (autopoll & BGE_MIMODE_AUTOPOLL) {
                BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
                DELAY(40);
        }

        if (val & BGE_MICOMM_READFAIL)
                return(0);

        return(val & 0xFFFF);
}

static int
bge_miibus_writereg(device_t dev, int phy, int reg, int val)
{
        struct bge_softc *sc;
        uint32_t autopoll;
        int i;

        sc = device_get_softc(dev);

        /* Writing with autopolling on may trigger PCI errors */
        autopoll = CSR_READ_4(sc, BGE_MI_MODE);
        if (autopoll & BGE_MIMODE_AUTOPOLL) {
                BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
                DELAY(40);
        }

        CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY|
            BGE_MIPHY(phy)|BGE_MIREG(reg)|val);

        for (i = 0; i < BGE_TIMEOUT; i++) {
                if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY))
                        break;
        }

        if (autopoll & BGE_MIMODE_AUTOPOLL) {
                BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
                DELAY(40);
        }

        if (i == BGE_TIMEOUT) {
                if_printf(&sc->arpcom.ac_if, "PHY write timed out\n");
                return(0);
        }

        return(0);
}

static void
bge_miibus_statchg(device_t dev)
{
        struct bge_softc *sc;
        struct mii_data *mii;

        sc = device_get_softc(dev);
        mii = device_get_softc(sc->bge_miibus);

        BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
        if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
                BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
        } else {
                BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
        }

        if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
                BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
        } else {
                BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
        }
}

/*
 * Memory management for jumbo frames.
 */
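/*
 * The jumbo pool is one physically contiguous block carved into
 * BGE_JSLOTS buffers of BGE_JLEN bytes each.  Each buffer is handed to
 * the mbuf system as external storage, with bge_jref()/bge_jfree()
 * maintaining a per-slot reference count so a slot goes back on the
 * free list only when its last reference is dropped.
 */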
static int
bge_alloc_jumbo_mem(struct bge_softc *sc)
{
        struct ifnet *ifp = &sc->arpcom.ac_if;
        struct bge_jslot *entry;
        uint8_t *ptr;
        bus_addr_t paddr;
        int i, error;

        /*
         * Create tag for jumbo mbufs.
         * This is really a bit of a kludge. We allocate a special
         * jumbo buffer pool which (thanks to the way our DMA
         * memory allocation works) will consist of contiguous
         * pages. This means that even though a jumbo buffer might
         * be larger than a page size, we don't really need to
         * map it into more than one DMA segment. However, the
         * default mbuf tag will result in multi-segment mappings,
         * so we have to create a special jumbo mbuf tag that
         * lets us get away with mapping the jumbo buffers as
         * a single segment. I think eventually the driver should
         * be changed so that it uses ordinary mbufs and cluster
         * buffers, i.e. jumbo frames can span multiple DMA
         * descriptors. But that's a project for another day.
         */

        /*
         * Create DMA resources for the jumbo RX ring.
         */
        error = bge_dma_block_alloc(sc, BGE_JUMBO_RX_RING_SZ,
                                    &sc->bge_cdata.bge_rx_jumbo_ring_tag,
                                    &sc->bge_cdata.bge_rx_jumbo_ring_map,
                                    (void **)&sc->bge_ldata.bge_rx_jumbo_ring,
                                    &sc->bge_ldata.bge_rx_jumbo_ring_paddr);
        if (error) {
                if_printf(ifp, "could not create jumbo RX ring\n");
                return error;
        }

        /*
         * Create DMA resources for the jumbo buffer block.
         */
        error = bge_dma_block_alloc(sc, BGE_JMEM,
                                    &sc->bge_cdata.bge_jumbo_tag,
                                    &sc->bge_cdata.bge_jumbo_map,
                                    (void **)&sc->bge_ldata.bge_jumbo_buf,
                                    &paddr);
        if (error) {
                if_printf(ifp, "could not create jumbo buffer\n");
                return error;
        }

        SLIST_INIT(&sc->bge_jfree_listhead);

        /*
         * Now divide it up into 9K pieces and save the addresses
         * in an array. Note that we play an evil trick here by using
         * the first few bytes in the buffer to hold the address
         * of the softc structure for this interface. This is because
         * bge_jfree() needs it, but it is called by the mbuf management
         * code which will not pass it to us explicitly.
         */
        for (i = 0, ptr = sc->bge_ldata.bge_jumbo_buf; i < BGE_JSLOTS; i++) {
                entry = &sc->bge_cdata.bge_jslots[i];
                entry->bge_sc = sc;
                entry->bge_buf = ptr;
                entry->bge_paddr = paddr;
                entry->bge_inuse = 0;
                entry->bge_slot = i;
                SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, entry, jslot_link);

                ptr += BGE_JLEN;
                paddr += BGE_JLEN;
        }
        return 0;
}

static void
bge_free_jumbo_mem(struct bge_softc *sc)
{
        /* Destroy jumbo RX ring. */
        bge_dma_block_free(sc->bge_cdata.bge_rx_jumbo_ring_tag,
                           sc->bge_cdata.bge_rx_jumbo_ring_map,
                           sc->bge_ldata.bge_rx_jumbo_ring);

        /* Destroy jumbo buffer block. */
        bge_dma_block_free(sc->bge_cdata.bge_jumbo_tag,
                           sc->bge_cdata.bge_jumbo_map,
                           sc->bge_ldata.bge_jumbo_buf);
}

/*
 * Allocate a jumbo buffer.
 */
static struct bge_jslot *
bge_jalloc(struct bge_softc *sc)
{
        struct bge_jslot *entry;

        lwkt_serialize_enter(&sc->bge_jslot_serializer);
        entry = SLIST_FIRST(&sc->bge_jfree_listhead);
        if (entry) {
                SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jslot_link);
                entry->bge_inuse = 1;
        } else {
                if_printf(&sc->arpcom.ac_if, "no free jumbo buffers\n");
        }
        lwkt_serialize_exit(&sc->bge_jslot_serializer);
        return(entry);
}

/*
 * Adjust usage count on a jumbo buffer.
 */
static void
bge_jref(void *arg)
{
        struct bge_jslot *entry = (struct bge_jslot *)arg;
        struct bge_softc *sc = entry->bge_sc;

        if (sc == NULL)
                panic("bge_jref: can't find softc pointer!");

        if (&sc->bge_cdata.bge_jslots[entry->bge_slot] != entry) {
                panic("bge_jref: asked to reference buffer "
                    "that we don't manage!");
        } else if (entry->bge_inuse == 0) {
                panic("bge_jref: buffer already free!");
        } else {
                atomic_add_int(&entry->bge_inuse, 1);
        }
}

/*
 * Release a jumbo buffer.
 */
static void
bge_jfree(void *arg)
{
        struct bge_jslot *entry = (struct bge_jslot *)arg;
        struct bge_softc *sc = entry->bge_sc;

        if (sc == NULL)
                panic("bge_jfree: can't find softc pointer!");

        if (&sc->bge_cdata.bge_jslots[entry->bge_slot] != entry) {
                panic("bge_jfree: asked to free buffer that we don't manage!");
        } else if (entry->bge_inuse == 0) {
                panic("bge_jfree: buffer already free!");
        } else {
                /*
                 * Possible MP race to 0, use the serializer.  The atomic insn
                 * is still needed for races against bge_jref().
                 */
                lwkt_serialize_enter(&sc->bge_jslot_serializer);
                atomic_subtract_int(&entry->bge_inuse, 1);
                if (entry->bge_inuse == 0) {
                        SLIST_INSERT_HEAD(&sc->bge_jfree_listhead,
                                          entry, jslot_link);
                }
                lwkt_serialize_exit(&sc->bge_jslot_serializer);
        }
}

/*
 * Initialize a standard receive ring descriptor.
 */
static int
bge_newbuf_std(struct bge_softc *sc, int i, struct mbuf *m)
{
        struct mbuf *m_new = NULL;
        struct bge_dmamap_arg ctx;
        bus_dma_segment_t seg;
        struct bge_rx_bd *r;
        int error;

        if (m == NULL) {
                m_new = m_getcl(MB_DONTWAIT, MT_DATA, M_PKTHDR);
                if (m_new == NULL)
                        return ENOBUFS;
        } else {
                m_new = m;
                m_new->m_data = m_new->m_ext.ext_buf;
        }
        m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;

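        /*
         * Shift the payload by ETHER_ALIGN (2 bytes) so the IP header
         * lands on a 32-bit boundary after the 14-byte Ethernet header.
         * Chips with the RX alignment bug presumably cannot DMA to
         * unaligned addresses, so the fixup is skipped here and handled
         * in software at receive time instead.
         */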
        if (!sc->bge_rx_alignment_bug)
                m_adj(m_new, ETHER_ALIGN);

        ctx.bge_maxsegs = 1;
        ctx.bge_segs = &seg;
        error = bus_dmamap_load_mbuf(sc->bge_cdata.bge_mtag,
                                     sc->bge_cdata.bge_rx_std_dmamap[i],
                                     m_new, bge_dma_map_mbuf, &ctx,
                                     BUS_DMA_NOWAIT);
        if (error || ctx.bge_maxsegs == 0) {
                if (m == NULL)
                        m_freem(m_new);
                return ENOMEM;
        }

        sc->bge_cdata.bge_rx_std_chain[i] = m_new;

        r = &sc->bge_ldata.bge_rx_std_ring[i];
        r->bge_addr.bge_addr_lo = BGE_ADDR_LO(ctx.bge_segs[0].ds_addr);
        r->bge_addr.bge_addr_hi = BGE_ADDR_HI(ctx.bge_segs[0].ds_addr);
        r->bge_flags = BGE_RXBDFLAG_END;
        r->bge_len = m_new->m_len;
        r->bge_idx = i;

        bus_dmamap_sync(sc->bge_cdata.bge_mtag,
                        sc->bge_cdata.bge_rx_std_dmamap[i],
                        BUS_DMASYNC_PREREAD);
        return 0;
}

/*
 * Initialize a jumbo receive ring descriptor. This allocates
 * a jumbo buffer from the pool managed internally by the driver.
 */
static int
bge_newbuf_jumbo(struct bge_softc *sc, int i, struct mbuf *m)
{
        struct mbuf *m_new = NULL;
        struct bge_jslot *buf;
        struct bge_rx_bd *r;
        bus_addr_t paddr;

        if (m == NULL) {
                /* Allocate the mbuf. */
                MGETHDR(m_new, MB_DONTWAIT, MT_DATA);
                if (m_new == NULL)
                        return(ENOBUFS);

                /* Allocate the jumbo buffer */
                buf = bge_jalloc(sc);
                if (buf == NULL) {
                        m_freem(m_new);
                        if_printf(&sc->arpcom.ac_if, "jumbo allocation failed "
                            "-- packet dropped!\n");
                        return ENOBUFS;
                }

                /* Attach the buffer to the mbuf. */
                m_new->m_ext.ext_arg = buf;
                m_new->m_ext.ext_buf = buf->bge_buf;
                m_new->m_ext.ext_free = bge_jfree;
                m_new->m_ext.ext_ref = bge_jref;
                m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN;

                m_new->m_flags |= M_EXT;
        } else {
                KKASSERT(m->m_flags & M_EXT);
                m_new = m;
                buf = m_new->m_ext.ext_arg;
        }
        m_new->m_data = m_new->m_ext.ext_buf;
        m_new->m_len = m_new->m_pkthdr.len = m_new->m_ext.ext_size;

        paddr = buf->bge_paddr;
        if (!sc->bge_rx_alignment_bug) {
                m_adj(m_new, ETHER_ALIGN);
                paddr += ETHER_ALIGN;
        }

        /* Set up the descriptor. */
        sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new;

        r = &sc->bge_ldata.bge_rx_jumbo_ring[i];
        r->bge_addr.bge_addr_lo = BGE_ADDR_LO(paddr);
        r->bge_addr.bge_addr_hi = BGE_ADDR_HI(paddr);
        r->bge_flags = BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING;
        r->bge_len = m_new->m_len;
        r->bge_idx = i;

        return 0;
}

/*
 * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
 * that's 1MB of memory, which is a lot. For now, we fill only the first
 * 256 ring entries and hope that our CPU is fast enough to keep up with
 * the NIC.
 */
static int
bge_init_rx_ring_std(struct bge_softc *sc)
{
        int i;

        for (i = 0; i < BGE_SSLOTS; i++) {
                if (bge_newbuf_std(sc, i, NULL) == ENOBUFS)
                        return(ENOBUFS);
        }

        bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
                        sc->bge_cdata.bge_rx_std_ring_map,
                        BUS_DMASYNC_PREWRITE);

        sc->bge_std = i - 1;
        CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);

        return(0);
}

static void
bge_free_rx_ring_std(struct bge_softc *sc)
{
        int i;

        for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
                if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
                        bus_dmamap_unload(sc->bge_cdata.bge_mtag,
                                          sc->bge_cdata.bge_rx_std_dmamap[i]);
                        m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
                        sc->bge_cdata.bge_rx_std_chain[i] = NULL;
                }
                bzero(&sc->bge_ldata.bge_rx_std_ring[i],
                    sizeof(struct bge_rx_bd));
        }
}

static int
bge_init_rx_ring_jumbo(struct bge_softc *sc)
{
        int i;
        struct bge_rcb *rcb;

        for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
                if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
                        return(ENOBUFS);
        }

        bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
                        sc->bge_cdata.bge_rx_jumbo_ring_map,
                        BUS_DMASYNC_PREWRITE);

        sc->bge_jumbo = i - 1;

        rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
        rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 0);
        CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);

        CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);

        return(0);
}

static void
bge_free_rx_ring_jumbo(struct bge_softc *sc)
{
        int i;

        for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
                if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
                        m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
                        sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
                }
                bzero(&sc->bge_ldata.bge_rx_jumbo_ring[i],
                    sizeof(struct bge_rx_bd));
        }
}

static void
bge_free_tx_ring(struct bge_softc *sc)
{
        int i;

        for (i = 0; i < BGE_TX_RING_CNT; i++) {
                if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
                        bus_dmamap_unload(sc->bge_cdata.bge_mtag,
                                          sc->bge_cdata.bge_tx_dmamap[i]);
                        m_freem(sc->bge_cdata.bge_tx_chain[i]);
                        sc->bge_cdata.bge_tx_chain[i] = NULL;
                }
                bzero(&sc->bge_ldata.bge_tx_ring[i],
                    sizeof(struct bge_tx_bd));
        }
}

static int
bge_init_tx_ring(struct bge_softc *sc)
{
        sc->bge_txcnt = 0;
        sc->bge_tx_saved_considx = 0;
        sc->bge_tx_prodidx = 0;

        /* Initialize transmit producer index for host-memory send ring. */
        CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);

        /* 5700 b2 errata */
        if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
                CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0);

        CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
        /* 5700 b2 errata */
        if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
                CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);

        return(0);
}

static void
bge_setmulti(struct bge_softc *sc)
{
        struct ifnet *ifp;
        struct ifmultiaddr *ifma;
        uint32_t hashes[4] = { 0, 0, 0, 0 };
        int h, i;

        ifp = &sc->arpcom.ac_if;

        if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
                for (i = 0; i < 4; i++)
                        CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
                return;
        }

        /* First, zot all the existing filters. */
        for (i = 0; i < 4; i++)
                CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);

        /* Now program new ones. */
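        /*
         * The chip hashes each multicast address into a 128-bit table:
         * the low 7 bits of the little-endian CRC32 pick one bit, with
         * bits 6:5 selecting one of the four 32-bit BGE_MAR registers
         * and bits 4:0 the bit position within it.
         */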
        LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
                if (ifma->ifma_addr->sa_family != AF_LINK)
                        continue;
                h = ether_crc32_le(
                    LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
                    ETHER_ADDR_LEN) & 0x7f;
                hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
        }

        for (i = 0; i < 4; i++)
                CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
}

/*
 * Do endian, PCI and DMA initialization. Also check the on-board ROM
 * self-test results.
 */
static int
bge_chipinit(struct bge_softc *sc)
{
        int i;
        uint32_t dma_rw_ctl;

        /* Set endian type before we access any non-PCI registers. */
        pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, BGE_INIT, 4);

        /*
         * Check the 'ROM failed' bit on the RX CPU to see if
         * self-tests passed.
         */
        if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL) {
                if_printf(&sc->arpcom.ac_if,
                          "RX CPU self-diagnostics failed!\n");
                return(ENODEV);
        }

        /* Clear the MAC control register */
        CSR_WRITE_4(sc, BGE_MAC_MODE, 0);

        /*
         * Clear the MAC statistics block in the NIC's
         * internal memory.
         */
        for (i = BGE_STATS_BLOCK;
            i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t))
                BGE_MEMWIN_WRITE(sc, i, 0);

        for (i = BGE_STATUS_BLOCK;
            i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t))
                BGE_MEMWIN_WRITE(sc, i, 0);

        /* Set up the PCI DMA control register. */
        if (sc->bge_pcie) {
                /* PCI Express */
                dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
                    (0xf << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
                    (0x2 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
        } else if (pci_read_config(sc->bge_dev, BGE_PCI_PCISTATE, 4) &
                   BGE_PCISTATE_PCI_BUSMODE) {
                /* Conventional PCI bus */
                dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
                    (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
                    (0x7 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
                    (0x0F);
        } else {
                /* PCI-X bus */
                /*
                 * The 5704 uses a different encoding of read/write
                 * watermarks.
                 */
                if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
                        dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
                            (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
                            (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
                else
                        dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
                            (0x3 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
                            (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
                            (0x0F);

                /*
                 * 5703 and 5704 need ONEDMA_AT_ONCE as a workaround
                 * for hardware bugs.
                 */
                if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
                    sc->bge_asicrev == BGE_ASICREV_BCM5704) {
                        uint32_t tmp;

                        tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1f;
                        if (tmp == 0x6 || tmp == 0x7)
                                dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE;
                }
        }

        if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
            sc->bge_asicrev == BGE_ASICREV_BCM5704 ||
            sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
            sc->bge_asicrev == BGE_ASICREV_BCM5750)
                dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
        pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);

        /*
         * Set up general mode register.
         */
        CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS|
            BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS|
            BGE_MODECTL_TX_NO_PHDR_CSUM);

        /*
         * Disable memory write invalidate.  Apparently it is not supported
         * properly by these devices.
         */
        PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD, PCIM_CMD_MWIEN, 4);

        /* Set the timer prescaler (always 66MHz) */
        CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/);

        return(0);
}

static int
bge_blockinit(struct bge_softc *sc)
{
        struct bge_rcb *rcb;
        bus_size_t vrcb;
        bge_hostaddr taddr;
        int i;

        /*
         * Initialize the memory window pointer register so that
         * we can access the first 32K of internal NIC RAM. This will
         * allow us to set up the TX send ring RCBs and the RX return
         * ring RCBs, plus other things which live in NIC memory.
         */
        CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);

        /* Note: the BCM5704 has a smaller mbuf space than other chips. */

        if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
            sc->bge_asicrev != BGE_ASICREV_BCM5750) {
                /* Configure mbuf memory pool */
                if (sc->bge_extram) {
                        CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
                            BGE_EXT_SSRAM);
                        if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
                                CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
                        else
                                CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
                } else {
                        CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
                            BGE_BUFFPOOL_1);
                        if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
                                CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
                        else
                                CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
                }

                /* Configure DMA resource pool */
                CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
                    BGE_DMA_DESCRIPTORS);
                CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
        }

        /* Configure mbuf pool watermarks */
        if (sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
            sc->bge_asicrev == BGE_ASICREV_BCM5750) {
                CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
                CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
        } else {
                CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
                CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
        }
        CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);

        /* Configure DMA resource watermarks */
        CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
        CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);

        /* Enable buffer manager */
        if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
            sc->bge_asicrev != BGE_ASICREV_BCM5750) {
                CSR_WRITE_4(sc, BGE_BMAN_MODE,
                    BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN);

                /* Poll for buffer manager start indication */
                for (i = 0; i < BGE_TIMEOUT; i++) {
                        if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
                                break;
                        DELAY(10);
                }

                if (i == BGE_TIMEOUT) {
                        if_printf(&sc->arpcom.ac_if,
                                  "buffer manager failed to start\n");
                        return(ENXIO);
                }
        }

        /* Enable flow-through queues */
        CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
        CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);

        /* Wait until queue initialization is complete */
        for (i = 0; i < BGE_TIMEOUT; i++) {
                if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
                        break;
                DELAY(10);
        }

        if (i == BGE_TIMEOUT) {
                if_printf(&sc->arpcom.ac_if,
                          "flow-through queue init failed\n");
                return(ENXIO);
        }

        /* Initialize the standard RX ring control block */
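        /*
         * An RCB (ring control block) records everything the chip needs
         * to know about a ring: the host (DMA) address, the maximum
         * frame length and flags, and, for rings kept in NIC memory,
         * the NIC-local address.
         */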
        rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb;
        rcb->bge_hostaddr.bge_addr_lo =
            BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr);
        rcb->bge_hostaddr.bge_addr_hi =
            BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr);
        bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
            sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREREAD);
        if (sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
            sc->bge_asicrev == BGE_ASICREV_BCM5750)
                rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
        else
                rcb->bge_maxlen_flags =
                    BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
        if (sc->bge_extram)
                rcb->bge_nicaddr = BGE_EXT_STD_RX_RINGS;
        else
                rcb->bge_nicaddr = BGE_STD_RX_RINGS;
        CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
        CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
        CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
        CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);

        /*
         * Initialize the jumbo RX ring control block.
         * We set the 'ring disabled' bit in the flags
         * field until we're actually ready to start
         * using this ring (i.e. once we set the MTU
         * high enough to require it).
         */
        if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
            sc->bge_asicrev != BGE_ASICREV_BCM5750) {
                rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;

                rcb->bge_hostaddr.bge_addr_lo =
                    BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
                rcb->bge_hostaddr.bge_addr_hi =
                    BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
                bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
                    sc->bge_cdata.bge_rx_jumbo_ring_map,
                    BUS_DMASYNC_PREREAD);
                rcb->bge_maxlen_flags =
                    BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN,
                    BGE_RCB_FLAG_RING_DISABLED);
                if (sc->bge_extram)
                        rcb->bge_nicaddr = BGE_EXT_JUMBO_RX_RINGS;
                else
                        rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
                CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
                    rcb->bge_hostaddr.bge_addr_hi);
                CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
                    rcb->bge_hostaddr.bge_addr_lo);
                CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
                    rcb->bge_maxlen_flags);
                CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);

                /* Set up dummy disabled mini ring RCB */
                rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb;
                rcb->bge_maxlen_flags =
                    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
                CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
                    rcb->bge_maxlen_flags);
        }

        /*
         * Set the BD ring replenish thresholds. The recommended
         * values are 1/8th the number of descriptors allocated to
         * each ring.
         */
        CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, BGE_STD_RX_RING_CNT/8);
        CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8);

        /*
         * Disable all unused send rings by setting the 'ring disabled'
         * bit in the flags field of all the TX send ring control blocks.
         * These are located in NIC memory.
         */
        vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
        for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) {
                RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
                    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
                RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
                vrcb += sizeof(struct bge_rcb);
        }

        /* Configure TX RCB 0 (we use only the first ring) */
        vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
        BGE_HOSTADDR(taddr, sc->bge_ldata.bge_tx_ring_paddr);
        RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
        RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
        RCB_WRITE_4(sc, vrcb, bge_nicaddr,
            BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
        if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
            sc->bge_asicrev != BGE_ASICREV_BCM5750) {
                RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
                    BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
        }

        /* Disable all unused RX return rings */
        vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
        for (i = 0; i < BGE_RX_RINGS_MAX; i++) {
                RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0);
                RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0);
                RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
                    BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt,
                    BGE_RCB_FLAG_RING_DISABLED));
                RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
                CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO +
                    (i * (sizeof(uint64_t))), 0);
                vrcb += sizeof(struct bge_rcb);
        }

        /* Initialize RX ring indexes */
        CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, 0);
        CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
        CSR_WRITE_4(sc, BGE_MBX_RX_MINI_PROD_LO, 0);

        /*
         * Set up RX return ring 0.
         * Note that the NIC address for RX return rings is 0x00000000.
         * The return rings live entirely within the host, so the
         * nicaddr field in the RCB isn't used.
         */
        vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
        BGE_HOSTADDR(taddr, sc->bge_ldata.bge_rx_return_ring_paddr);
        RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
        RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
        RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0x00000000);
        RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
            BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));

        /* Set random backoff seed for TX */
        CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
            sc->arpcom.ac_enaddr[0] + sc->arpcom.ac_enaddr[1] +
            sc->arpcom.ac_enaddr[2] + sc->arpcom.ac_enaddr[3] +
            sc->arpcom.ac_enaddr[4] + sc->arpcom.ac_enaddr[5] +
            BGE_TX_BACKOFF_SEED_MASK);

        /* Set inter-packet gap */
        CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);

        /*
         * Specify which ring to use for packets that don't match
         * any RX rules.
         */
        CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);

        /*
         * Configure number of RX lists. One interrupt distribution
         * list, sixteen active lists, one bad frames class.
         */
        CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);

        /* Initialize RX list placement stats mask. */
        CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
        CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
1312
1313         /* Disable host coalescing until we get it set up */
1314         CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
1315
1316         /* Poll to make sure it's shut down. */
1317         for (i = 0; i < BGE_TIMEOUT; i++) {
1318                 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
1319                         break;
1320                 DELAY(10);
1321         }
1322
1323         if (i == BGE_TIMEOUT) {
1324                 if_printf(&sc->arpcom.ac_if,
1325                           "host coalescing engine failed to idle\n");
1326                 return(ENXIO);
1327         }
1328
1329         /* Set up host coalescing defaults */
1330         CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
1331         CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
1332         CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
1333         CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
1334         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1335             sc->bge_asicrev != BGE_ASICREV_BCM5750) {
1336                 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
1337                 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
1338         }
1339         CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0);
1340         CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0);
1341
1342         /* Set up address of statistics block */
1343         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1344             sc->bge_asicrev != BGE_ASICREV_BCM5750) {
1345                 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI,
1346                     BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr));
1347                 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
1348                     BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr));
1349
1350                 CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
1351                 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
1352                 CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
1353         }
1354
1355         /* Set up address of status block */
1356         CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI,
1357             BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr));
1358         CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
1359             BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr));
1360         sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx = 0;
1361         sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx = 0;

        /* Turn on host coalescing state machine */
        CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);

        /* Turn on RX BD completion state machine and enable attentions */
        CSR_WRITE_4(sc, BGE_RBDC_MODE,
            BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);

        /* Turn on RX list placement state machine */
        CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);

        /* Turn on RX list selector state machine. */
        if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
            sc->bge_asicrev != BGE_ASICREV_BCM5750)
                CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);

        /* Turn on DMA, clear stats */
        CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB|
            BGE_MACMODE_RXDMA_ENB|BGE_MACMODE_RX_STATS_CLEAR|
            BGE_MACMODE_TX_STATS_CLEAR|BGE_MACMODE_RX_STATS_ENB|
            BGE_MACMODE_TX_STATS_ENB|BGE_MACMODE_FRMHDR_DMA_ENB|
            (sc->bge_tbi ? BGE_PORTMODE_TBI : BGE_PORTMODE_MII));

        /* Set misc. local control, enable interrupts on attentions */
        CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);

#ifdef notdef
        /* Assert GPIO pins for PHY reset */
        BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
            BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
        BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
            BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
#endif

        /* Turn on DMA completion state machine */
        if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
            sc->bge_asicrev != BGE_ASICREV_BCM5750)
                CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);

        /* Turn on write DMA state machine */
        CSR_WRITE_4(sc, BGE_WDMA_MODE,
            BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS);

        /* Turn on read DMA state machine */
        CSR_WRITE_4(sc, BGE_RDMA_MODE,
            BGE_RDMAMODE_ENABLE|BGE_RDMAMODE_ALL_ATTNS);

        /* Turn on RX data completion state machine */
        CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);

        /* Turn on RX BD initiator state machine */
        CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);

        /* Turn on RX data and RX BD initiator state machine */
        CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);

        /* Turn on Mbuf cluster free state machine */
        if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
            sc->bge_asicrev != BGE_ASICREV_BCM5750)
                CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);

        /* Turn on send BD completion state machine */
        CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);

        /* Turn on send data completion state machine */
        CSR_WRITE_4(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);

        /* Turn on send data initiator state machine */
        CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);

        /* Turn on send BD initiator state machine */
        CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);

        /* Turn on send BD selector state machine */
        CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);

        CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
        CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
            BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);

        /* ack/clear link change events */
        CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
            BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
            BGE_MACSTAT_LINK_CHANGED);
        CSR_WRITE_4(sc, BGE_MI_STS, 0);

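        /*
         * With AUTOPOLL enabled the MAC polls the PHY itself and
         * mirrors link changes into the MAC status register, sparing
         * the driver explicit MII reads for link tracking.
         */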
        /* Enable PHY auto polling (for MII/GMII only) */
        if (sc->bge_tbi) {
                CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
        } else {
                BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL|10<<16);
                if (sc->bge_asicrev == BGE_ASICREV_BCM5700)
                        CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
                            BGE_EVTENB_MI_INTERRUPT);
        }

        /* Enable link state change attentions. */
        BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);

        return(0);
}

/*
 * Probe for a Broadcom chip. Check the PCI vendor and device IDs
 * against our list and return its name if we find a match. Note
 * that since the Broadcom controller contains VPD support, we
 * can get the device name string from the controller itself instead
 * of the compiled-in string. This is a little slow, but it guarantees
 * we'll always announce the right product name.
 */
static int
bge_probe(device_t dev)
{
        struct bge_softc *sc;
        struct bge_type *t;
        char *descbuf;
        uint16_t product, vendor;

        product = pci_get_device(dev);
        vendor = pci_get_vendor(dev);

        for (t = bge_devs; t->bge_name != NULL; t++) {
                if (vendor == t->bge_vid && product == t->bge_did)
                        break;
        }

        if (t->bge_name == NULL)
                return(ENXIO);

        sc = device_get_softc(dev);
        descbuf = kmalloc(BGE_DEVDESC_MAX, M_TEMP, M_WAITOK);
        ksnprintf(descbuf, BGE_DEVDESC_MAX, "%s, ASIC rev. %#04x", t->bge_name,
            pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >> 16);
        device_set_desc_copy(dev, descbuf);
        if (pci_get_subvendor(dev) == PCI_VENDOR_DELL)
                sc->bge_no_3_led = 1;
        kfree(descbuf, M_TEMP);
        return(0);
}

static int
bge_attach(device_t dev)
{
        struct ifnet *ifp;
        struct bge_softc *sc;
        uint32_t hwcfg = 0;
        uint32_t mac_addr = 0;
        int error = 0, rid;
        uint8_t ether_addr[ETHER_ADDR_LEN];

        sc = device_get_softc(dev);
        sc->bge_dev = dev;
        callout_init(&sc->bge_stat_timer);
        lwkt_serialize_init(&sc->bge_jslot_serializer);

        /*
         * Map control/status registers.
         */
        pci_enable_busmaster(dev);

        rid = BGE_PCI_BAR0;
        sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
            RF_ACTIVE);

        if (sc->bge_res == NULL) {
                device_printf(dev, "couldn't map memory\n");
                error = ENXIO;
                return(error);
        }

        sc->bge_btag = rman_get_bustag(sc->bge_res);
        sc->bge_bhandle = rman_get_bushandle(sc->bge_res);

        /* Allocate interrupt */
        rid = 0;

        sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
            RF_SHAREABLE | RF_ACTIVE);

        if (sc->bge_irq == NULL) {
                device_printf(dev, "couldn't map interrupt\n");
                error = ENXIO;
                goto fail;
        }

        /* Save ASIC rev. */
        sc->bge_chipid =
            pci_read_config(dev, BGE_PCI_MISC_CTL, 4) &
            BGE_PCIMISCCTL_ASICREV;
        sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid);
        sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid);

        /*
         * Treat the 5714 and the 5752 like the 5750 until we have
         * more info on these chips.
         */
        if (sc->bge_asicrev == BGE_ASICREV_BCM5714 ||
            sc->bge_asicrev == BGE_ASICREV_BCM5752)
                sc->bge_asicrev = BGE_ASICREV_BCM5750;

        /*
         * XXX: Broadcom Linux driver.  Not in specs or errata.
         * PCI-Express?
         */
        if (sc->bge_asicrev == BGE_ASICREV_BCM5750) {
                uint32_t v;

                v = pci_read_config(dev, BGE_PCI_MSI_CAPID, 4);
                if (((v >> 8) & 0xff) == BGE_PCIE_MSI_CAPID) {
                        v = pci_read_config(dev, BGE_PCIE_MSI_CAPID, 4);
                        if ((v & 0xff) == BGE_PCIE_MSI_CAPID_VAL)
                                sc->bge_pcie = 1;
                }
        }

        ifp = &sc->arpcom.ac_if;
        if_initname(ifp, device_get_name(dev), device_get_unit(dev));

        /* Try to reset the chip. */
        bge_reset(sc);

        if (bge_chipinit(sc)) {
                device_printf(dev, "chip initialization failed\n");
                error = ENXIO;
                goto fail;
        }

        /*
         * Get station address from the EEPROM.
         */
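        /*
         * 0x484b is ASCII "HK", apparently a signature the bootcode
         * leaves in NIC memory when a valid station address has been
         * stored at offsets 0x0c14/0x0c18.
         */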
        mac_addr = bge_readmem_ind(sc, 0x0c14);
        if ((mac_addr >> 16) == 0x484b) {
                ether_addr[0] = (uint8_t)(mac_addr >> 8);
                ether_addr[1] = (uint8_t)mac_addr;
                mac_addr = bge_readmem_ind(sc, 0x0c18);
                ether_addr[2] = (uint8_t)(mac_addr >> 24);
                ether_addr[3] = (uint8_t)(mac_addr >> 16);
                ether_addr[4] = (uint8_t)(mac_addr >> 8);
                ether_addr[5] = (uint8_t)mac_addr;
        } else if (bge_read_eeprom(sc, ether_addr,
            BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) {
                device_printf(dev, "failed to read station address\n");
                error = ENXIO;
                goto fail;
        }

        /* 5705/5750 limits RX return ring to 512 entries. */
        if (sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
            sc->bge_asicrev == BGE_ASICREV_BCM5750)
                sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
        else
                sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;

        error = bge_dma_alloc(sc);
        if (error)
                goto fail;

        /* Set default tuneable values. */
        sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
        sc->bge_rx_coal_ticks = 150;
        sc->bge_tx_coal_ticks = 150;
        sc->bge_rx_max_coal_bds = 64;
        sc->bge_tx_max_coal_bds = 128;
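        /*
         * With these defaults a status update is posted after 150
         * coalescing ticks or after 64 RX / 128 TX buffer descriptors
         * accumulate, whichever comes first; larger values trade
         * interrupt rate for latency.
         */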

        /* Set up ifnet structure */
        ifp->if_softc = sc;
        ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
        ifp->if_ioctl = bge_ioctl;
        ifp->if_start = bge_start;
        ifp->if_watchdog = bge_watchdog;
        ifp->if_init = bge_init;
        ifp->if_mtu = ETHERMTU;
        ifp->if_capabilities = IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
        ifq_set_maxlen(&ifp->if_snd, BGE_TX_RING_CNT - 1);
        ifq_set_ready(&ifp->if_snd);

        /*
         * 5700 B0 chips do not support checksumming correctly due
         * to hardware bugs.
         */
        if (sc->bge_chipid != BGE_CHIPID_BCM5700_B0) {
                ifp->if_capabilities |= IFCAP_HWCSUM;
                ifp->if_hwassist = BGE_CSUM_FEATURES;
        }
        ifp->if_capenable = ifp->if_capabilities;

        /*
         * Figure out what sort of media we have by checking the
         * hardware config word in the first 32k of NIC internal memory,
         * or fall back to examining the EEPROM if necessary.
         * Note: on some BCM5700 cards, this value appears to be unset.
         * If that's the case, we have to rely on identifying the NIC
         * by its PCI subsystem ID, as we do below for the SysKonnect
         * SK-9D41.
         */
        if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER)
                hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
        else {
                if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
                                    sizeof(hwcfg))) {
                        device_printf(dev, "failed to read EEPROM\n");
                        error = ENXIO;
                        goto fail;
                }
                hwcfg = ntohl(hwcfg);
        }

        if ((hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER)
                sc->bge_tbi = 1;

        /* The SysKonnect SK-9D41 is a 1000baseSX card. */
        if (pci_get_subvendor(dev) == PCI_PRODUCT_SCHNEIDERKOCH_SK_9D41)
                sc->bge_tbi = 1;

        if (sc->bge_tbi) {
                ifmedia_init(&sc->bge_ifmedia, IFM_IMASK,
                    bge_ifmedia_upd, bge_ifmedia_sts);
                ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
                ifmedia_add(&sc->bge_ifmedia,
                    IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL);
                ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
                ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO);
                sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
        } else {
                /*
                 * Do transceiver setup.
                 */
                if (mii_phy_probe(dev, &sc->bge_miibus,
                    bge_ifmedia_upd, bge_ifmedia_sts)) {
                        device_printf(dev, "MII without any PHY!\n");
                        error = ENXIO;
                        goto fail;
                }
        }

        /*
         * When using the BCM5701 in PCI-X mode, data corruption has
         * been observed in the first few bytes of some received packets.
         * Aligning the packet buffer in memory eliminates the corruption.
         * Unfortunately, this misaligns the packet payloads.  On platforms
         * which do not support unaligned accesses, we will realign the
         * payloads by copying the received packets.
         */
        switch (sc->bge_chipid) {
        case BGE_CHIPID_BCM5701_A0:
        case BGE_CHIPID_BCM5701_B0:
        case BGE_CHIPID_BCM5701_B2:
        case BGE_CHIPID_BCM5701_B5:
                /* If in PCI-X mode, work around the alignment bug. */
                if ((pci_read_config(dev, BGE_PCI_PCISTATE, 4) &
                    (BGE_PCISTATE_PCI_BUSMODE | BGE_PCISTATE_PCI_BUSSPEED)) ==
                    BGE_PCISTATE_PCI_BUSSPEED)
                        sc->bge_rx_alignment_bug = 1;
                break;
        }

        /*
         * Call MI attach routine.
         */
        ether_ifattach(ifp, ether_addr, NULL);

        error = bus_setup_intr(dev, sc->bge_irq, INTR_NETSAFE,
                               bge_intr, sc, &sc->bge_intrhand,
                               ifp->if_serializer);
        if (error) {
                ether_ifdetach(ifp);
                device_printf(dev, "couldn't set up irq\n");
                goto fail;
        }
        return(0);
fail:
        bge_detach(dev);
        return(error);
}

static int
bge_detach(device_t dev)
{
        struct bge_softc *sc = device_get_softc(dev);
        struct ifnet *ifp = &sc->arpcom.ac_if;

        if (device_is_attached(dev)) {
                lwkt_serialize_enter(ifp->if_serializer);
                bge_stop(sc);
                bge_reset(sc);
                bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand);
                lwkt_serialize_exit(ifp->if_serializer);

                ether_ifdetach(ifp);
        }
        if (sc->bge_tbi)
                ifmedia_removeall(&sc->bge_ifmedia);
        if (sc->bge_miibus)
                device_delete_child(dev, sc->bge_miibus);
        bus_generic_detach(dev);

        bge_release_resources(sc);
        bge_dma_free(sc);

        return 0;
}

static void
bge_release_resources(struct bge_softc *sc)
{
        device_t dev;

        dev = sc->bge_dev;

        if (sc->bge_irq != NULL)
                bus_release_resource(dev, SYS_RES_IRQ, 0, sc->bge_irq);

        if (sc->bge_res != NULL)
                bus_release_resource(dev, SYS_RES_MEMORY,
                    BGE_PCI_BAR0, sc->bge_res);
}

static void
bge_reset(struct bge_softc *sc)
{
        device_t dev;
        uint32_t cachesize, command, pcistate, reset;
        int i, val = 0;

        dev = sc->bge_dev;

        /* Save some important PCI state. */
        cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
        command = pci_read_config(dev, BGE_PCI_CMD, 4);
        pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);

        pci_write_config(dev, BGE_PCI_MISC_CTL,
            BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
            BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW, 4);

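        /*
         * (65<<1) programs the timer prescaler field of BGE_MISC_CFG;
         * other BSD bge drivers spell this value BGE_32BITTIME_66MHZ,
         * i.e. the prescaler for a 66MHz core clock.
         */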
        reset = BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1);

        /* XXX: Broadcom Linux driver. */
        if (sc->bge_pcie) {
                if (CSR_READ_4(sc, 0x7e2c) == 0x60)     /* PCIE 1.0 */
                        CSR_WRITE_4(sc, 0x7e2c, 0x20);
                if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
                        /* Prevent PCIE link training during global reset */
                        CSR_WRITE_4(sc, BGE_MISC_CFG, (1<<29));
                        reset |= (1<<29);
                }
        }

        /* Issue global reset */
        bge_writereg_ind(sc, BGE_MISC_CFG, reset);

        DELAY(1000);

        /* XXX: Broadcom Linux driver. */
        if (sc->bge_pcie) {
                if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
                        uint32_t v;

                        DELAY(500000); /* wait for link training to complete */
                        v = pci_read_config(dev, 0xc4, 4);
                        pci_write_config(dev, 0xc4, v | (1<<15), 4);
                }
                /* Set PCIE max payload size and clear error status. */
                pci_write_config(dev, 0xd8, 0xf5000, 4);
        }

        /* Reset some of the PCI state that got zapped by reset */
        pci_write_config(dev, BGE_PCI_MISC_CTL,
            BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
            BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW, 4);
        pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
        pci_write_config(dev, BGE_PCI_CMD, command, 4);
        bge_writereg_ind(sc, BGE_MISC_CFG, (65 << 1));

        /* Enable memory arbiter. */
        if (sc->bge_asicrev != BGE_ASICREV_BCM5705)
                CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);

        /*
         * Prevent PXE restart: write a magic number to the
         * general communications memory at 0xB50.
         */
        bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
        /*
         * Poll the value location we just wrote until
         * we see the 1's complement of the magic number.
         * This indicates that the firmware initialization
         * is complete.
         */
        for (i = 0; i < BGE_TIMEOUT; i++) {
                val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
                if (val == ~BGE_MAGIC_NUMBER)
                        break;
                DELAY(10);
        }

        if (i == BGE_TIMEOUT) {
                if_printf(&sc->arpcom.ac_if, "firmware handshake timed out\n");
                return;
        }

        /*
         * XXX Wait for the value of the PCISTATE register to
         * return to its original pre-reset state. This is a
         * fairly good indicator of reset completion. If we don't
         * wait for the reset to fully complete, trying to read
         * from the device's non-PCI registers may yield garbage
         * results.
         */
        for (i = 0; i < BGE_TIMEOUT; i++) {
                if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate)
                        break;
                DELAY(10);
        }

        /* Fix up byte swapping */
        CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS |
            BGE_MODECTL_BYTESWAP_DATA);

        CSR_WRITE_4(sc, BGE_MAC_MODE, 0);

        /*
         * The 5704 in TBI mode apparently needs some special
         * adjustment to ensure the SERDES drive level is set
         * to 1.2V.
         */
        if (sc->bge_asicrev == BGE_ASICREV_BCM5704 && sc->bge_tbi) {
                uint32_t serdescfg;

                serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG);
                serdescfg = (serdescfg & ~0xFFF) | 0x880;
                CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg);
        }

        /* XXX: Broadcom Linux driver. */
        if (sc->bge_pcie && sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
                uint32_t v;

                v = CSR_READ_4(sc, 0x7c00);
                CSR_WRITE_4(sc, 0x7c00, v | (1<<25));
        }

        DELAY(10000);
}

/*
 * Frame reception handling. This is called if there's a frame
 * on the receive return list.
 *
 * Note: we have to be able to handle two possibilities here:
 * 1) the frame is from the jumbo receive ring
 * 2) the frame is from the standard receive ring
 */
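/*
 * In either case the chip advances the return ring producer index in
 * the status block; the driver chases it with bge_rx_saved_considx
 * and hands the new consumer index back through the
 * BGE_MBX_RX_CONS0_LO mailbox once the frames are processed.
 */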

static void
bge_rxeof(struct bge_softc *sc)
{
        struct ifnet *ifp;
        int stdcnt = 0, jumbocnt = 0;

        if (sc->bge_rx_saved_considx ==
            sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx)
                return;

        ifp = &sc->arpcom.ac_if;

        bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
                        sc->bge_cdata.bge_rx_return_ring_map,
                        BUS_DMASYNC_POSTREAD);
        bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
                        sc->bge_cdata.bge_rx_std_ring_map,
                        BUS_DMASYNC_POSTREAD);
        if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
            sc->bge_asicrev != BGE_ASICREV_BCM5750) {
                bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
                                sc->bge_cdata.bge_rx_jumbo_ring_map,
                                BUS_DMASYNC_POSTREAD);
        }

        while (sc->bge_rx_saved_considx !=
               sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx) {
                struct bge_rx_bd        *cur_rx;
                uint32_t                rxidx;
                struct mbuf             *m = NULL;
                uint16_t                vlan_tag = 0;
                int                     have_tag = 0;

                cur_rx =
                    &sc->bge_ldata.bge_rx_return_ring[sc->bge_rx_saved_considx];

                rxidx = cur_rx->bge_idx;
                BGE_INC(sc->bge_rx_saved_considx, sc->bge_return_ring_cnt);

                if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
                        have_tag = 1;
                        vlan_tag = cur_rx->bge_vlan_tag;
                }

                if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
                        BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
                        m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
                        sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL;
                        jumbocnt++;
                        if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
                                ifp->if_ierrors++;
                                bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
                                continue;
                        }
                        if (bge_newbuf_jumbo(sc,
                            sc->bge_jumbo, NULL) == ENOBUFS) {
                                ifp->if_ierrors++;
                                bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
                                continue;
                        }
                } else {
                        BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
                        bus_dmamap_sync(sc->bge_cdata.bge_mtag,
                                        sc->bge_cdata.bge_rx_std_dmamap[rxidx],
                                        BUS_DMASYNC_POSTREAD);
                        bus_dmamap_unload(sc->bge_cdata.bge_mtag,
                                sc->bge_cdata.bge_rx_std_dmamap[rxidx]);
                        m = sc->bge_cdata.bge_rx_std_chain[rxidx];
                        sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL;
                        stdcnt++;
                        if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
                                ifp->if_ierrors++;
                                bge_newbuf_std(sc, sc->bge_std, m);
                                continue;
                        }
                        if (bge_newbuf_std(sc, sc->bge_std,
                            NULL) == ENOBUFS) {
                                ifp->if_ierrors++;
                                bge_newbuf_std(sc, sc->bge_std, m);
                                continue;
                        }
                }

                ifp->if_ipackets++;
#ifndef __i386__
                /*
                 * The i386 allows unaligned accesses, but for other
                 * platforms we must make sure the payload is aligned.
                 */
                if (sc->bge_rx_alignment_bug) {
                        bcopy(m->m_data, m->m_data + ETHER_ALIGN,
                            cur_rx->bge_len);
                        m->m_data += ETHER_ALIGN;
                }
#endif
                m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
                m->m_pkthdr.rcvif = ifp;

                if (ifp->if_capenable & IFCAP_RXCSUM) {
                        if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
                                m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
                                if ((cur_rx->bge_ip_csum ^ 0xffff) == 0)
                                        m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
                        }
                        if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM &&
                            m->m_pkthdr.len >= BGE_MIN_FRAME) {
                                m->m_pkthdr.csum_data =
                                    cur_rx->bge_tcp_udp_csum;
                                m->m_pkthdr.csum_flags |=
                                        CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
                        }
                }

                /*
                 * If we received a packet with a vlan tag, pass it
                 * to vlan_input() instead of ether_input().
                 */
                if (have_tag) {
                        VLAN_INPUT_TAG(m, vlan_tag);
                        have_tag = vlan_tag = 0;
                } else {
                        ifp->if_input(ifp, m);
                }
        }

        if (stdcnt > 0) {
                bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
                                sc->bge_cdata.bge_rx_std_ring_map,
                                BUS_DMASYNC_PREWRITE);
        }

        if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
            sc->bge_asicrev != BGE_ASICREV_BCM5750) {
                if (jumbocnt > 0) {
                        bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
                                        sc->bge_cdata.bge_rx_jumbo_ring_map,
                                        BUS_DMASYNC_PREWRITE);
                }
        }

        CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
        if (stdcnt)
                CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
        if (jumbocnt)
                CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
}

static void
bge_txeof(struct bge_softc *sc)
{
        struct bge_tx_bd *cur_tx = NULL;
        struct ifnet *ifp;

        if (sc->bge_tx_saved_considx ==
            sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx)
                return;

        ifp = &sc->arpcom.ac_if;

        bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
                        sc->bge_cdata.bge_tx_ring_map,
                        BUS_DMASYNC_POSTREAD);

        /*
         * Go through our tx ring and free mbufs for those
         * frames that have been sent.
         */
        while (sc->bge_tx_saved_considx !=
               sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx) {
                uint32_t idx = 0;

                idx = sc->bge_tx_saved_considx;
                cur_tx = &sc->bge_ldata.bge_tx_ring[idx];
                if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
                        ifp->if_opackets++;
                if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
                        bus_dmamap_sync(sc->bge_cdata.bge_mtag,
                                        sc->bge_cdata.bge_tx_dmamap[idx],
                                        BUS_DMASYNC_POSTWRITE);
                        bus_dmamap_unload(sc->bge_cdata.bge_mtag,
                            sc->bge_cdata.bge_tx_dmamap[idx]);
                        m_freem(sc->bge_cdata.bge_tx_chain[idx]);
                        sc->bge_cdata.bge_tx_chain[idx] = NULL;
                }
                sc->bge_txcnt--;
                BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
        }

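        /*
         * Clear OACTIVE only once there is headroom for a worst-case
         * packet (BGE_NSEG_RSVD + BGE_NSEG_SPARE descriptors),
         * matching the reservation made in bge_start().
         */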
        if (cur_tx != NULL &&
            (BGE_TX_RING_CNT - sc->bge_txcnt) >=
            (BGE_NSEG_RSVD + BGE_NSEG_SPARE))
                ifp->if_flags &= ~IFF_OACTIVE;

        if (sc->bge_txcnt == 0)
                ifp->if_timer = 0;

        if (!ifq_is_empty(&ifp->if_snd))
                ifp->if_start(ifp);
}

static void
bge_intr(void *xsc)
{
        struct bge_softc *sc = xsc;
        struct ifnet *ifp = &sc->arpcom.ac_if;
        uint32_t status, statusword, mimode;

        /*
         * Ack the interrupt by writing something to BGE_MBX_IRQ0_LO.  Don't
         * disable interrupts by writing nonzero like we used to, since with
         * our current organization this just gives complications and
         * pessimizations for re-enabling interrupts.  We used to have races
         * instead of the necessary complications.  Disabling interrupts
         * would just reduce the chance of a status update while we are
         * running (by switching to the interrupt-mode coalescence
         * parameters), but this chance is already very low so it is more
         * efficient to get another interrupt than prevent it.
         *
         * We do the ack first to ensure another interrupt if there is a
         * status update after the ack.  We don't check for the status
         * changing later because it is more efficient to get another
         * interrupt than prevent it, not quite as above (not checking is
         * a smaller optimization than not toggling the interrupt enable,
         * since checking doesn't involve PCI accesses and toggling requires
         * the status check).  So toggling would probably be a pessimization
         * even with MSI.  It would only be needed for using a task queue.
         */
        CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);

        bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
                        sc->bge_cdata.bge_status_map,
                        BUS_DMASYNC_POSTREAD);

        /*
         * XXX Fetch the status word and clear it in one operation so
         * an update arriving while it is processed is not lost.
         */
        statusword = loadandclear(&sc->bge_ldata.bge_status_block->bge_status);

        /*
         * Process link state changes.
         * Grrr. The link status word in the status block does
         * not work correctly on the BCM5700 rev AX and BX chips,
         * according to all available information. Hence, we have
         * to enable MII interrupts in order to properly obtain
         * async link changes. Unfortunately, this also means that
         * we have to read the MAC status register to detect link
         * changes, thereby adding an additional register access to
         * the interrupt handler.
         */

        if (sc->bge_asicrev == BGE_ASICREV_BCM5700) {
                status = CSR_READ_4(sc, BGE_MAC_STS);
                if (status & BGE_MACSTAT_MI_INTERRUPT) {
                        sc->bge_link = 0;
                        callout_stop(&sc->bge_stat_timer);
                        bge_tick_serialized(sc);
                        /* Clear the interrupt */
                        CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
                            BGE_EVTENB_MI_INTERRUPT);
                        bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR);
                        bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR,
                            BRGPHY_INTRS);
                }
        } else {
                if (statusword & BGE_STATFLAG_LINKSTATE_CHANGED) {
                        /*
                         * Sometimes PCS encoding errors are detected in
                         * TBI mode (on fiber NICs), and for some reason
                         * the chip will signal them as link changes.
                         * If we get a link change event, but the 'PCS
                         * encoding error' bit in the MAC status register
                         * is set, don't bother doing a link check.
                         * This avoids spurious "gigabit link up" messages
                         * that sometimes appear on fiber NICs during
                         * periods of heavy traffic. (There should be no
                         * effect on copper NICs.)
                         *
                         * If we do have a copper NIC (bge_tbi == 0) then
                         * check that the AUTOPOLL bit is set before
                         * processing the event as a real link change.
                         * Turning AUTOPOLL on and off in the MII read/write
                         * functions will often trigger a link status
                         * interrupt for no reason.
                         */
                        status = CSR_READ_4(sc, BGE_MAC_STS);
                        mimode = CSR_READ_4(sc, BGE_MI_MODE);
                        if (!(status & (BGE_MACSTAT_PORT_DECODE_ERROR |
                                        BGE_MACSTAT_MI_COMPLETE)) &&
                            (sc->bge_tbi || (mimode & BGE_MIMODE_AUTOPOLL))) {
                                sc->bge_link = 0;
                                callout_stop(&sc->bge_stat_timer);
                                bge_tick_serialized(sc);
                        }
                        /* Clear the interrupt */
                        CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
                            BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
                            BGE_MACSTAT_LINK_CHANGED);

                        /* Force flush the status block cached by PCI bridge */
                        CSR_READ_4(sc, BGE_MBX_IRQ0_LO);
                }
        }

        if (ifp->if_flags & IFF_RUNNING) {
                /* Check RX return ring producer/consumer */
                bge_rxeof(sc);

                /* Check TX ring producer/consumer */
                bge_txeof(sc);
        }
}

static void
bge_tick(void *xsc)
{
        struct bge_softc *sc = xsc;
        struct ifnet *ifp = &sc->arpcom.ac_if;

        lwkt_serialize_enter(ifp->if_serializer);
        bge_tick_serialized(xsc);
        lwkt_serialize_exit(ifp->if_serializer);
}

static void
bge_tick_serialized(void *xsc)
{
        struct bge_softc *sc = xsc;
        struct ifnet *ifp = &sc->arpcom.ac_if;
        struct mii_data *mii = NULL;
        struct ifmedia *ifm = NULL;

        if (sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
            sc->bge_asicrev == BGE_ASICREV_BCM5750)
                bge_stats_update_regs(sc);
        else
                bge_stats_update(sc);

        callout_reset(&sc->bge_stat_timer, hz, bge_tick, sc);

        if (sc->bge_link) {
                return;
        }

        if (sc->bge_tbi) {
                ifm = &sc->bge_ifmedia;
                if (CSR_READ_4(sc, BGE_MAC_STS) &
                    BGE_MACSTAT_TBI_PCS_SYNCHED) {
                        sc->bge_link++;
                        if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
                                BGE_CLRBIT(sc, BGE_MAC_MODE,
                                           BGE_MACMODE_TBI_SEND_CFGS);
                        }
                        CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
                        if_printf(ifp, "gigabit link up\n");
                        if (!ifq_is_empty(&ifp->if_snd))
                                ifp->if_start(ifp);
                }
                return;
        }

        mii = device_get_softc(sc->bge_miibus);
        mii_tick(mii);

        if (!sc->bge_link) {
                mii_pollstat(mii);
                if (mii->mii_media_status & IFM_ACTIVE &&
                    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
                        sc->bge_link++;
                        if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
                            IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
                                if_printf(ifp, "gigabit link up\n");
                        if (!ifq_is_empty(&ifp->if_snd))
                                ifp->if_start(ifp);
                }
        }
}

static void
bge_stats_update_regs(struct bge_softc *sc)
{
        struct ifnet *ifp = &sc->arpcom.ac_if;
        struct bge_mac_stats_regs stats;
        uint32_t *s;
        int i;

        s = (uint32_t *)&stats;
        for (i = 0; i < sizeof(struct bge_mac_stats_regs); i += 4) {
                *s = CSR_READ_4(sc, BGE_RX_STATS + i);
                s++;
        }

        ifp->if_collisions +=
           (stats.dot3StatsSingleCollisionFrames +
           stats.dot3StatsMultipleCollisionFrames +
           stats.dot3StatsExcessiveCollisions +
           stats.dot3StatsLateCollisions) -
           ifp->if_collisions;
}

static void
bge_stats_update(struct bge_softc *sc)
{
        struct ifnet *ifp = &sc->arpcom.ac_if;
        bus_size_t stats;

        stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;

#define READ_STAT(sc, stats, stat)      \
        CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))
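/*
 * READ_STAT pulls one 32-bit counter out of the chip's statistics
 * block through the standard memory window; offsetof() maps a field
 * of struct bge_stats to its offset within that block.
 */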

        ifp->if_collisions +=
           (READ_STAT(sc, stats,
                txstats.dot3StatsSingleCollisionFrames.bge_addr_lo) +
            READ_STAT(sc, stats,
                txstats.dot3StatsMultipleCollisionFrames.bge_addr_lo) +
            READ_STAT(sc, stats,
                txstats.dot3StatsExcessiveCollisions.bge_addr_lo) +
            READ_STAT(sc, stats,
                txstats.dot3StatsLateCollisions.bge_addr_lo)) -
           ifp->if_collisions;

#undef READ_STAT

#ifdef notdef
        ifp->if_collisions +=
           (sc->bge_rdata->bge_info.bge_stats.dot3StatsSingleCollisionFrames +
           sc->bge_rdata->bge_info.bge_stats.dot3StatsMultipleCollisionFrames +
           sc->bge_rdata->bge_info.bge_stats.dot3StatsExcessiveCollisions +
           sc->bge_rdata->bge_info.bge_stats.dot3StatsLateCollisions) -
           ifp->if_collisions;
#endif
}

/*
 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
 * pointers to descriptors.
 */
static int
bge_encap(struct bge_softc *sc, struct mbuf *m_head, uint32_t *txidx)
{
        struct bge_tx_bd *d = NULL;
        uint16_t csum_flags = 0;
        struct ifvlan *ifv = NULL;
        struct bge_dmamap_arg ctx;
        bus_dma_segment_t segs[BGE_NSEG_NEW];
        bus_dmamap_t map;
        int error, maxsegs, idx, i;

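        /*
         * An mbuf flagged M_PROTO1 with an IFT_L2VLAN rcvif was handed
         * down by vlan(4) for transmission; the vlan softc (and thus
         * the tag used below) is recovered from m_pkthdr.rcvif.
         */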
        if ((m_head->m_flags & (M_PROTO1|M_PKTHDR)) == (M_PROTO1|M_PKTHDR) &&
            m_head->m_pkthdr.rcvif != NULL &&
            m_head->m_pkthdr.rcvif->if_type == IFT_L2VLAN)
                ifv = m_head->m_pkthdr.rcvif->if_softc;

        if (m_head->m_pkthdr.csum_flags) {
                if (m_head->m_pkthdr.csum_flags & CSUM_IP)
                        csum_flags |= BGE_TXBDFLAG_IP_CSUM;
                if (m_head->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
                        csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
                if (m_head->m_flags & M_LASTFRAG)
                        csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
                else if (m_head->m_flags & M_FRAG)
                        csum_flags |= BGE_TXBDFLAG_IP_FRAG;
        }

        idx = *txidx;
        map = sc->bge_cdata.bge_tx_dmamap[idx];

        maxsegs = (BGE_TX_RING_CNT - sc->bge_txcnt) - BGE_NSEG_RSVD;
        KASSERT(maxsegs >= BGE_NSEG_SPARE,
                ("not enough segments %d\n", maxsegs));

        if (maxsegs > BGE_NSEG_NEW)
                maxsegs = BGE_NSEG_NEW;

        /*
         * Pad outbound frame to BGE_MIN_FRAME for an unusual reason.
         * The bge hardware will pad out Tx runts to BGE_MIN_FRAME,
         * but when such padded frames employ the bge IP/TCP checksum
         * offload, the hardware checksum assist gives incorrect results
         * (possibly from incorporating its own padding into the UDP/TCP
         * checksum; who knows).  If we pad such runts with zeros, the
         * onboard checksum comes out correct.  We do this by pretending
         * the mbuf chain has too many fragments so the coalescing code
         * below can assemble the packet into a single buffer that's
         * padded out to the minimum frame size.
         */
        if ((csum_flags & BGE_TXBDFLAG_TCP_UDP_CSUM) &&
            m_head->m_pkthdr.len < BGE_MIN_FRAME) {
                error = E2BIG;
        } else {
                ctx.bge_segs = segs;
                ctx.bge_maxsegs = maxsegs;
                error = bus_dmamap_load_mbuf(sc->bge_cdata.bge_mtag, map,
                                             m_head, bge_dma_map_mbuf, &ctx,
                                             BUS_DMA_NOWAIT);
        }
        if (error == E2BIG || ctx.bge_maxsegs == 0) {
                struct mbuf *m_new;

                m_new = m_defrag(m_head, MB_DONTWAIT);
                if (m_new == NULL) {
                        if_printf(&sc->arpcom.ac_if,
                                  "could not defrag TX mbuf\n");
                        error = ENOBUFS;
                        goto back;
                } else {
                        m_head = m_new;
                }

                /*
                 * Manually pad short frames, and zero the pad space
                 * to avoid leaking data.
                 */
                if ((csum_flags & BGE_TXBDFLAG_TCP_UDP_CSUM) &&
                    m_head->m_pkthdr.len < BGE_MIN_FRAME) {
                        int pad_len = BGE_MIN_FRAME - m_head->m_pkthdr.len;

                        bzero(mtod(m_head, char *) + m_head->m_pkthdr.len,
                              pad_len);
                        m_head->m_pkthdr.len += pad_len;
                        m_head->m_len = m_head->m_pkthdr.len;
                }

                ctx.bge_segs = segs;
                ctx.bge_maxsegs = maxsegs;
                error = bus_dmamap_load_mbuf(sc->bge_cdata.bge_mtag, map,
                                             m_head, bge_dma_map_mbuf, &ctx,
                                             BUS_DMA_NOWAIT);
                if (error || ctx.bge_maxsegs == 0) {
                        if_printf(&sc->arpcom.ac_if,
                                  "could not defrag TX mbuf\n");
                        if (error == 0)
                                error = E2BIG;
                        goto back;
                }
        } else if (error) {
                if_printf(&sc->arpcom.ac_if, "could not map TX mbuf\n");
                goto back;
        }

        bus_dmamap_sync(sc->bge_cdata.bge_mtag, map, BUS_DMASYNC_PREWRITE);

        for (i = 0; ; i++) {
                d = &sc->bge_ldata.bge_tx_ring[idx];

                d->bge_addr.bge_addr_lo = BGE_ADDR_LO(ctx.bge_segs[i].ds_addr);
                d->bge_addr.bge_addr_hi = BGE_ADDR_HI(ctx.bge_segs[i].ds_addr);
                d->bge_len = segs[i].ds_len;
                d->bge_flags = csum_flags;

                if (i == ctx.bge_maxsegs - 1)
                        break;
                BGE_INC(idx, BGE_TX_RING_CNT);
        }
        /* Mark the last segment as end of packet... */
        d->bge_flags |= BGE_TXBDFLAG_END;

        /* Set vlan tag to the first segment of the packet. */
        d = &sc->bge_ldata.bge_tx_ring[*txidx];
        if (ifv != NULL) {
                d->bge_flags |= BGE_TXBDFLAG_VLAN_TAG;
                d->bge_vlan_tag = ifv->ifv_tag;
        } else {
                d->bge_vlan_tag = 0;
        }

        /*
         * Ensure that the map for this transmission is placed at
         * the array index of the last descriptor in this chain.
         */
        sc->bge_cdata.bge_tx_dmamap[*txidx] = sc->bge_cdata.bge_tx_dmamap[idx];
        sc->bge_cdata.bge_tx_dmamap[idx] = map;
        sc->bge_cdata.bge_tx_chain[idx] = m_head;
        sc->bge_txcnt += ctx.bge_maxsegs;

        BGE_INC(idx, BGE_TX_RING_CNT);
        *txidx = idx;
back:
        if (error)
                m_freem(m_head);
        return error;
}

/*
 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit descriptors.
 */
static void
bge_start(struct ifnet *ifp)
{
        struct bge_softc *sc = ifp->if_softc;
        struct mbuf *m_head = NULL;
        uint32_t prodidx;
        int need_trans;

        if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING ||
            !sc->bge_link)
                return;

        prodidx = sc->bge_tx_prodidx;

        need_trans = 0;
        while (sc->bge_cdata.bge_tx_chain[prodidx] == NULL) {
                m_head = ifq_poll(&ifp->if_snd);
                if (m_head == NULL)
                        break;

                /*
                 * XXX
                 * The code inside the if() block is never reached since we
                 * must mark CSUM_IP_FRAGS in our if_hwassist to start getting
                 * requests to checksum TCP/UDP in a fragmented packet.
                 *
                 * XXX
                 * safety overkill.  If this is a fragmented packet chain
                 * with delayed TCP/UDP checksums, then only encapsulate
                 * it if we have enough descriptors to handle the entire
                 * chain at once.
                 * (paranoia -- may not actually be needed)
                 */
                if (m_head->m_flags & M_FIRSTFRAG &&
                    m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
                        if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
                            m_head->m_pkthdr.csum_data + 16) {
                                ifp->if_flags |= IFF_OACTIVE;
                                break;
                        }
                }

                /*
                 * Sanity check: avoid coming within BGE_NSEG_RSVD
                 * descriptors of the end of the ring.  Also make
                 * sure there are BGE_NSEG_SPARE descriptors for
                 * jumbo buffers' defragmentation.
                 */
                if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
                    (BGE_NSEG_RSVD + BGE_NSEG_SPARE)) {
                        ifp->if_flags |= IFF_OACTIVE;
                        break;
                }

                /*
                 * Dequeue the packet before encapsulation, since
                 * bge_encap() may free the packet if an error happens.
                 */
                ifq_dequeue(&ifp->if_snd, m_head);

                /*
                 * Pack the data into the transmit ring. If we
                 * don't have room, set the OACTIVE flag and wait
                 * for the NIC to drain the ring.
                 */
                if (bge_encap(sc, m_head, &prodidx)) {
                        ifp->if_flags |= IFF_OACTIVE;
                        break;
                }
                need_trans = 1;

                BPF_MTAP(ifp, m_head);
        }

        if (!need_trans)
                return;

        /* Transmit */
        CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
        /* 5700 b2 errata */
        if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
                CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);

        sc->bge_tx_prodidx = prodidx;

        /*
         * Set a timeout in case the chip goes out to lunch.
         */
        ifp->if_timer = 5;
}

static void
bge_init(void *xsc)
{
        struct bge_softc *sc = xsc;
        struct ifnet *ifp = &sc->arpcom.ac_if;
        uint16_t *m;

        ASSERT_SERIALIZED(ifp->if_serializer);

        if (ifp->if_flags & IFF_RUNNING)
                return;

        /* Cancel pending I/O and flush buffers. */
        bge_stop(sc);
        bge_reset(sc);
        bge_chipinit(sc);

        /*
         * Init the various state machines, ring
         * control blocks and firmware.
         */
        if (bge_blockinit(sc)) {
                if_printf(ifp, "initialization failure\n");
                return;
        }

        /* Specify MTU. */
        CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
            ETHER_HDR_LEN + ETHER_CRC_LEN + EVL_ENCAPLEN);

        /* Load our MAC address. */
        m = (uint16_t *)&sc->arpcom.ac_enaddr[0];
        CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
        CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));

        /* Enable or disable promiscuous mode as needed. */
        bge_setpromisc(sc);

        /* Program multicast filter. */
        bge_setmulti(sc);

        /* Init RX ring. */
        bge_init_rx_ring_std(sc);

        /*
         * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
         * memory to ensure that the chip has in fact read the first
         * entry of the ring.
         */
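        /*
         * (Offset 8 of the first std ring entry as fetched into NIC
         * memory is presumably the descriptor's length word, which the
         * driver fills with MCLBYTES - ETHER_ALIGN; seeing that value
         * confirms the fetch.)
         */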
        if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
                uint32_t                v, i;
                for (i = 0; i < 10; i++) {
                        DELAY(20);
                        v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
                        if (v == (MCLBYTES - ETHER_ALIGN))
                                break;
                }
                if (i == 10)
                        if_printf(ifp, "5705 A0 chip failed to load RX ring\n");
        }

        /* Init jumbo RX ring. */
        if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
                bge_init_rx_ring_jumbo(sc);

        /* Init our RX return ring index */
        sc->bge_rx_saved_considx = 0;

        /* Init TX ring. */
        bge_init_tx_ring(sc);

        /* Turn on transmitter */
        BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE);

        /* Turn on receiver */
        BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);

        /* Tell firmware we're alive. */
        BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);

        /* Enable host interrupts. */
        BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
        BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
        CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);

        bge_ifmedia_upd(ifp);

        ifp->if_flags |= IFF_RUNNING;
        ifp->if_flags &= ~IFF_OACTIVE;

        callout_reset(&sc->bge_stat_timer, hz, bge_tick, sc);
}

/*
 * Set media options.
 */
static int
bge_ifmedia_upd(struct ifnet *ifp)
{
        struct bge_softc *sc = ifp->if_softc;
        struct ifmedia *ifm = &sc->bge_ifmedia;
        struct mii_data *mii;

        /* If this is a 1000baseX NIC, enable the TBI port. */
        if (sc->bge_tbi) {
                if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
                        return(EINVAL);
                switch (IFM_SUBTYPE(ifm->ifm_media)) {
                case IFM_AUTO:
                        /*
                         * The BCM5704 ASIC appears to have a special
                         * mechanism for programming the autoneg
                         * advertisement registers in TBI mode.
                         */
                        if (!bge_fake_autoneg &&
                            sc->bge_asicrev == BGE_ASICREV_BCM5704) {
                                uint32_t sgdig;

                                CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
                                sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
                                sgdig |= BGE_SGDIGCFG_AUTO |
                                         BGE_SGDIGCFG_PAUSE_CAP |
                                         BGE_SGDIGCFG_ASYM_PAUSE;
                                CSR_WRITE_4(sc, BGE_SGDIG_CFG,
                                            sgdig | BGE_SGDIGCFG_SEND);
                                DELAY(5);
                                CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig);
                        }
                        break;
                case IFM_1000_SX:
                        if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
                                BGE_CLRBIT(sc, BGE_MAC_MODE,
                                    BGE_MACMODE_HALF_DUPLEX);
                        } else {
                                BGE_SETBIT(sc, BGE_MAC_MODE,
                                    BGE_MACMODE_HALF_DUPLEX);
                        }
                        break;
                default:
                        return(EINVAL);
                }
                return(0);
        }

        mii = device_get_softc(sc->bge_miibus);
        sc->bge_link = 0;
        if (mii->mii_instance) {
                struct mii_softc *miisc;
                LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
                        mii_phy_reset(miisc);
        }
        mii_mediachg(mii);

        return(0);
}

/*
 * Report current media status.
 */
static void
bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
        struct bge_softc *sc = ifp->if_softc;
        struct mii_data *mii;

        if (sc->bge_tbi) {
                ifmr->ifm_status = IFM_AVALID;
                ifmr->ifm_active = IFM_ETHER;
                if (CSR_READ_4(sc, BGE_MAC_STS) &
                    BGE_MACSTAT_TBI_PCS_SYNCHED)
                        ifmr->ifm_status |= IFM_ACTIVE;
                ifmr->ifm_active |= IFM_1000_SX;
                if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
                        ifmr->ifm_active |= IFM_HDX;
                else
                        ifmr->ifm_active |= IFM_FDX;
                return;
        }

        mii = device_get_softc(sc->bge_miibus);
        mii_pollstat(mii);
        ifmr->ifm_active = mii->mii_media_active;
        ifmr->ifm_status = mii->mii_media_status;
}
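
/*
 * Handle interface ioctls.  RX filter changes (promiscuous mode,
 * multicast) are applied without a full chip reinitialization;
 * an MTU change requires one.
 */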
static int
bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
{
        struct bge_softc *sc = ifp->if_softc;
        struct ifreq *ifr = (struct ifreq *) data;
        int mask, error = 0;
        struct mii_data *mii;

        ASSERT_SERIALIZED(ifp->if_serializer);

        switch(command) {
        case SIOCSIFMTU:
                /* Disallow jumbo frames on 5705/5750. */
                if (((sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
                      sc->bge_asicrev == BGE_ASICREV_BCM5750) &&
                     ifr->ifr_mtu > ETHERMTU) || ifr->ifr_mtu > BGE_JUMBO_MTU)
                        error = EINVAL;
                else {
                        ifp->if_mtu = ifr->ifr_mtu;
                        ifp->if_flags &= ~IFF_RUNNING;
                        bge_init(sc);
                }
                break;
        case SIOCSIFFLAGS:
                if (ifp->if_flags & IFF_UP) {
                        if (ifp->if_flags & IFF_RUNNING) {
                                int flags = ifp->if_flags ^ sc->bge_if_flags;

                                /*
                                 * If only the state of the PROMISC flag
                                 * changed, then just use the 'set promisc
                                 * mode' command instead of reinitializing
                                 * the entire NIC. Doing a full re-init
                                 * means reloading the firmware and waiting
                                 * for it to start up, which may take a
                                 * second or two.  Similarly for ALLMULTI.
                                 */
                                if (flags & IFF_PROMISC)
                                        bge_setpromisc(sc);
                                if (flags & IFF_ALLMULTI)
                                        bge_setmulti(sc);
                        } else {
                                bge_init(sc);
                        }
                } else {
                        if (ifp->if_flags & IFF_RUNNING)
                                bge_stop(sc);
                }
                sc->bge_if_flags = ifp->if_flags;
                error = 0;
                break;
        case SIOCADDMULTI:
        case SIOCDELMULTI:
                if (ifp->if_flags & IFF_RUNNING) {
                        bge_setmulti(sc);
                        error = 0;
                }
                break;
        case SIOCSIFMEDIA:
        case SIOCGIFMEDIA:
                if (sc->bge_tbi) {
                        error = ifmedia_ioctl(ifp, ifr,
                            &sc->bge_ifmedia, command);
                } else {
                        mii = device_get_softc(sc->bge_miibus);
                        error = ifmedia_ioctl(ifp, ifr,
                            &mii->mii_media, command);
                }
                break;
        case SIOCSIFCAP:
                mask = ifr->ifr_reqcap ^ ifp->if_capenable;
                if (mask & IFCAP_HWCSUM) {
                        ifp->if_capenable ^= IFCAP_HWCSUM;
                        if (IFCAP_HWCSUM & ifp->if_capenable)
                                ifp->if_hwassist = BGE_CSUM_FEATURES;
                        else
                                ifp->if_hwassist = 0;
                }
                error = 0;
                break;
        default:
                error = ether_ioctl(ifp, command, data);
                break;
        }
        return(error);
}
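
/*
 * The transmitter failed to make progress within the watchdog
 * interval; reinitialize the chip and kick the send queue.
 */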
static void
bge_watchdog(struct ifnet *ifp)
{
        struct bge_softc *sc = ifp->if_softc;

        if_printf(ifp, "watchdog timeout -- resetting\n");

        ifp->if_flags &= ~IFF_RUNNING;
        bge_init(sc);

        ifp->if_oerrors++;

        if (!ifq_is_empty(&ifp->if_snd))
                ifp->if_start(ifp);
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
static void
bge_stop(struct bge_softc *sc)
{
        struct ifnet *ifp = &sc->arpcom.ac_if;
        struct ifmedia_entry *ifm;
        struct mii_data *mii = NULL;
        int mtmp, itmp;

        ASSERT_SERIALIZED(ifp->if_serializer);

        if (!sc->bge_tbi)
                mii = device_get_softc(sc->bge_miibus);

        callout_stop(&sc->bge_stat_timer);

        /*
         * Disable all of the receiver blocks.
         */
        BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
        BGE_CLRBIT(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
        BGE_CLRBIT(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
        if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
            sc->bge_asicrev != BGE_ASICREV_BCM5750)
                BGE_CLRBIT(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
        BGE_CLRBIT(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
        BGE_CLRBIT(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
        BGE_CLRBIT(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);

        /*
         * Disable all of the transmit blocks.
         */
        BGE_CLRBIT(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
        BGE_CLRBIT(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
        BGE_CLRBIT(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
        BGE_CLRBIT(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
        BGE_CLRBIT(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
        if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
            sc->bge_asicrev != BGE_ASICREV_BCM5750)
                BGE_CLRBIT(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
        BGE_CLRBIT(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);

        /*
         * Shut down all of the memory managers and related
         * state machines.
         */
        BGE_CLRBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
        BGE_CLRBIT(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
        if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
            sc->bge_asicrev != BGE_ASICREV_BCM5750)
                BGE_CLRBIT(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
        CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
        CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
        if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
            sc->bge_asicrev != BGE_ASICREV_BCM5750) {
                BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
                BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
        }

        /* Disable host interrupts. */
        BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
        CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);

        /*
         * Tell firmware we're shutting down.
         */
        BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);

        /* Free the RX lists. */
        bge_free_rx_ring_std(sc);

        /* Free jumbo RX list. */
        if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
            sc->bge_asicrev != BGE_ASICREV_BCM5750)
                bge_free_rx_ring_jumbo(sc);

        /* Free TX buffers. */
        bge_free_tx_ring(sc);

        /*
         * Isolate/power down the PHY, but leave the media selection
         * unchanged so that things will be put back to normal when
         * we bring the interface back up.
         */
        if (!sc->bge_tbi) {
                itmp = ifp->if_flags;
                ifp->if_flags |= IFF_UP;
                ifm = mii->mii_media.ifm_cur;
                mtmp = ifm->ifm_media;
                ifm->ifm_media = IFM_ETHER|IFM_NONE;
                mii_mediachg(mii);
                ifm->ifm_media = mtmp;
                ifp->if_flags = itmp;
        }

        sc->bge_link = 0;

        sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;

        ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static void
bge_shutdown(device_t dev)
{
        struct bge_softc *sc = device_get_softc(dev);
        struct ifnet *ifp = &sc->arpcom.ac_if;

        lwkt_serialize_enter(ifp->if_serializer);
        bge_stop(sc);
        bge_reset(sc);
        lwkt_serialize_exit(ifp->if_serializer);
}
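
/*
 * Quiesce the chip before the system goes to sleep.
 */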
static int
bge_suspend(device_t dev)
{
        struct bge_softc *sc = device_get_softc(dev);
        struct ifnet *ifp = &sc->arpcom.ac_if;

        lwkt_serialize_enter(ifp->if_serializer);
        bge_stop(sc);
        lwkt_serialize_exit(ifp->if_serializer);

        return 0;
}
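
/*
 * Reinitialize the chip on wakeup if the interface was up at
 * suspend time, then restart any pending transmissions.
 */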
static int
bge_resume(device_t dev)
{
        struct bge_softc *sc = device_get_softc(dev);
        struct ifnet *ifp = &sc->arpcom.ac_if;

        lwkt_serialize_enter(ifp->if_serializer);

        if (ifp->if_flags & IFF_UP) {
                bge_init(sc);

                if (!ifq_is_empty(&ifp->if_snd))
                        ifp->if_start(ifp);
        }

        lwkt_serialize_exit(ifp->if_serializer);

        return 0;
}
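
/*
 * Program the hardware RX filter according to IFF_PROMISC.
 */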
static void
bge_setpromisc(struct bge_softc *sc)
{
        struct ifnet *ifp = &sc->arpcom.ac_if;

        if (ifp->if_flags & IFF_PROMISC)
                BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
        else
                BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
}
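
/*
 * bus_dmamap_load() callback: record the bus address of a
 * single-segment block allocation in the caller's context.
 */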
static void
bge_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
        struct bge_dmamap_arg *ctx = arg;

        if (error)
                return;

        KASSERT(nsegs == 1 && ctx->bge_maxsegs == 1,
                ("only one segment is allowed\n"));

        ctx->bge_segs[0] = *segs;
}
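
/*
 * bus_dmamap_load_mbuf() callback: copy out the DMA segments of
 * an mbuf chain.  If the chain maps to more segments than the
 * caller allows, bge_maxsegs is zeroed to signal the failure.
 */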
static void
bge_dma_map_mbuf(void *arg, bus_dma_segment_t *segs, int nsegs,
                 bus_size_t mapsz __unused, int error)
{
        struct bge_dmamap_arg *ctx = arg;
        int i;

        if (error)
                return;

        if (nsegs > ctx->bge_maxsegs) {
                ctx->bge_maxsegs = 0;
                return;
        }

        ctx->bge_maxsegs = nsegs;
        for (i = 0; i < nsegs; ++i)
                ctx->bge_segs[i] = segs[i];
}
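
/*
 * Tear down all DMA resources created by bge_dma_alloc().
 */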
static void
bge_dma_free(struct bge_softc *sc)
{
        int i;

        /* Destroy RX/TX mbuf DMA maps and the mbuf DMA tag. */
        if (sc->bge_cdata.bge_mtag != NULL) {
                for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
                        if (sc->bge_cdata.bge_rx_std_dmamap[i]) {
                                bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
                                    sc->bge_cdata.bge_rx_std_dmamap[i]);
                        }
                }

                for (i = 0; i < BGE_TX_RING_CNT; i++) {
                        if (sc->bge_cdata.bge_tx_dmamap[i]) {
                                bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
                                    sc->bge_cdata.bge_tx_dmamap[i]);
                        }
                }
                bus_dma_tag_destroy(sc->bge_cdata.bge_mtag);
        }

        /* Destroy standard RX ring. */
        bge_dma_block_free(sc->bge_cdata.bge_rx_std_ring_tag,
                           sc->bge_cdata.bge_rx_std_ring_map,
                           sc->bge_ldata.bge_rx_std_ring);

        if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
            sc->bge_asicrev != BGE_ASICREV_BCM5750)
                bge_free_jumbo_mem(sc);

        /* Destroy RX return ring. */
        bge_dma_block_free(sc->bge_cdata.bge_rx_return_ring_tag,
                           sc->bge_cdata.bge_rx_return_ring_map,
                           sc->bge_ldata.bge_rx_return_ring);

        /* Destroy TX ring. */
        bge_dma_block_free(sc->bge_cdata.bge_tx_ring_tag,
                           sc->bge_cdata.bge_tx_ring_map,
                           sc->bge_ldata.bge_tx_ring);

        /* Destroy status block. */
        bge_dma_block_free(sc->bge_cdata.bge_status_tag,
                           sc->bge_cdata.bge_status_map,
                           sc->bge_ldata.bge_status_block);

        /* Destroy statistics block. */
        bge_dma_block_free(sc->bge_cdata.bge_stats_tag,
                           sc->bge_cdata.bge_stats_map,
                           sc->bge_ldata.bge_stats);

        /* Destroy the parent tag. */
        if (sc->bge_cdata.bge_parent_tag != NULL)
                bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag);
}
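
/*
 * Allocate all DMA tags, maps and shared memory the driver needs:
 * a parent tag, per-mbuf maps for the RX/TX rings, the rings
 * themselves, the status block and the statistics block.
 */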
static int
bge_dma_alloc(struct bge_softc *sc)
{
        struct ifnet *ifp = &sc->arpcom.ac_if;
        int nseg, i, error;

        /*
         * Allocate the parent bus DMA tag appropriate for PCI.
         */
        error = bus_dma_tag_create(NULL, 1, 0,
                                   BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
                                   NULL, NULL,
                                   MAXBSIZE, BGE_NSEG_NEW,
                                   BUS_SPACE_MAXSIZE_32BIT,
                                   0, &sc->bge_cdata.bge_parent_tag);
        if (error) {
                if_printf(ifp, "could not allocate parent dma tag\n");
                return error;
        }

        /*
         * Create DMA tag for mbufs.
         */
        nseg = BGE_NSEG_NEW;
        error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1, 0,
                                   BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
                                   NULL, NULL,
                                   MCLBYTES * nseg, nseg, MCLBYTES,
                                   BUS_DMA_ALLOCNOW, &sc->bge_cdata.bge_mtag);
        if (error) {
                if_printf(ifp, "could not allocate mbuf dma tag\n");
                return error;
        }

        /*
         * Create DMA maps for TX/RX mbufs.
         */
        for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
                error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0,
                                          &sc->bge_cdata.bge_rx_std_dmamap[i]);
                if (error) {
                        int j;

                        /* Unwind the maps created so far. */
                        for (j = 0; j < i; ++j) {
                                bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
                                        sc->bge_cdata.bge_rx_std_dmamap[j]);
                        }
                        bus_dma_tag_destroy(sc->bge_cdata.bge_mtag);
                        sc->bge_cdata.bge_mtag = NULL;

                        if_printf(ifp, "could not create DMA map for RX\n");
                        return error;
                }
        }

        for (i = 0; i < BGE_TX_RING_CNT; i++) {
                error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0,
                                          &sc->bge_cdata.bge_tx_dmamap[i]);
                if (error) {
                        int j;

                        /* Unwind all RX maps and the TX maps created so far. */
                        for (j = 0; j < BGE_STD_RX_RING_CNT; ++j) {
                                bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
                                        sc->bge_cdata.bge_rx_std_dmamap[j]);
                        }
                        for (j = 0; j < i; ++j) {
                                bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
                                        sc->bge_cdata.bge_tx_dmamap[j]);
                        }
                        bus_dma_tag_destroy(sc->bge_cdata.bge_mtag);
                        sc->bge_cdata.bge_mtag = NULL;

                        if_printf(ifp, "could not create DMA map for TX\n");
                        return error;
                }
        }

        /*
         * Create DMA resources for the standard RX ring.
         */
        error = bge_dma_block_alloc(sc, BGE_STD_RX_RING_SZ,
                                    &sc->bge_cdata.bge_rx_std_ring_tag,
                                    &sc->bge_cdata.bge_rx_std_ring_map,
                                    (void **)&sc->bge_ldata.bge_rx_std_ring,
                                    &sc->bge_ldata.bge_rx_std_ring_paddr);
        if (error) {
                if_printf(ifp, "could not create std RX ring\n");
                return error;
        }

        /*
         * Create jumbo buffer pool.
         */
        if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
            sc->bge_asicrev != BGE_ASICREV_BCM5750) {
                error = bge_alloc_jumbo_mem(sc);
                if (error) {
                        if_printf(ifp, "could not create jumbo buffer pool\n");
                        return error;
                }
        }

        /*
         * Create DMA resources for the RX return ring.
         */
        error = bge_dma_block_alloc(sc, BGE_RX_RTN_RING_SZ(sc),
                                    &sc->bge_cdata.bge_rx_return_ring_tag,
                                    &sc->bge_cdata.bge_rx_return_ring_map,
                                    (void **)&sc->bge_ldata.bge_rx_return_ring,
                                    &sc->bge_ldata.bge_rx_return_ring_paddr);
        if (error) {
                if_printf(ifp, "could not create RX ret ring\n");
                return error;
        }

        /*
         * Create DMA resources for the TX ring.
         */
        error = bge_dma_block_alloc(sc, BGE_TX_RING_SZ,
                                    &sc->bge_cdata.bge_tx_ring_tag,
                                    &sc->bge_cdata.bge_tx_ring_map,
                                    (void **)&sc->bge_ldata.bge_tx_ring,
                                    &sc->bge_ldata.bge_tx_ring_paddr);
        if (error) {
                if_printf(ifp, "could not create TX ring\n");
                return error;
        }

        /*
         * Create DMA resources for the status block.
         */
        error = bge_dma_block_alloc(sc, BGE_STATUS_BLK_SZ,
                                    &sc->bge_cdata.bge_status_tag,
                                    &sc->bge_cdata.bge_status_map,
                                    (void **)&sc->bge_ldata.bge_status_block,
                                    &sc->bge_ldata.bge_status_block_paddr);
        if (error) {
                if_printf(ifp, "could not create status block\n");
                return error;
        }

        /*
         * Create DMA resources for the statistics block.
         */
        error = bge_dma_block_alloc(sc, BGE_STATS_SZ,
                                    &sc->bge_cdata.bge_stats_tag,
                                    &sc->bge_cdata.bge_stats_map,
                                    (void **)&sc->bge_ldata.bge_stats,
                                    &sc->bge_ldata.bge_stats_paddr);
        if (error) {
                if_printf(ifp, "could not create stats block\n");
                return error;
        }
        return 0;
}
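
/*
 * Allocate a physically contiguous, zeroed DMA block of the given
 * size and return its tag, map, and virtual and bus addresses.
 */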
static int
bge_dma_block_alloc(struct bge_softc *sc, bus_size_t size, bus_dma_tag_t *tag,
                    bus_dmamap_t *map, void **addr, bus_addr_t *paddr)
{
        struct ifnet *ifp = &sc->arpcom.ac_if;
        struct bge_dmamap_arg ctx;
        bus_dma_segment_t seg;
        int error;

        /*
         * Create DMA tag.
         */
        error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, PAGE_SIZE, 0,
                                   BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
                                   NULL, NULL, size, 1, size, 0, tag);
        if (error) {
                if_printf(ifp, "could not allocate dma tag\n");
                return error;
        }

        /*
         * Allocate DMA'able memory.
         */
        error = bus_dmamem_alloc(*tag, addr, BUS_DMA_WAITOK | BUS_DMA_ZERO,
                                 map);
        if (error) {
                if_printf(ifp, "could not allocate dma memory\n");
                bus_dma_tag_destroy(*tag);
                *tag = NULL;
                return error;
        }

        /*
         * Load the DMA'able memory.
         */
        ctx.bge_maxsegs = 1;
        ctx.bge_segs = &seg;
        error = bus_dmamap_load(*tag, *map, *addr, size, bge_dma_map_addr, &ctx,
                                BUS_DMA_WAITOK);
        if (error) {
                if_printf(ifp, "could not load dma memory\n");
                bus_dmamem_free(*tag, *addr, *map);
                bus_dma_tag_destroy(*tag);
                *tag = NULL;
                return error;
        }
        *paddr = ctx.bge_segs[0].ds_addr;

        return 0;
}
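
/*
 * Release a DMA block allocated by bge_dma_block_alloc().
 */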
static void
bge_dma_block_free(bus_dma_tag_t tag, bus_dmamap_t map, void *addr)
{
        if (tag != NULL) {
                bus_dmamap_unload(tag, map);
                bus_dmamem_free(tag, addr, map);
                bus_dma_tag_destroy(tag);
        }
}