/*
 * Copyright (c) 2001 Wind River Systems
 * Copyright (c) 1997, 1998, 1999, 2001
 *      Bill Paul <wpaul@windriver.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/bge/if_bge.c,v 1.3.2.39 2005/07/03 03:41:18 silby Exp $
 * $DragonFly: src/sys/dev/netif/bge/if_bge.c,v 1.72 2007/04/30 14:05:22 sephe Exp $
 *
 */

/*
 * Broadcom BCM570x family gigabit ethernet driver for FreeBSD.
 *
 * Written by Bill Paul <wpaul@windriver.com>
 * Senior Engineer, Wind River Systems
 */

/*
 * The Broadcom BCM5700 is based on technology originally developed by
 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
 * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
 * frames, highly configurable RX filtering, and 16 RX and TX queues
 * (which, along with RX filter rules, can be used for QOS applications).
 * Other features, such as TCP segmentation, may be available as part
 * of value-added firmware updates. Unlike the Tigon I and Tigon II,
 * firmware images can be stored in hardware and need not be compiled
 * into the driver.
 *
 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
 *
 * The BCM5701 is a single-chip solution incorporating both the BCM5700
 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
 * does not support external SSRAM.
 *
 * Broadcom also produces a variation of the BCM5700 under the "Altima"
 * brand name, which is functionally similar but lacks PCI-X support.
 *
 * Without external SSRAM, you can only have at most 4 TX rings,
 * and the use of the mini RX ring is disabled. This seems to imply
 * that these features are simply not available on the BCM5701. As a
 * result, this driver does not implement any support for the mini RX
 * ring.
 */

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/rman.h>
#include <sys/serialize.h>
#include <sys/socket.h>
#include <sys/sockio.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/ifq_var.h>
#include <net/vlan/if_vlan_var.h>

#include <dev/netif/mii_layer/mii.h>
#include <dev/netif/mii_layer/miivar.h>
#include <dev/netif/mii_layer/brgphyreg.h>

#include <bus/pci/pcidevs.h>
#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>

#include <dev/netif/bge/if_bgereg.h>

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

#define BGE_CSUM_FEATURES       (CSUM_IP | CSUM_TCP | CSUM_UDP)
#define BGE_MIN_FRAME           60

/*
 * Various supported device vendors/types and their names. Note: the
 * spec seems to indicate that the hardware still has Alteon's vendor
 * ID burned into it, though it will always be overridden by the vendor
 * ID in the EEPROM. Just to be safe, we cover all possibilities.
 */
#define BGE_DEVDESC_MAX         64      /* Maximum device description length */

static struct bge_type bge_devs[] = {
        { PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_BCM5700,
                "Alteon BCM5700 Gigabit Ethernet" },
        { PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_BCM5701,
                "Alteon BCM5701 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5700,
                "Broadcom BCM5700 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5701,
                "Broadcom BCM5701 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702X,
                "Broadcom BCM5702X Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702_ALT,
                "Broadcom BCM5702 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703X,
                "Broadcom BCM5703X Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703A3,
                "Broadcom BCM5703 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704C,
                "Broadcom BCM5704C Dual Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704S,
                "Broadcom BCM5704S Dual Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705,
                "Broadcom BCM5705 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705K,
                "Broadcom BCM5705K Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705M,
                "Broadcom BCM5705M Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705M_ALT,
                "Broadcom BCM5705M Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5714,
                "Broadcom BCM5714C Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5721,
                "Broadcom BCM5721 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5750,
                "Broadcom BCM5750 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5750M,
                "Broadcom BCM5750M Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751,
                "Broadcom BCM5751 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751M,
                "Broadcom BCM5751M Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5752,
                "Broadcom BCM5752 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5782,
                "Broadcom BCM5782 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5788,
                "Broadcom BCM5788 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5789,
                "Broadcom BCM5789 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5901,
                "Broadcom BCM5901 Fast Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5901A2,
                "Broadcom BCM5901A2 Fast Ethernet" },
        { PCI_VENDOR_SCHNEIDERKOCH, PCI_PRODUCT_SCHNEIDERKOCH_SK_9DX1,
                "SysKonnect Gigabit Ethernet" },
        { PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1000,
                "Altima AC1000 Gigabit Ethernet" },
        { PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1001,
                "Altima AC1002 Gigabit Ethernet" },
        { PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC9100,
                "Altima AC9100 Gigabit Ethernet" },
        { 0, 0, NULL }
};

static int      bge_probe(device_t);
static int      bge_attach(device_t);
static int      bge_detach(device_t);
static void     bge_release_resources(struct bge_softc *);
static void     bge_txeof(struct bge_softc *);
static void     bge_rxeof(struct bge_softc *);

static void     bge_tick(void *);
static void     bge_tick_serialized(void *);
static void     bge_stats_update(struct bge_softc *);
static void     bge_stats_update_regs(struct bge_softc *);
static int      bge_encap(struct bge_softc *, struct mbuf *, uint32_t *);

static void     bge_intr(void *);
static void     bge_start(struct ifnet *);
static int      bge_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void     bge_init(void *);
static void     bge_stop(struct bge_softc *);
static void     bge_watchdog(struct ifnet *);
static void     bge_shutdown(device_t);
static int      bge_suspend(device_t);
static int      bge_resume(device_t);
static int      bge_ifmedia_upd(struct ifnet *);
static void     bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);

static uint8_t  bge_eeprom_getbyte(struct bge_softc *, uint32_t, uint8_t *);
static int      bge_read_eeprom(struct bge_softc *, caddr_t, uint32_t, size_t);

static void     bge_setmulti(struct bge_softc *);
static void     bge_setpromisc(struct bge_softc *);

static int      bge_alloc_jumbo_mem(struct bge_softc *);
static void     bge_free_jumbo_mem(struct bge_softc *);
static struct bge_jslot
                *bge_jalloc(struct bge_softc *);
static void     bge_jfree(void *);
static void     bge_jref(void *);
static int      bge_newbuf_std(struct bge_softc *, int, struct mbuf *);
static int      bge_newbuf_jumbo(struct bge_softc *, int, struct mbuf *);
static int      bge_init_rx_ring_std(struct bge_softc *);
static void     bge_free_rx_ring_std(struct bge_softc *);
static int      bge_init_rx_ring_jumbo(struct bge_softc *);
static void     bge_free_rx_ring_jumbo(struct bge_softc *);
static void     bge_free_tx_ring(struct bge_softc *);
static int      bge_init_tx_ring(struct bge_softc *);

static int      bge_chipinit(struct bge_softc *);
static int      bge_blockinit(struct bge_softc *);

static uint32_t bge_readmem_ind(struct bge_softc *, uint32_t);
static void     bge_writemem_ind(struct bge_softc *, uint32_t, uint32_t);
#ifdef notdef
static uint32_t bge_readreg_ind(struct bge_softc *, uint32_t);
#endif
static void     bge_writereg_ind(struct bge_softc *, uint32_t, uint32_t);

static int      bge_miibus_readreg(device_t, int, int);
static int      bge_miibus_writereg(device_t, int, int, int);
static void     bge_miibus_statchg(device_t);

static void     bge_reset(struct bge_softc *);

static void     bge_dma_map_addr(void *, bus_dma_segment_t *, int, int);
static void     bge_dma_map_mbuf(void *, bus_dma_segment_t *, int,
                                 bus_size_t, int);
static int      bge_dma_alloc(struct bge_softc *);
static void     bge_dma_free(struct bge_softc *);
static int      bge_dma_block_alloc(struct bge_softc *, bus_size_t,
                                    bus_dma_tag_t *, bus_dmamap_t *,
                                    void **, bus_addr_t *);
static void     bge_dma_block_free(bus_dma_tag_t, bus_dmamap_t, void *);

/*
 * Set the following tunable to 1 for some IBM blade servers with the DNLK
 * switch module. Auto negotiation is broken for those configurations.
 */
static int      bge_fake_autoneg = 0;
TUNABLE_INT("hw.bge.fake_autoneg", &bge_fake_autoneg);
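
/*
 * For example, the workaround can be enabled from /boot/loader.conf:
 *
 *      hw.bge.fake_autoneg="1"
 */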

static device_method_t bge_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe,         bge_probe),
        DEVMETHOD(device_attach,        bge_attach),
        DEVMETHOD(device_detach,        bge_detach),
        DEVMETHOD(device_shutdown,      bge_shutdown),
        DEVMETHOD(device_suspend,       bge_suspend),
        DEVMETHOD(device_resume,        bge_resume),

        /* bus interface */
        DEVMETHOD(bus_print_child,      bus_generic_print_child),
        DEVMETHOD(bus_driver_added,     bus_generic_driver_added),

        /* MII interface */
        DEVMETHOD(miibus_readreg,       bge_miibus_readreg),
        DEVMETHOD(miibus_writereg,      bge_miibus_writereg),
        DEVMETHOD(miibus_statchg,       bge_miibus_statchg),

        { 0, 0 }
};

static DEFINE_CLASS_0(bge, bge_driver, bge_methods, sizeof(struct bge_softc));
static devclass_t bge_devclass;

DECLARE_DUMMY_MODULE(if_bge);
DRIVER_MODULE(if_bge, pci, bge_driver, bge_devclass, 0, 0);
DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0);

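/*
 * The chip's internal memory is reached indirectly through a window in
 * PCI configuration space: the target offset goes into the memory-window
 * base address register, and the data is then transferred through the
 * memory-window data register.
 */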
static uint32_t
bge_readmem_ind(struct bge_softc *sc, uint32_t off)
{
        device_t dev = sc->bge_dev;

        pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
        return(pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4));
}

static void
bge_writemem_ind(struct bge_softc *sc, uint32_t off, uint32_t val)
{
        device_t dev = sc->bge_dev;

        pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
        pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
}

#ifdef notdef
static uint32_t
bge_readreg_ind(struct bge_softc *sc, uint32_t off)
{
        device_t dev = sc->bge_dev;

        pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
        return(pci_read_config(dev, BGE_PCI_REG_DATA, 4));
}
#endif

static void
bge_writereg_ind(struct bge_softc *sc, uint32_t off, uint32_t val)
{
        device_t dev = sc->bge_dev;

        pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
        pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);
}

/*
 * Read a byte of data stored in the EEPROM at address 'addr.' The
 * BCM570x supports both the traditional bitbang interface and an
 * auto access interface for reading the EEPROM. We use the auto
 * access method.
 */
static uint8_t
bge_eeprom_getbyte(struct bge_softc *sc, uint32_t addr, uint8_t *dest)
{
        int i;
        uint32_t byte = 0;

        /*
         * Enable use of auto EEPROM access so we can avoid
         * having to use the bitbang method.
         */
        BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);

        /* Reset the EEPROM, load the clock period. */
        CSR_WRITE_4(sc, BGE_EE_ADDR,
            BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
        DELAY(20);

        /* Issue the read EEPROM command. */
        CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);

        /* Wait for completion */
        for (i = 0; i < BGE_TIMEOUT * 10; i++) {
                DELAY(10);
                if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
                        break;
        }

        if (i == BGE_TIMEOUT * 10) {
                if_printf(&sc->arpcom.ac_if, "eeprom read timed out\n");
                return(1);
        }

        /* Get result. */
        byte = CSR_READ_4(sc, BGE_EE_DATA);

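        /*
         * The auto access interface returns a full 32-bit word;
         * extract the byte selected by the low two address bits.
         */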
        *dest = (byte >> ((addr % 4) * 8)) & 0xFF;

        return(0);
}

/*
 * Read a sequence of bytes from the EEPROM.
 */
static int
bge_read_eeprom(struct bge_softc *sc, caddr_t dest, uint32_t off, size_t len)
{
        size_t i;
        int err;
        uint8_t byte;

        for (byte = 0, err = 0, i = 0; i < len; i++) {
                err = bge_eeprom_getbyte(sc, off + i, &byte);
                if (err)
                        break;
                *(dest + i) = byte;
        }

        return(err ? 1 : 0);
}

static int
bge_miibus_readreg(device_t dev, int phy, int reg)
{
        struct bge_softc *sc;
        struct ifnet *ifp;
        uint32_t val, autopoll;
        int i;

        sc = device_get_softc(dev);
        ifp = &sc->arpcom.ac_if;

        /*
         * Broadcom's own driver always assumes the internal
         * PHY is at GMII address 1. On some chips, the PHY responds
         * to accesses at all addresses, which could cause us to
         * bogusly attach the PHY 32 times at probe time. Always
         * restricting the lookup to address 1 is simpler than
         * trying to figure out which chip revisions should be
         * special-cased.
         */
        if (phy != 1)
                return(0);

        /* Reading with autopolling on may trigger PCI errors */
        autopoll = CSR_READ_4(sc, BGE_MI_MODE);
        if (autopoll & BGE_MIMODE_AUTOPOLL) {
                BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
                DELAY(40);
        }

        CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY|
            BGE_MIPHY(phy)|BGE_MIREG(reg));

        for (i = 0; i < BGE_TIMEOUT; i++) {
                val = CSR_READ_4(sc, BGE_MI_COMM);
                if (!(val & BGE_MICOMM_BUSY))
                        break;
        }

        if (i == BGE_TIMEOUT) {
                if_printf(ifp, "PHY read timed out\n");
                val = 0;
                goto done;
        }

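        /* BUSY has cleared; re-read to pick up the result/status word. */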
        val = CSR_READ_4(sc, BGE_MI_COMM);

done:
        if (autopoll & BGE_MIMODE_AUTOPOLL) {
                BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
                DELAY(40);
        }

        if (val & BGE_MICOMM_READFAIL)
                return(0);

        return(val & 0xFFFF);
}

static int
bge_miibus_writereg(device_t dev, int phy, int reg, int val)
{
        struct bge_softc *sc;
        uint32_t autopoll;
        int i;

        sc = device_get_softc(dev);

        /* Writing with autopolling on may trigger PCI errors */
        autopoll = CSR_READ_4(sc, BGE_MI_MODE);
        if (autopoll & BGE_MIMODE_AUTOPOLL) {
                BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
                DELAY(40);
        }

        CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY|
            BGE_MIPHY(phy)|BGE_MIREG(reg)|val);

        for (i = 0; i < BGE_TIMEOUT; i++) {
                if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY))
                        break;
        }

        if (autopoll & BGE_MIMODE_AUTOPOLL) {
                BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
                DELAY(40);
        }

        if (i == BGE_TIMEOUT) {
                if_printf(&sc->arpcom.ac_if, "PHY write timed out\n");
                return(0);
        }

        return(0);
}

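/*
 * Called when the PHY's negotiated media changes: mirror the new speed
 * (GMII vs. MII port mode) and duplex setting into the MAC mode register
 * so the MAC matches the PHY.
 */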
static void
bge_miibus_statchg(device_t dev)
{
        struct bge_softc *sc;
        struct mii_data *mii;

        sc = device_get_softc(dev);
        mii = device_get_softc(sc->bge_miibus);

        BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
        if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
                BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
        } else {
                BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
        }

        if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
                BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
        } else {
                BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
        }
}

/*
 * Memory management for jumbo frames.
 */
static int
bge_alloc_jumbo_mem(struct bge_softc *sc)
{
        struct ifnet *ifp = &sc->arpcom.ac_if;
        struct bge_jslot *entry;
        uint8_t *ptr;
        bus_addr_t paddr;
        int i, error;

        /*
         * Create tag for jumbo mbufs.
         * This is really a bit of a kludge. We allocate a special
         * jumbo buffer pool which (thanks to the way our DMA
         * memory allocation works) will consist of contiguous
         * pages. This means that even though a jumbo buffer might
         * be larger than a page size, we don't really need to
         * map it into more than one DMA segment. However, the
         * default mbuf tag will result in multi-segment mappings,
         * so we have to create a special jumbo mbuf tag that
         * lets us get away with mapping the jumbo buffers as
         * a single segment. I think eventually the driver should
         * be changed so that it uses ordinary mbufs and cluster
         * buffers, i.e. jumbo frames can span multiple DMA
         * descriptors. But that's a project for another day.
         */

        /*
         * Create DMA resources for the jumbo RX ring.
         */
        error = bge_dma_block_alloc(sc, BGE_JUMBO_RX_RING_SZ,
                                    &sc->bge_cdata.bge_rx_jumbo_ring_tag,
                                    &sc->bge_cdata.bge_rx_jumbo_ring_map,
                                    (void **)&sc->bge_ldata.bge_rx_jumbo_ring,
                                    &sc->bge_ldata.bge_rx_jumbo_ring_paddr);
        if (error) {
                if_printf(ifp, "could not create jumbo RX ring\n");
                return error;
        }

        /*
         * Create DMA resources for the jumbo buffer block.
         */
        error = bge_dma_block_alloc(sc, BGE_JMEM,
                                    &sc->bge_cdata.bge_jumbo_tag,
                                    &sc->bge_cdata.bge_jumbo_map,
                                    (void **)&sc->bge_ldata.bge_jumbo_buf,
                                    &paddr);
        if (error) {
                if_printf(ifp, "could not create jumbo buffer\n");
                return error;
        }

        SLIST_INIT(&sc->bge_jfree_listhead);

        /*
         * Now divide it up into 9K pieces and save the addresses
         * in an array. Note that we play an evil trick here by using
         * the first few bytes in the buffer to hold the address
         * of the softc structure for this interface. This is because
         * bge_jfree() needs it, but it is called by the mbuf management
         * code which will not pass it to us explicitly.
         */
        for (i = 0, ptr = sc->bge_ldata.bge_jumbo_buf; i < BGE_JSLOTS; i++) {
                entry = &sc->bge_cdata.bge_jslots[i];
                entry->bge_sc = sc;
                entry->bge_buf = ptr;
                entry->bge_paddr = paddr;
                entry->bge_inuse = 0;
                entry->bge_slot = i;
                SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, entry, jslot_link);

                ptr += BGE_JLEN;
                paddr += BGE_JLEN;
        }
        return 0;
}

static void
bge_free_jumbo_mem(struct bge_softc *sc)
{
        /* Destroy jumbo RX ring. */
        bge_dma_block_free(sc->bge_cdata.bge_rx_jumbo_ring_tag,
                           sc->bge_cdata.bge_rx_jumbo_ring_map,
                           sc->bge_ldata.bge_rx_jumbo_ring);

        /* Destroy jumbo buffer block. */
        bge_dma_block_free(sc->bge_cdata.bge_jumbo_tag,
                           sc->bge_cdata.bge_jumbo_map,
                           sc->bge_ldata.bge_jumbo_buf);
}

/*
 * Allocate a jumbo buffer.
 */
static struct bge_jslot *
bge_jalloc(struct bge_softc *sc)
{
        struct bge_jslot *entry;

        lwkt_serialize_enter(&sc->bge_jslot_serializer);
        entry = SLIST_FIRST(&sc->bge_jfree_listhead);
        if (entry) {
                SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jslot_link);
                entry->bge_inuse = 1;
        } else {
                if_printf(&sc->arpcom.ac_if, "no free jumbo buffers\n");
        }
        lwkt_serialize_exit(&sc->bge_jslot_serializer);
        return(entry);
}

/*
 * Adjust usage count on a jumbo buffer.
 */
static void
bge_jref(void *arg)
{
        struct bge_jslot *entry = (struct bge_jslot *)arg;
        struct bge_softc *sc = entry->bge_sc;

        if (sc == NULL)
                panic("bge_jref: can't find softc pointer!");

        if (&sc->bge_cdata.bge_jslots[entry->bge_slot] != entry) {
                panic("bge_jref: asked to reference buffer "
                    "that we don't manage!");
        } else if (entry->bge_inuse == 0) {
                panic("bge_jref: buffer already free!");
        } else {
                atomic_add_int(&entry->bge_inuse, 1);
        }
}

/*
 * Release a jumbo buffer.
 */
static void
bge_jfree(void *arg)
{
        struct bge_jslot *entry = (struct bge_jslot *)arg;
        struct bge_softc *sc = entry->bge_sc;

        if (sc == NULL)
                panic("bge_jfree: can't find softc pointer!");

        if (&sc->bge_cdata.bge_jslots[entry->bge_slot] != entry) {
                panic("bge_jfree: asked to free buffer that we don't manage!");
        } else if (entry->bge_inuse == 0) {
                panic("bge_jfree: buffer already free!");
        } else {
                /*
                 * Possible MP race to 0, use the serializer.  The atomic insn
                 * is still needed for races against bge_jref().
                 */
                lwkt_serialize_enter(&sc->bge_jslot_serializer);
                atomic_subtract_int(&entry->bge_inuse, 1);
                if (entry->bge_inuse == 0) {
                        SLIST_INSERT_HEAD(&sc->bge_jfree_listhead,
                                          entry, jslot_link);
                }
                lwkt_serialize_exit(&sc->bge_jslot_serializer);
        }
}

/*
 * Initialize a standard receive ring descriptor.
 */
static int
bge_newbuf_std(struct bge_softc *sc, int i, struct mbuf *m)
{
        struct mbuf *m_new = NULL;
        struct bge_dmamap_arg ctx;
        bus_dma_segment_t seg;
        struct bge_rx_bd *r;
        int error;

        if (m == NULL) {
                m_new = m_getcl(MB_DONTWAIT, MT_DATA, M_PKTHDR);
                if (m_new == NULL)
                        return ENOBUFS;
        } else {
                m_new = m;
                m_new->m_data = m_new->m_ext.ext_buf;
        }
        m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;

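        /*
         * Shift the payload up by two bytes so the IP header ends up
         * longword aligned, except on chips whose RX engines suffer
         * from the alignment bug and need the buffer left as-is.
         */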
        if (!sc->bge_rx_alignment_bug)
                m_adj(m_new, ETHER_ALIGN);

        ctx.bge_maxsegs = 1;
        ctx.bge_segs = &seg;
        error = bus_dmamap_load_mbuf(sc->bge_cdata.bge_mtag,
                                     sc->bge_cdata.bge_rx_std_dmamap[i],
                                     m_new, bge_dma_map_mbuf, &ctx,
                                     BUS_DMA_NOWAIT);
        if (error || ctx.bge_maxsegs == 0) {
                if (m == NULL)
                        m_freem(m_new);
                return ENOMEM;
        }

        sc->bge_cdata.bge_rx_std_chain[i] = m_new;

        r = &sc->bge_ldata.bge_rx_std_ring[i];
        r->bge_addr.bge_addr_lo = BGE_ADDR_LO(ctx.bge_segs[0].ds_addr);
        r->bge_addr.bge_addr_hi = BGE_ADDR_HI(ctx.bge_segs[0].ds_addr);
        r->bge_flags = BGE_RXBDFLAG_END;
        r->bge_len = m_new->m_len;
        r->bge_idx = i;

        bus_dmamap_sync(sc->bge_cdata.bge_mtag,
                        sc->bge_cdata.bge_rx_std_dmamap[i],
                        BUS_DMASYNC_PREREAD);
        return 0;
}

/*
 * Initialize a jumbo receive ring descriptor. This allocates
 * a jumbo buffer from the pool managed internally by the driver.
 */
static int
bge_newbuf_jumbo(struct bge_softc *sc, int i, struct mbuf *m)
{
        struct mbuf *m_new = NULL;
        struct bge_jslot *buf;
        struct bge_rx_bd *r;
        bus_addr_t paddr;

        if (m == NULL) {
                /* Allocate the mbuf. */
                MGETHDR(m_new, MB_DONTWAIT, MT_DATA);
                if (m_new == NULL)
                        return(ENOBUFS);

                /* Allocate the jumbo buffer */
                buf = bge_jalloc(sc);
                if (buf == NULL) {
                        m_freem(m_new);
                        if_printf(&sc->arpcom.ac_if, "jumbo allocation failed "
                            "-- packet dropped!\n");
                        return ENOBUFS;
                }

                /* Attach the buffer to the mbuf. */
                m_new->m_ext.ext_arg = buf;
                m_new->m_ext.ext_buf = buf->bge_buf;
                m_new->m_ext.ext_free = bge_jfree;
                m_new->m_ext.ext_ref = bge_jref;
                m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN;

                m_new->m_flags |= M_EXT;
        } else {
                KKASSERT(m->m_flags & M_EXT);
                m_new = m;
                buf = m_new->m_ext.ext_arg;
        }
        m_new->m_data = m_new->m_ext.ext_buf;
        m_new->m_len = m_new->m_pkthdr.len = m_new->m_ext.ext_size;

        paddr = buf->bge_paddr;
        if (!sc->bge_rx_alignment_bug) {
                m_adj(m_new, ETHER_ALIGN);
                paddr += ETHER_ALIGN;
        }

        /* Set up the descriptor. */
        sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new;

        r = &sc->bge_ldata.bge_rx_jumbo_ring[i];
        r->bge_addr.bge_addr_lo = BGE_ADDR_LO(paddr);
        r->bge_addr.bge_addr_hi = BGE_ADDR_HI(paddr);
        r->bge_flags = BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING;
        r->bge_len = m_new->m_len;
        r->bge_idx = i;

        return 0;
}

/*
 * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
 * that's 1MB of memory, which is a lot. For now, we fill only the first
 * 256 ring entries and hope that our CPU is fast enough to keep up with
 * the NIC.
 */
static int
bge_init_rx_ring_std(struct bge_softc *sc)
{
        int i;

        for (i = 0; i < BGE_SSLOTS; i++) {
                if (bge_newbuf_std(sc, i, NULL) == ENOBUFS)
                        return(ENOBUFS);
        }

        bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
                        sc->bge_cdata.bge_rx_std_ring_map,
                        BUS_DMASYNC_PREWRITE);

        sc->bge_std = i - 1;
        CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);

        return(0);
}

static void
bge_free_rx_ring_std(struct bge_softc *sc)
{
        int i;

        for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
                if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
                        bus_dmamap_unload(sc->bge_cdata.bge_mtag,
                                          sc->bge_cdata.bge_rx_std_dmamap[i]);
                        m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
                        sc->bge_cdata.bge_rx_std_chain[i] = NULL;
                }
                bzero(&sc->bge_ldata.bge_rx_std_ring[i],
                    sizeof(struct bge_rx_bd));
        }
}

static int
bge_init_rx_ring_jumbo(struct bge_softc *sc)
{
        int i;
        struct bge_rcb *rcb;

        for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
                if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
                        return(ENOBUFS);
        }

        bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
                        sc->bge_cdata.bge_rx_jumbo_ring_map,
                        BUS_DMASYNC_PREWRITE);

        sc->bge_jumbo = i - 1;

        rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
        rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 0);
        CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);

        CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);

        return(0);
}

static void
bge_free_rx_ring_jumbo(struct bge_softc *sc)
{
        int i;

        for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
                if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
                        m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
                        sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
                }
                bzero(&sc->bge_ldata.bge_rx_jumbo_ring[i],
                    sizeof(struct bge_rx_bd));
        }
}

static void
bge_free_tx_ring(struct bge_softc *sc)
{
        int i;

        for (i = 0; i < BGE_TX_RING_CNT; i++) {
                if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
                        bus_dmamap_unload(sc->bge_cdata.bge_mtag,
                                          sc->bge_cdata.bge_tx_dmamap[i]);
                        m_freem(sc->bge_cdata.bge_tx_chain[i]);
                        sc->bge_cdata.bge_tx_chain[i] = NULL;
                }
                bzero(&sc->bge_ldata.bge_tx_ring[i],
                    sizeof(struct bge_tx_bd));
        }
}

static int
bge_init_tx_ring(struct bge_softc *sc)
{
        sc->bge_txcnt = 0;
        sc->bge_tx_saved_considx = 0;
        sc->bge_tx_prodidx = 0;

        /* Initialize transmit producer index for host-memory send ring. */
        CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);

        /* 5700 b2 errata */
        if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
                CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0);

        CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
        /* 5700 b2 errata */
        if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
                CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);

        return(0);
}

static void
bge_setmulti(struct bge_softc *sc)
{
        struct ifnet *ifp;
        struct ifmultiaddr *ifma;
        uint32_t hashes[4] = { 0, 0, 0, 0 };
        int h, i;

        ifp = &sc->arpcom.ac_if;

        if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
                for (i = 0; i < 4; i++)
                        CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
                return;
        }

        /* First, zot all the existing filters. */
        for (i = 0; i < 4; i++)
                CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);

        /* Now program new ones. */
        LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
                if (ifma->ifma_addr->sa_family != AF_LINK)
                        continue;
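                /*
                 * The chip hashes each multicast address into a 128-bit
                 * table spread across the four 32-bit MAR registers:
                 * bits 6:5 of the 7-bit CRC select the register and
                 * bits 4:0 select the bit within it.
                 */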
                h = ether_crc32_le(
                    LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
                    ETHER_ADDR_LEN) & 0x7f;
                hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
        }

        for (i = 0; i < 4; i++)
                CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
}

/*
 * Do endian, PCI and DMA initialization. Also check the on-board ROM
 * self-test results.
 */
static int
bge_chipinit(struct bge_softc *sc)
{
        int i;
        uint32_t dma_rw_ctl;

        /* Set endian type before we access any non-PCI registers. */
        pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, BGE_INIT, 4);

        /*
         * Check the 'ROM failed' bit on the RX CPU to see if
         * self-tests passed.
         */
        if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL) {
                if_printf(&sc->arpcom.ac_if,
                          "RX CPU self-diagnostics failed!\n");
                return(ENODEV);
        }

        /* Clear the MAC control register */
        CSR_WRITE_4(sc, BGE_MAC_MODE, 0);

        /*
         * Clear the MAC statistics block in the NIC's
         * internal memory.
         */
        for (i = BGE_STATS_BLOCK;
            i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t))
                BGE_MEMWIN_WRITE(sc, i, 0);

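        /* Clear the status block region in NIC memory as well. */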
        for (i = BGE_STATUS_BLOCK;
            i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t))
                BGE_MEMWIN_WRITE(sc, i, 0);

        /* Set up the PCI DMA control register. */
        if (sc->bge_pcie) {
                /* PCI Express */
                dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
                    (0xf << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
                    (0x2 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
        } else if (pci_read_config(sc->bge_dev, BGE_PCI_PCISTATE, 4) &
                   BGE_PCISTATE_PCI_BUSMODE) {
                /* Conventional PCI bus */
                dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
                    (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
                    (0x7 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
                    (0x0F);
        } else {
                /* PCI-X bus */
                /*
                 * The 5704 uses a different encoding of read/write
                 * watermarks.
                 */
                if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
                        dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
                            (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
                            (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
                else
                        dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
                            (0x3 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
                            (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
                            (0x0F);

                /*
                 * 5703 and 5704 need ONEDMA_AT_ONCE as a workaround
                 * for hardware bugs.
                 */
                if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
                    sc->bge_asicrev == BGE_ASICREV_BCM5704) {
                        uint32_t tmp;

                        tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1f;
                        if (tmp == 0x6 || tmp == 0x7)
                                dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE;
                }
        }

        if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
            sc->bge_asicrev == BGE_ASICREV_BCM5704 ||
            sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
            sc->bge_asicrev == BGE_ASICREV_BCM5750)
                dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
        pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);

        /*
         * Set up general mode register.
         */
        CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS|
            BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS|
            BGE_MODECTL_TX_NO_PHDR_CSUM);

        /*
         * Disable memory write invalidate.  Apparently it is not supported
         * properly by these devices.
         */
        PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD, PCIM_CMD_MWIEN, 4);

        /* Set the timer prescaler (always 66MHz) */
        CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1 /* BGE_32BITTIME_66MHZ */);

        return(0);
}

static int
bge_blockinit(struct bge_softc *sc)
{
        struct bge_rcb *rcb;
        bus_size_t vrcb;
        bge_hostaddr taddr;
        int i;

        /*
         * Initialize the memory window pointer register so that
         * we can access the first 32K of internal NIC RAM. This will
         * allow us to set up the TX send ring RCBs and the RX return
         * ring RCBs, plus other things which live in NIC memory.
         */
        CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);

        /* Note: the BCM5704 has a smaller mbuf space than other chips. */

        if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
            sc->bge_asicrev != BGE_ASICREV_BCM5750) {
                /* Configure mbuf memory pool */
                if (sc->bge_extram) {
                        CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
                            BGE_EXT_SSRAM);
                        if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
                                CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
                        else
                                CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
                } else {
                        CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
                            BGE_BUFFPOOL_1);
                        if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
                                CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
                        else
                                CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
                }

                /* Configure DMA resource pool */
                CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
                    BGE_DMA_DESCRIPTORS);
                CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
        }

        /* Configure mbuf pool watermarks */
        if (sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
            sc->bge_asicrev == BGE_ASICREV_BCM5750) {
                CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
                CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
        } else {
                CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
                CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
        }
        CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);

        /* Configure DMA resource watermarks */
        CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
        CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);

        /* Enable buffer manager */
        if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
            sc->bge_asicrev != BGE_ASICREV_BCM5750) {
                CSR_WRITE_4(sc, BGE_BMAN_MODE,
                    BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN);

                /* Poll for buffer manager start indication */
                for (i = 0; i < BGE_TIMEOUT; i++) {
                        if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
                                break;
                        DELAY(10);
                }

                if (i == BGE_TIMEOUT) {
                        if_printf(&sc->arpcom.ac_if,
                                  "buffer manager failed to start\n");
                        return(ENXIO);
                }
        }

        /* Enable flow-through queues */
        CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
        CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);

        /* Wait until queue initialization is complete */
        for (i = 0; i < BGE_TIMEOUT; i++) {
                if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
                        break;
                DELAY(10);
        }

        if (i == BGE_TIMEOUT) {
                if_printf(&sc->arpcom.ac_if,
                          "flow-through queue init failed\n");
                return(ENXIO);
        }

        /* Initialize the standard RX ring control block */
        rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb;
        rcb->bge_hostaddr.bge_addr_lo =
            BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr);
        rcb->bge_hostaddr.bge_addr_hi =
            BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr);
        bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
            sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREREAD);
        if (sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
            sc->bge_asicrev == BGE_ASICREV_BCM5750)
                rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
        else
                rcb->bge_maxlen_flags =
                    BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
        if (sc->bge_extram)
                rcb->bge_nicaddr = BGE_EXT_STD_RX_RINGS;
        else
                rcb->bge_nicaddr = BGE_STD_RX_RINGS;
        CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
        CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
        CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
        CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);

        /*
         * Initialize the jumbo RX ring control block
         * We set the 'ring disabled' bit in the flags
         * field until we're actually ready to start
         * using this ring (i.e. once we set the MTU
         * high enough to require it).
         */
        if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
            sc->bge_asicrev != BGE_ASICREV_BCM5750) {
                rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;

                rcb->bge_hostaddr.bge_addr_lo =
                    BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
                rcb->bge_hostaddr.bge_addr_hi =
                    BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
                bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
                    sc->bge_cdata.bge_rx_jumbo_ring_map,
                    BUS_DMASYNC_PREREAD);
                rcb->bge_maxlen_flags =
                    BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN,
                    BGE_RCB_FLAG_RING_DISABLED);
                if (sc->bge_extram)
                        rcb->bge_nicaddr = BGE_EXT_JUMBO_RX_RINGS;
                else
                        rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
                CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
                    rcb->bge_hostaddr.bge_addr_hi);
                CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
                    rcb->bge_hostaddr.bge_addr_lo);
                CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
                    rcb->bge_maxlen_flags);
                CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);

                /* Set up dummy disabled mini ring RCB */
                rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb;
                rcb->bge_maxlen_flags =
                    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
                CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
                    rcb->bge_maxlen_flags);
        }

        /*
         * Set the BD ring replenish thresholds. The recommended
         * values are 1/8th the number of descriptors allocated to
         * each ring.
         */
        CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, BGE_STD_RX_RING_CNT/8);
        CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8);

        /*
         * Disable all unused send rings by setting the 'ring disabled'
         * bit in the flags field of all the TX send ring control blocks.
         * These are located in NIC memory.
         */
        vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
        for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) {
                RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
                    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
                RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
                vrcb += sizeof(struct bge_rcb);
        }

        /* Configure TX RCB 0 (we use only the first ring) */
        vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
        BGE_HOSTADDR(taddr, sc->bge_ldata.bge_tx_ring_paddr);
        RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
        RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
        RCB_WRITE_4(sc, vrcb, bge_nicaddr,
            BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
        if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
            sc->bge_asicrev != BGE_ASICREV_BCM5750) {
                RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
                    BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
        }

        /* Disable all unused RX return rings */
        vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
        for (i = 0; i < BGE_RX_RINGS_MAX; i++) {
                RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0);
                RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0);
                RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
                    BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt,
                    BGE_RCB_FLAG_RING_DISABLED));
                RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
                CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO +
                    (i * (sizeof(uint64_t))), 0);
                vrcb += sizeof(struct bge_rcb);
        }

        /* Initialize RX ring indexes */
        CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, 0);
        CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
        CSR_WRITE_4(sc, BGE_MBX_RX_MINI_PROD_LO, 0);

        /*
         * Set up RX return ring 0
         * Note that the NIC address for RX return rings is 0x00000000.
         * The return rings live entirely within the host, so the
         * nicaddr field in the RCB isn't used.
         */
        vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
        BGE_HOSTADDR(taddr, sc->bge_ldata.bge_rx_return_ring_paddr);
        RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
        RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
        RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0x00000000);
        RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
            BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));

        /* Set random backoff seed for TX */
        CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
            sc->arpcom.ac_enaddr[0] + sc->arpcom.ac_enaddr[1] +
            sc->arpcom.ac_enaddr[2] + sc->arpcom.ac_enaddr[3] +
            sc->arpcom.ac_enaddr[4] + sc->arpcom.ac_enaddr[5] +
            BGE_TX_BACKOFF_SEED_MASK);

        /* Set inter-packet gap */
        CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);

        /*
         * Specify which ring to use for packets that don't match
         * any RX rules.
         */
        CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);

        /*
         * Configure number of RX lists. One interrupt distribution
         * list, sixteen active lists, one bad frames class.
         */
        CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);

        /* Initialize RX list placement stats mask. */
        CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
        CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);

        /* Disable host coalescing until we get it set up */
        CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);

        /* Poll to make sure it's shut down. */
        for (i = 0; i < BGE_TIMEOUT; i++) {
                if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
                        break;
                DELAY(10);
        }

        if (i == BGE_TIMEOUT) {
                if_printf(&sc->arpcom.ac_if,
                          "host coalescing engine failed to idle\n");
                return(ENXIO);
        }

        /* Set up host coalescing defaults */
        CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
        CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
        CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
        CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
        if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
            sc->bge_asicrev != BGE_ASICREV_BCM5750) {
                CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
                CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
        }
        CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0);
        CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0);

        /* Set up address of statistics block */
        if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
            sc->bge_asicrev != BGE_ASICREV_BCM5750) {
                CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI,
                    BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr));
                CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
                    BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr));

                CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
                CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
                CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
        }

        /* Set up address of status block */
        CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI,
            BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr));
        CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
            BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr));
        sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx = 0;
        sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx = 0;

        /* Turn on host coalescing state machine */
        CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);

        /* Turn on RX BD completion state machine and enable attentions */
        CSR_WRITE_4(sc, BGE_RBDC_MODE,
            BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);

        /* Turn on RX list placement state machine */
        CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);

        /* Turn on RX list selector state machine. */
        if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
            sc->bge_asicrev != BGE_ASICREV_BCM5750)
                CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);

        /* Turn on DMA, clear stats */
        CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB|
            BGE_MACMODE_RXDMA_ENB|BGE_MACMODE_RX_STATS_CLEAR|
            BGE_MACMODE_TX_STATS_CLEAR|BGE_MACMODE_RX_STATS_ENB|
            BGE_MACMODE_TX_STATS_ENB|BGE_MACMODE_FRMHDR_DMA_ENB|
            (sc->bge_tbi ? BGE_PORTMODE_TBI : BGE_PORTMODE_MII));

        /* Set misc. local control, enable interrupts on attentions */
        CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);

#ifdef notdef
        /* Assert GPIO pins for PHY reset */
        BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
            BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
        BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
            BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
#endif

        /* Turn on DMA completion state machine */
        if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
            sc->bge_asicrev != BGE_ASICREV_BCM5750)
                CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);

        /* Turn on write DMA state machine */
        CSR_WRITE_4(sc, BGE_WDMA_MODE,
            BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS);

        /* Turn on read DMA state machine */
        CSR_WRITE_4(sc, BGE_RDMA_MODE,
            BGE_RDMAMODE_ENABLE|BGE_RDMAMODE_ALL_ATTNS);

        /* Turn on RX data completion state machine */
        CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);

        /* Turn on RX BD initiator state machine */
        CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);

        /* Turn on RX data and RX BD initiator state machine */
        CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);

        /* Turn on Mbuf cluster free state machine */
        if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
            sc->bge_asicrev != BGE_ASICREV_BCM5750)
                CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);

        /* Turn on send BD completion state machine */
        CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);

        /* Turn on send data completion state machine */
        CSR_WRITE_4(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);

        /* Turn on send data initiator state machine */
        CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);

        /* Turn on send BD initiator state machine */
        CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);

        /* Turn on send BD selector state machine */
        CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);

        CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
        CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
            BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);
1441
1442         /* Ack/clear link change events */
1443         CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
1444             BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
1445             BGE_MACSTAT_LINK_CHANGED);
1446         CSR_WRITE_4(sc, BGE_MI_STS, 0);
1447
1448         /* Enable PHY auto polling (for MII/GMII only) */
1449         if (sc->bge_tbi) {
1450                 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
1451         } else {
1452                 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL | (10 << 16));
1453                 if (sc->bge_asicrev == BGE_ASICREV_BCM5700)
1454                         CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
1455                             BGE_EVTENB_MI_INTERRUPT);
1456         }
1457
1458         /* Enable link state change attentions. */
1459         BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
1460
1461         return(0);
1462 }
1463
1464 /*
1465  * Probe for a Broadcom chip. Check the PCI vendor and device IDs
1466  * against our list and return its name if we find a match. Note
1467  * that since the Broadcom controller contains VPD support, we
1468  * can get the device name string from the controller itself instead
1469  * of the compiled-in string. This is a little slow, but it guarantees
1470  * we'll always announce the right product name.
1471  */
1472 static int
1473 bge_probe(device_t dev)
1474 {
1475         struct bge_softc *sc;
1476         struct bge_type *t;
1477         char *descbuf;
1478         uint16_t product, vendor;
1479
1480         product = pci_get_device(dev);
1481         vendor = pci_get_vendor(dev);
1482
1483         for (t = bge_devs; t->bge_name != NULL; t++) {
1484                 if (vendor == t->bge_vid && product == t->bge_did)
1485                         break;
1486         }
1487
1488         if (t->bge_name == NULL)
1489                 return(ENXIO);
1490
1491         sc = device_get_softc(dev);
1492         descbuf = kmalloc(BGE_DEVDESC_MAX, M_TEMP, M_WAITOK);
1493         ksnprintf(descbuf, BGE_DEVDESC_MAX, "%s, ASIC rev. %#04x", t->bge_name,
1494             pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >> 16);
1495         device_set_desc_copy(dev, descbuf);
1496         if (pci_get_subvendor(dev) == PCI_VENDOR_DELL)
1497                 sc->bge_no_3_led = 1;
1498         kfree(descbuf, M_TEMP);
1499         return(0);
1500 }
1501
1502 static int
1503 bge_attach(device_t dev)
1504 {
1505         struct ifnet *ifp;
1506         struct bge_softc *sc;
1507         uint32_t hwcfg = 0;
1508         uint32_t mac_addr = 0;
1509         int error = 0, rid;
1510         uint8_t ether_addr[ETHER_ADDR_LEN];
1511
1512         sc = device_get_softc(dev);
1513         sc->bge_dev = dev;
1514         callout_init(&sc->bge_stat_timer);
1515         lwkt_serialize_init(&sc->bge_jslot_serializer);
1516
1517         /*
1518          * Map control/status registers.
1519          */
1520         pci_enable_busmaster(dev);
1521
1522         rid = BGE_PCI_BAR0;
1523         sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
1524             RF_ACTIVE);
1525
1526         if (sc->bge_res == NULL) {
1527                 device_printf(dev, "couldn't map memory\n");
1528                 error = ENXIO;
1529                 return(error);
1530         }
1531
1532         sc->bge_btag = rman_get_bustag(sc->bge_res);
1533         sc->bge_bhandle = rman_get_bushandle(sc->bge_res);
1534
1535         /* Allocate interrupt */
1536         rid = 0;
1537
1538         sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1539             RF_SHAREABLE | RF_ACTIVE);
1540
1541         if (sc->bge_irq == NULL) {
1542                 device_printf(dev, "couldn't map interrupt\n");
1543                 error = ENXIO;
1544                 goto fail;
1545         }
1546
1547         /* Save ASIC rev. */
1548         sc->bge_chipid =
1549             pci_read_config(dev, BGE_PCI_MISC_CTL, 4) &
1550             BGE_PCIMISCCTL_ASICREV;
1551         sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid);
1552         sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid);
1553
1554         /*
1555          * Treat the 5714 and the 5752 like the 5750 until we have more info
1556          * on this chip.
1557          */
1558         if (sc->bge_asicrev == BGE_ASICREV_BCM5714 ||
1559             sc->bge_asicrev == BGE_ASICREV_BCM5752)
1560                 sc->bge_asicrev = BGE_ASICREV_BCM5750;
1561
1562         /*
1563          * XXX: Broadcom Linux driver.  Not in specs or errata.
1564          * PCI-Express?
1565          */
1566         if (sc->bge_asicrev == BGE_ASICREV_BCM5750) {
1567                 uint32_t v;
1568
1569                 v = pci_read_config(dev, BGE_PCI_MSI_CAPID, 4);
1570                 if (((v >> 8) & 0xff) == BGE_PCIE_MSI_CAPID) {
1571                         v = pci_read_config(dev, BGE_PCIE_MSI_CAPID, 4);
1572                         if ((v & 0xff) == BGE_PCIE_MSI_CAPID_VAL)
1573                                 sc->bge_pcie = 1;
1574                 }
1575         }
1576
1577         ifp = &sc->arpcom.ac_if;
1578         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1579
1580         /* Try to reset the chip. */
1581         bge_reset(sc);
1582
1583         if (bge_chipinit(sc)) {
1584                 device_printf(dev, "chip initialization failed\n");
1585                 error = ENXIO;
1586                 goto fail;
1587         }
1588
1589         /*
1590          * Get station address from the EEPROM.
1591          */
1592         mac_addr = bge_readmem_ind(sc, 0x0c14);
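        /*
         * 0x484b is ASCII "HK", apparently the signature the bootcode
         * leaves in NIC memory once it has stored a valid station
         * address there (the Broadcom Linux driver makes the same check).
         */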
1593         if ((mac_addr >> 16) == 0x484b) {
1594                 ether_addr[0] = (uint8_t)(mac_addr >> 8);
1595                 ether_addr[1] = (uint8_t)mac_addr;
1596                 mac_addr = bge_readmem_ind(sc, 0x0c18);
1597                 ether_addr[2] = (uint8_t)(mac_addr >> 24);
1598                 ether_addr[3] = (uint8_t)(mac_addr >> 16);
1599                 ether_addr[4] = (uint8_t)(mac_addr >> 8);
1600                 ether_addr[5] = (uint8_t)mac_addr;
1601         } else if (bge_read_eeprom(sc, ether_addr,
1602             BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) {
1603                 device_printf(dev, "failed to read station address\n");
1604                 error = ENXIO;
1605                 goto fail;
1606         }
1607
1608         /* 5705/5750 limits RX return ring to 512 entries. */
1609         if (sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
1610             sc->bge_asicrev == BGE_ASICREV_BCM5750)
1611                 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
1612         else
1613                 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
1614
1615         error = bge_dma_alloc(sc);
1616         if (error)
1617                 goto fail;
1618
1619         /* Set default tuneable values. */
1620         sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
1621         sc->bge_rx_coal_ticks = 150;
1622         sc->bge_tx_coal_ticks = 150;
1623         sc->bge_rx_max_coal_bds = 64;
1624         sc->bge_tx_max_coal_bds = 128;
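        /*
         * The coalescing defaults above are what bge_blockinit() loads
         * into the host coalescing engine: an interrupt is generated
         * only once the tick count or buffered-descriptor threshold is
         * reached, trading a little latency for fewer interrupts.
         */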
1625
1626         /* Set up ifnet structure */
1627         ifp->if_softc = sc;
1628         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1629         ifp->if_ioctl = bge_ioctl;
1630         ifp->if_start = bge_start;
1631         ifp->if_watchdog = bge_watchdog;
1632         ifp->if_init = bge_init;
1633         ifp->if_mtu = ETHERMTU;
1634         ifp->if_capabilities = IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
1635         ifq_set_maxlen(&ifp->if_snd, BGE_TX_RING_CNT - 1);
1636         ifq_set_ready(&ifp->if_snd);
1637
1638         /*
1639          * 5700 B0 chips do not support checksumming correctly due
1640          * to hardware bugs.
1641          */
1642         if (sc->bge_chipid != BGE_CHIPID_BCM5700_B0) {
1643                 ifp->if_capabilities |= IFCAP_HWCSUM;
1644                 ifp->if_hwassist = BGE_CSUM_FEATURES;
1645         }
1646         ifp->if_capenable = ifp->if_capabilities;
1647
1648         /*
1649          * Figure out what sort of media we have by checking the
1650          * hardware config word in the first 32k of NIC internal memory,
1651          * or fall back to examining the EEPROM if necessary.
1652          * Note: on some BCM5700 cards, this value appears to be unset.
1653          * If that's the case, we have to rely on identifying the NIC
1654          * by its PCI subsystem ID, as we do below for the SysKonnect
1655          * SK-9D41.
1656          */
1657         if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER)
1658                 hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
1659         else {
1660                 if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
1661                                     sizeof(hwcfg))) {
1662                         device_printf(dev, "failed to read EEPROM\n");
1663                         error = ENXIO;
1664                         goto fail;
1665                 }
1666                 hwcfg = ntohl(hwcfg);
1667         }
1668
1669         if ((hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER)
1670                 sc->bge_tbi = 1;
1671
1672         /* The SysKonnect SK-9D41 is a 1000baseSX card. */
1673         if (pci_get_subvendor(dev) == PCI_PRODUCT_SCHNEIDERKOCH_SK_9D41)
1674                 sc->bge_tbi = 1;
1675
1676         if (sc->bge_tbi) {
1677                 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK,
1678                     bge_ifmedia_upd, bge_ifmedia_sts);
1679                 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
1680                 ifmedia_add(&sc->bge_ifmedia,
1681                     IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL);
1682                 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
1683                 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO);
1684                 sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
1685         } else {
1686                 /*
1687                  * Do transceiver setup.
1688                  */
1689                 if (mii_phy_probe(dev, &sc->bge_miibus,
1690                     bge_ifmedia_upd, bge_ifmedia_sts)) {
1691                         device_printf(dev, "MII without any PHY!\n");
1692                         error = ENXIO;
1693                         goto fail;
1694                 }
1695         }
1696
1697         /*
1698          * When using the BCM5701 in PCI-X mode, data corruption has
1699          * been observed in the first few bytes of some received packets.
1700          * Aligning the packet buffer in memory eliminates the corruption.
1701          * Unfortunately, this misaligns the packet payloads.  On platforms
1702          * which do not support unaligned accesses, we will realign the
1703          * payloads by copying the received packets.
1704          */
1705         switch (sc->bge_chipid) {
1706         case BGE_CHIPID_BCM5701_A0:
1707         case BGE_CHIPID_BCM5701_B0:
1708         case BGE_CHIPID_BCM5701_B2:
1709         case BGE_CHIPID_BCM5701_B5:
1710                 /* If in PCI-X mode, work around the alignment bug. */
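                /*
                 * BGE_PCISTATE_PCI_BUSMODE is clear when the bus is
                 * running in PCI-X mode, so BUSSPEED set with BUSMODE
                 * clear below identifies a high-speed PCI-X bus.
                 */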
1711                 if ((pci_read_config(dev, BGE_PCI_PCISTATE, 4) &
1712                     (BGE_PCISTATE_PCI_BUSMODE | BGE_PCISTATE_PCI_BUSSPEED)) ==
1713                     BGE_PCISTATE_PCI_BUSSPEED)
1714                         sc->bge_rx_alignment_bug = 1;
1715                 break;
1716         }
1717
1718         /*
1719          * Call MI attach routine.
1720          */
1721         ether_ifattach(ifp, ether_addr, NULL);
1722
1723         error = bus_setup_intr(dev, sc->bge_irq, INTR_NETSAFE,
1724                                bge_intr, sc, &sc->bge_intrhand, 
1725                                ifp->if_serializer);
1726         if (error) {
1727                 ether_ifdetach(ifp);
1728                 device_printf(dev, "couldn't set up irq\n");
1729                 goto fail;
1730         }
1731         return(0);
1732 fail:
1733         bge_detach(dev);
1734         return(error);
1735 }
1736
1737 static int
1738 bge_detach(device_t dev)
1739 {
1740         struct bge_softc *sc = device_get_softc(dev);
1741         struct ifnet *ifp = &sc->arpcom.ac_if;
1742
1743         if (device_is_attached(dev)) {
1744                 lwkt_serialize_enter(ifp->if_serializer);
1745                 bge_stop(sc);
1746                 bge_reset(sc);
1747                 bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand);
1748                 lwkt_serialize_exit(ifp->if_serializer);
1749
1750                 ether_ifdetach(ifp);
1751         }
1752         if (sc->bge_tbi)
1753                 ifmedia_removeall(&sc->bge_ifmedia);
1754         if (sc->bge_miibus)
1755                 device_delete_child(dev, sc->bge_miibus);
1756         bus_generic_detach(dev);
1757
1758         bge_release_resources(sc);
1759         bge_dma_free(sc);
1760
1761         return 0;
1762 }
1763
1764 static void
1765 bge_release_resources(struct bge_softc *sc)
1766 {
1767         device_t dev;
1768
1769         dev = sc->bge_dev;
1770
1771         if (sc->bge_irq != NULL)
1772                 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->bge_irq);
1773
1774         if (sc->bge_res != NULL)
1775                 bus_release_resource(dev, SYS_RES_MEMORY,
1776                     BGE_PCI_BAR0, sc->bge_res);
1777 }
1778
1779 static void
1780 bge_reset(struct bge_softc *sc)
1781 {
1782         device_t dev;
1783         uint32_t cachesize, command, pcistate, reset;
1784         int i, val = 0;
1785
1786         dev = sc->bge_dev;
1787
1788         /* Save some important PCI state. */
1789         cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
1790         command = pci_read_config(dev, BGE_PCI_CMD, 4);
1791         pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);
1792
1793         pci_write_config(dev, BGE_PCI_MISC_CTL,
1794             BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
1795             BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW, 4);
1796
1797         reset = BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1);
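        /*
         * The (65 << 1) component programs the low bits of BGE_MISC_CFG,
         * apparently the chip's internal timer prescaler for a 66MHz core
         * clock (later drivers call this value BGE_32BITTIME_66MHZ); it
         * is written again below once the reset completes.
         */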
1798
1799         /* XXX: Broadcom Linux driver. */
1800         if (sc->bge_pcie) {
1801                 if (CSR_READ_4(sc, 0x7e2c) == 0x60)     /* PCIE 1.0 */
1802                         CSR_WRITE_4(sc, 0x7e2c, 0x20);
1803                 if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
1804                         /* Prevent PCIE link training during global reset */
1805                         CSR_WRITE_4(sc, BGE_MISC_CFG, (1<<29));
1806                         reset |= (1<<29);
1807                 }
1808         }
1809
1810         /* Issue global reset */
1811         bge_writereg_ind(sc, BGE_MISC_CFG, reset);
1812
1813         DELAY(1000);
1814
1815         /* XXX: Broadcom Linux driver. */
1816         if (sc->bge_pcie) {
1817                 if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
1818                         uint32_t v;
1819
1820                         DELAY(500000); /* wait for link training to complete */
1821                         v = pci_read_config(dev, 0xc4, 4);
1822                         pci_write_config(dev, 0xc4, v | (1<<15), 4);
1823                 }
1824                 /* Set PCIE max payload size and clear error status. */
1825                 pci_write_config(dev, 0xd8, 0xf5000, 4);
1826         }
1827
1828         /* Reset some of the PCI state that got zapped by reset */
1829         pci_write_config(dev, BGE_PCI_MISC_CTL,
1830             BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
1831             BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW, 4);
1832         pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
1833         pci_write_config(dev, BGE_PCI_CMD, command, 4);
1834         bge_writereg_ind(sc, BGE_MISC_CFG, (65 << 1));
1835
1836         /* Enable memory arbiter. */
1837         if (sc->bge_asicrev != BGE_ASICREV_BCM5705)
1838                 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
1839
1840         /*
1841          * Prevent PXE restart: write a magic number to the
1842          * general communications memory at 0xB50.
1843          */
1844         bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
1845         /*
1846          * Poll the value location we just wrote until
1847          * we see the 1's complement of the magic number.
1848          * This indicates that the firmware initialization
1849          * is complete.
1850          */
1851         for (i = 0; i < BGE_TIMEOUT; i++) {
1852                 val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
1853                 if (val == ~BGE_MAGIC_NUMBER)
1854                         break;
1855                 DELAY(10);
1856         }
1857         
1858         if (i == BGE_TIMEOUT) {
1859                 if_printf(&sc->arpcom.ac_if, "firmware handshake timed out\n");
1860                 return;
1861         }
1862
1863         /*
1864          * XXX Wait for the value of the PCISTATE register to
1865          * return to its original pre-reset state. This is a
1866          * fairly good indicator of reset completion. If we don't
1867          * wait for the reset to fully complete, trying to read
1868          * from the device's non-PCI registers may yield garbage
1869          * results.
1870          */
1871         for (i = 0; i < BGE_TIMEOUT; i++) {
1872                 if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate)
1873                         break;
1874                 DELAY(10);
1875         }
1876
1877         /* Fix up byte swapping */
1878         CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS |
1879             BGE_MODECTL_BYTESWAP_DATA);
1880
1881         CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
1882
1883         /*
1884          * The 5704 in TBI mode apparently needs some special
1885          * adjustment to ensure the SERDES drive level is set
1886          * to 1.2V.
1887          */
1888         if (sc->bge_asicrev == BGE_ASICREV_BCM5704 && sc->bge_tbi) {
1889                 uint32_t serdescfg;
1890
1891                 serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG);
1892                 serdescfg = (serdescfg & ~0xFFF) | 0x880;
1893                 CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg);
1894         }
1895
1896         /* XXX: Broadcom Linux driver. */
1897         if (sc->bge_pcie && sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
1898                 uint32_t v;
1899
1900                 v = CSR_READ_4(sc, 0x7c00);
1901                 CSR_WRITE_4(sc, 0x7c00, v | (1<<25));
1902         }
1903
1904         DELAY(10000);
1905 }
1906
1907 /*
1908  * Frame reception handling. This is called if there's a frame
1909  * on the receive return list.
1910  *
1911  * Note: we have to be able to handle two possibilities here:
1912  * 1) the frame is from the jumbo receive ring
1913  * 2) the frame is from the standard receive ring
1914  */
1915
1916 static void
1917 bge_rxeof(struct bge_softc *sc)
1918 {
1919         struct ifnet *ifp;
1920         int stdcnt = 0, jumbocnt = 0;
1921
1922         if (sc->bge_rx_saved_considx ==
1923             sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx)
1924                 return;
1925
1926         ifp = &sc->arpcom.ac_if;
1927
1928         bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
1929                         sc->bge_cdata.bge_rx_return_ring_map,
1930                         BUS_DMASYNC_POSTREAD);
1931         bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
1932                         sc->bge_cdata.bge_rx_std_ring_map,
1933                         BUS_DMASYNC_POSTREAD);
1934         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1935             sc->bge_asicrev != BGE_ASICREV_BCM5750) {
1936                 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1937                                 sc->bge_cdata.bge_rx_jumbo_ring_map,
1938                                 BUS_DMASYNC_POSTREAD);
1939         }
1940
1941         while (sc->bge_rx_saved_considx !=
1942                sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx) {
1943                 struct bge_rx_bd        *cur_rx;
1944                 uint32_t                rxidx;
1945                 struct mbuf             *m = NULL;
1946                 uint16_t                vlan_tag = 0;
1947                 int                     have_tag = 0;
1948
1949                 cur_rx =
1950                     &sc->bge_ldata.bge_rx_return_ring[sc->bge_rx_saved_considx];
1951
1952                 rxidx = cur_rx->bge_idx;
1953                 BGE_INC(sc->bge_rx_saved_considx, sc->bge_return_ring_cnt);
1954
1955                 if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
1956                         have_tag = 1;
1957                         vlan_tag = cur_rx->bge_vlan_tag;
1958                 }
1959
1960                 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
1961                         BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
1962                         m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
1963                         sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL;
1964                         jumbocnt++;
1965                         if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
1966                                 ifp->if_ierrors++;
1967                                 bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
1968                                 continue;
1969                         }
1970                         if (bge_newbuf_jumbo(sc,
1971                             sc->bge_jumbo, NULL) == ENOBUFS) {
1972                                 ifp->if_ierrors++;
1973                                 bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
1974                                 continue;
1975                         }
1976                 } else {
1977                         BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
1978                         bus_dmamap_sync(sc->bge_cdata.bge_mtag,
1979                                         sc->bge_cdata.bge_rx_std_dmamap[rxidx],
1980                                         BUS_DMASYNC_POSTREAD);
1981                         bus_dmamap_unload(sc->bge_cdata.bge_mtag,
1982                                 sc->bge_cdata.bge_rx_std_dmamap[rxidx]);
1983                         m = sc->bge_cdata.bge_rx_std_chain[rxidx];
1984                         sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL;
1985                         stdcnt++;
1986                         if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
1987                                 ifp->if_ierrors++;
1988                                 bge_newbuf_std(sc, sc->bge_std, m);
1989                                 continue;
1990                         }
1991                         if (bge_newbuf_std(sc, sc->bge_std,
1992                             NULL) == ENOBUFS) {
1993                                 ifp->if_ierrors++;
1994                                 bge_newbuf_std(sc, sc->bge_std, m);
1995                                 continue;
1996                         }
1997                 }
1998
1999                 ifp->if_ipackets++;
2000 #ifndef __i386__
2001                 /*
2002                  * The i386 allows unaligned accesses, but for other
2003                  * platforms we must make sure the payload is aligned.
2004                  */
2005                 if (sc->bge_rx_alignment_bug) {
2006                         bcopy(m->m_data, m->m_data + ETHER_ALIGN,
2007                             cur_rx->bge_len);
2008                         m->m_data += ETHER_ALIGN;
2009                 }
2010 #endif
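                /*
                 * The chip's length count includes the Ethernet FCS;
                 * strip it before handing the mbuf to the stack.
                 */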
2011                 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
2012                 m->m_pkthdr.rcvif = ifp;
2013
2014                 if (ifp->if_capenable & IFCAP_RXCSUM) {
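                        /*
                         * A valid IP header checksums to 0xffff, hence
                         * the XOR test below.  For TCP/UDP the raw
                         * hardware checksum is passed up in csum_data
                         * along with CSUM_PSEUDO_HDR, telling the stack
                         * the value already covers the pseudo-header.
                         */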
2015                         if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
2016                                 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2017                                 if ((cur_rx->bge_ip_csum ^ 0xffff) == 0)
2018                                         m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2019                         }
2020                         if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM &&
2021                             m->m_pkthdr.len >= BGE_MIN_FRAME) {
2022                                 m->m_pkthdr.csum_data =
2023                                     cur_rx->bge_tcp_udp_csum;
2024                                 m->m_pkthdr.csum_flags |=
2025                                         CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2026                         }
2027                 }
2028
2029                 /*
2030                  * If we received a packet with a vlan tag, pass it
2031                  * to vlan_input() instead of ether_input().
2032                  */
2033                 if (have_tag) {
2034                         VLAN_INPUT_TAG(m, vlan_tag);
2035                         have_tag = vlan_tag = 0;
2036                 } else {
2037                         ifp->if_input(ifp, m);
2038                 }
2039         }
2040
2041         if (stdcnt > 0) {
2042                 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
2043                                 sc->bge_cdata.bge_rx_std_ring_map,
2044                                 BUS_DMASYNC_PREWRITE);
2045         }
2046
2047         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
2048             sc->bge_asicrev != BGE_ASICREV_BCM5750) {
2049                 if (jumbocnt > 0) {
2050                         bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2051                                         sc->bge_cdata.bge_rx_jumbo_ring_map,
2052                                         BUS_DMASYNC_PREWRITE);
2053                 }
2054         }
2055
2056         CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
2057         if (stdcnt)
2058                 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
2059         if (jumbocnt)
2060                 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
2061 }
2062
2063 static void
2064 bge_txeof(struct bge_softc *sc)
2065 {
2066         struct bge_tx_bd *cur_tx = NULL;
2067         struct ifnet *ifp;
2068
2069         if (sc->bge_tx_saved_considx ==
2070             sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx)
2071                 return;
2072
2073         ifp = &sc->arpcom.ac_if;
2074
2075         bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
2076                         sc->bge_cdata.bge_tx_ring_map,
2077                         BUS_DMASYNC_POSTREAD);
2078
2079         /*
2080          * Go through our tx ring and free mbufs for those
2081          * frames that have been sent.
2082          */
2083         while (sc->bge_tx_saved_considx !=
2084                sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx) {
2085                 uint32_t idx = 0;
2086
2087                 idx = sc->bge_tx_saved_considx;
2088                 cur_tx = &sc->bge_ldata.bge_tx_ring[idx];
2089                 if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
2090                         ifp->if_opackets++;
2091                 if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
2092                         bus_dmamap_sync(sc->bge_cdata.bge_mtag,
2093                                         sc->bge_cdata.bge_tx_dmamap[idx],
2094                                         BUS_DMASYNC_POSTWRITE);
2095                         bus_dmamap_unload(sc->bge_cdata.bge_mtag,
2096                             sc->bge_cdata.bge_tx_dmamap[idx]);
2097                         m_freem(sc->bge_cdata.bge_tx_chain[idx]);
2098                         sc->bge_cdata.bge_tx_chain[idx] = NULL;
2099                 }
2100                 sc->bge_txcnt--;
2101                 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
2102         }
2103
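        /*
         * Reopen the send queue only once enough descriptors are free
         * for a worst-case packet; this mirrors the reservation check
         * in bge_start().
         */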
2104         if (cur_tx != NULL &&
2105             (BGE_TX_RING_CNT - sc->bge_txcnt) >=
2106             (BGE_NSEG_RSVD + BGE_NSEG_SPARE))
2107                 ifp->if_flags &= ~IFF_OACTIVE;
2108
2109         if (sc->bge_txcnt == 0)
2110                 ifp->if_timer = 0;
2111
2112         if (!ifq_is_empty(&ifp->if_snd))
2113                 ifp->if_start(ifp);
2114 }
2115
2116 static void
2117 bge_intr(void *xsc)
2118 {
2119         struct bge_softc *sc = xsc;
2120         struct ifnet *ifp = &sc->arpcom.ac_if;
2121         uint32_t status, mimode;
2122
2123         /*
2124          * Ack the interrupt by writing something to BGE_MBX_IRQ0_LO.  Don't
2125          * disable interrupts by writing nonzero like we used to, since with
2126          * our current organization this just creates complications and
2127          * pessimizations for re-enabling interrupts.  We used to have races
2128          * instead of the necessary complications.  Disabling interrupts
2129          * would just reduce the chance of a status update while we are
2130          * running (by switching to the interrupt-mode coalescing
2131          * parameters), but this chance is already very low, so it is more
2132          * efficient to take another interrupt than to prevent one.
2133          *
2134          * We do the ack first to ensure another interrupt if there is a
2135          * status update after the ack.  We don't check for the status
2136          * changing later because, much as above, it is more efficient to
2137          * take another interrupt than to prevent one (not checking is a
2138          * smaller optimization than not toggling the interrupt enable,
2139          * since checking involves no PCI accesses while toggling requires
2140          * the status check).  So toggling would probably be a pessimization
2141          * even with MSI; it would only be needed when using a task queue.
2142          */
2143         CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
2144
2145         bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
2146                         sc->bge_cdata.bge_status_map,
2147                         BUS_DMASYNC_POSTREAD);
2148
2149         status = CSR_READ_4(sc, BGE_MAC_STS);
2150
2151         /*
2152          * Process link state changes.
2153          * Grrr. The link status word in the status block does
2154          * not work correctly on the BCM5700 rev AX and BX chips,
2155          * according to all available information. Hence, we have
2156          * to enable MII interrupts in order to properly obtain
2157          * async link changes. Unfortunately, this also means that
2158          * we have to read the MAC status register to detect link
2159          * changes, thereby adding an additional register access to
2160          * the interrupt handler.
2161          */
2162
2163         if (sc->bge_asicrev == BGE_ASICREV_BCM5700) {
2164                 if (status & BGE_MACSTAT_MI_INTERRUPT) {
2165                         sc->bge_link = 0;
2166                         callout_stop(&sc->bge_stat_timer);
2167                         bge_tick_serialized(sc);
2168                         /* Clear the interrupt */
2169                         CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
2170                             BGE_EVTENB_MI_INTERRUPT);
2171                         bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR);
2172                         bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR,
2173                             BRGPHY_INTRS);
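                        /*
                         * Reading BRGPHY_MII_ISR above acks the PHY's
                         * interrupt, and rewriting the mask re-arms it
                         * for the next link event.
                         */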
2174                 }
2175         } else {
2176                 if (status & BGE_MACSTAT_LINK_CHANGED) {
2177                         /*
2178                          * Sometimes PCS encoding errors are detected in
2179                          * TBI mode (on fiber NICs), and for some reason
2180                          * the chip will signal them as link changes.
2181                          * If we get a link change event, but the 'PCS
2182                          * encoding error' bit in the MAC status register
2183                          * is set, don't bother doing a link check.
2184                          * This avoids spurious "gigabit link up" messages
2185                          * that sometimes appear on fiber NICs during
2186                          * periods of heavy traffic. (There should be no
2187                          * effect on copper NICs.)
2188                          *
2189                          * If we do have a copper NIC (bge_tbi == 0) then
2190                          * check that the AUTOPOLL bit is set before
2191                          * processing the event as a real link change.
2192                          * Turning AUTOPOLL on and off in the MII read/write
2193                          * functions will often trigger a link status
2194                          * interrupt for no reason.
2195                          */
2196                         mimode = CSR_READ_4(sc, BGE_MI_MODE);
2197                         if (!(status & (BGE_MACSTAT_PORT_DECODE_ERROR |
2198                                         BGE_MACSTAT_MI_COMPLETE)) &&
2199                             (sc->bge_tbi || (mimode & BGE_MIMODE_AUTOPOLL))) {
2200                                 sc->bge_link = 0;
2201                                 callout_stop(&sc->bge_stat_timer);
2202                                 bge_tick_serialized(sc);
2203                         }
2207                         /* Clear the interrupt */
2208                         CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
2209                             BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
2210                             BGE_MACSTAT_LINK_CHANGED);
2211                 }
2212         }
2213
2214         if (ifp->if_flags & IFF_RUNNING) {
2215                 /* Check RX return ring producer/consumer */
2216                 bge_rxeof(sc);
2217
2218                 /* Check TX ring producer/consumer */
2219                 bge_txeof(sc);
2220         }
2221 }
2222
2223 static void
2224 bge_tick(void *xsc)
2225 {
2226         struct bge_softc *sc = xsc;
2227         struct ifnet *ifp = &sc->arpcom.ac_if;
2228
2229         lwkt_serialize_enter(ifp->if_serializer);
2230         bge_tick_serialized(xsc);
2231         lwkt_serialize_exit(ifp->if_serializer);
2232 }
2233
2234 static void
2235 bge_tick_serialized(void *xsc)
2236 {
2237         struct bge_softc *sc = xsc;
2238         struct ifnet *ifp = &sc->arpcom.ac_if;
2239         struct mii_data *mii = NULL;
2240         struct ifmedia *ifm = NULL;
2241
2242         if (sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
2243             sc->bge_asicrev == BGE_ASICREV_BCM5750)
2244                 bge_stats_update_regs(sc);
2245         else
2246                 bge_stats_update(sc);
2247
2248         callout_reset(&sc->bge_stat_timer, hz, bge_tick, sc);
2249
2250         if (sc->bge_link) {
2251                 return;
2252         }
2253
2254         if (sc->bge_tbi) {
2255                 ifm = &sc->bge_ifmedia;
2256                 if (CSR_READ_4(sc, BGE_MAC_STS) &
2257                     BGE_MACSTAT_TBI_PCS_SYNCHED) {
2258                         sc->bge_link++;
2259                         if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
2260                                 BGE_CLRBIT(sc, BGE_MAC_MODE,
2261                                            BGE_MACMODE_TBI_SEND_CFGS);
2262                         }
2263                         CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
2264                         if_printf(ifp, "gigabit link up\n");
2265                         if (!ifq_is_empty(&ifp->if_snd))
2266                                 ifp->if_start(ifp);
2267                 }
2268                 return;
2269         }
2270
2271         mii = device_get_softc(sc->bge_miibus);
2272         mii_tick(mii);
2273  
2274         if (!sc->bge_link) {
2275                 mii_pollstat(mii);
2276                 if (mii->mii_media_status & IFM_ACTIVE &&
2277                     IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
2278                         sc->bge_link++;
2279                         if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
2280                             IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
2281                                 if_printf(ifp, "gigabit link up\n");
2282                         if (!ifq_is_empty(&ifp->if_snd))
2283                                 ifp->if_start(ifp);
2284                 }
2285         }
2286 }
2287
2288 static void
2289 bge_stats_update_regs(struct bge_softc *sc)
2290 {
2291         struct ifnet *ifp = &sc->arpcom.ac_if;
2292         struct bge_mac_stats_regs stats;
2293         uint32_t *s;
2294         int i;
2295
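        /*
         * The 5705/5750 do not DMA the full statistics block; their MAC
         * statistics are read directly from a flat array of 32-bit
         * registers starting at BGE_RX_STATS.
         */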
2296         s = (uint32_t *)&stats;
2297         for (i = 0; i < sizeof(struct bge_mac_stats_regs); i += 4) {
2298                 *s = CSR_READ_4(sc, BGE_RX_STATS + i);
2299                 s++;
2300         }
2301
2302         ifp->if_collisions +=
2303            (stats.dot3StatsSingleCollisionFrames +
2304            stats.dot3StatsMultipleCollisionFrames +
2305            stats.dot3StatsExcessiveCollisions +
2306            stats.dot3StatsLateCollisions) -
2307            ifp->if_collisions;
2308 }
2309
2310 static void
2311 bge_stats_update(struct bge_softc *sc)
2312 {
2313         struct ifnet *ifp = &sc->arpcom.ac_if;
2314         bus_size_t stats;
2315
2316         stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;
2317
2318 #define READ_STAT(sc, stats, stat)      \
2319         CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))
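        /*
         * Each counter in the DMA'd statistics block is a 64-bit hi/lo
         * pair; only the low 32 bits are read here, since the collision
         * counters cannot plausibly wrap between one-second polls.
         */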
2320
2321         ifp->if_collisions +=
2322            (READ_STAT(sc, stats,
2323                 txstats.dot3StatsSingleCollisionFrames.bge_addr_lo) +
2324             READ_STAT(sc, stats,
2325                 txstats.dot3StatsMultipleCollisionFrames.bge_addr_lo) +
2326             READ_STAT(sc, stats,
2327                 txstats.dot3StatsExcessiveCollisions.bge_addr_lo) +
2328             READ_STAT(sc, stats,
2329                 txstats.dot3StatsLateCollisions.bge_addr_lo)) -
2330            ifp->if_collisions;
2331
2332 #undef READ_STAT
2333
2334 #ifdef notdef
2335         ifp->if_collisions +=
2336            (sc->bge_rdata->bge_info.bge_stats.dot3StatsSingleCollisionFrames +
2337            sc->bge_rdata->bge_info.bge_stats.dot3StatsMultipleCollisionFrames +
2338            sc->bge_rdata->bge_info.bge_stats.dot3StatsExcessiveCollisions +
2339            sc->bge_rdata->bge_info.bge_stats.dot3StatsLateCollisions) -
2340            ifp->if_collisions;
2341 #endif
2342 }
2343
2344 /*
2345  * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
2346  * pointers to descriptors.
2347  */
2348 static int
2349 bge_encap(struct bge_softc *sc, struct mbuf *m_head, uint32_t *txidx)
2350 {
2351         struct bge_tx_bd *d = NULL;
2352         uint16_t csum_flags = 0;
2353         struct ifvlan *ifv = NULL;
2354         struct bge_dmamap_arg ctx;
2355         bus_dma_segment_t segs[BGE_NSEG_NEW];
2356         bus_dmamap_t map;
2357         int error, maxsegs, idx, i;
2358
2359         if ((m_head->m_flags & (M_PROTO1|M_PKTHDR)) == (M_PROTO1|M_PKTHDR) &&
2360             m_head->m_pkthdr.rcvif != NULL &&
2361             m_head->m_pkthdr.rcvif->if_type == IFT_L2VLAN)
2362                 ifv = m_head->m_pkthdr.rcvif->if_softc;
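        /*
         * An mbuf carrying M_PROTO1 with an IFT_L2VLAN receive interface
         * was queued by the vlan pseudo-device; its softc (ifv) supplies
         * the tag inserted into the first TX descriptor below.
         */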
2363
2364         if (m_head->m_pkthdr.csum_flags) {
2365                 if (m_head->m_pkthdr.csum_flags & CSUM_IP)
2366                         csum_flags |= BGE_TXBDFLAG_IP_CSUM;
2367                 if (m_head->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
2368                         csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
2369                 if (m_head->m_flags & M_LASTFRAG)
2370                         csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
2371                 else if (m_head->m_flags & M_FRAG)
2372                         csum_flags |= BGE_TXBDFLAG_IP_FRAG;
2373         }
2374
2375         idx = *txidx;
2376         map = sc->bge_cdata.bge_tx_dmamap[idx];
2377
2378         maxsegs = (BGE_TX_RING_CNT - sc->bge_txcnt) - BGE_NSEG_RSVD;
2379         KASSERT(maxsegs >= BGE_NSEG_SPARE,
2380                 ("not enough segments %d\n", maxsegs));
2381
2382         if (maxsegs > BGE_NSEG_NEW)
2383                 maxsegs = BGE_NSEG_NEW;
2384
2385         /*
2386          * Pad outbound frame to BGE_MIN_FRAME for an unusual reason.
2387          * The bge hardware will pad out Tx runts to BGE_MIN_FRAME,
2388          * but when such padded frames employ the bge IP/TCP checksum
2389          * offload, the hardware checksum assist gives incorrect results
2390          * (possibly from incorporating its own padding into the UDP/TCP
2391          * checksum; who knows).  If we pad such runts with zeros, the
2392          * onboard checksum comes out correct.  We do this by pretending
2393          * the mbuf chain has too many fragments so the coalescing code
2394          * below can assemble the packet into a single buffer that's
2395          * padded out to the minimum frame size.
2396          */
2397         if ((csum_flags & BGE_TXBDFLAG_TCP_UDP_CSUM) &&
2398             m_head->m_pkthdr.len < BGE_MIN_FRAME) {
2399                 error = E2BIG;
2400         } else {
2401                 ctx.bge_segs = segs;
2402                 ctx.bge_maxsegs = maxsegs;
2403                 error = bus_dmamap_load_mbuf(sc->bge_cdata.bge_mtag, map,
2404                                              m_head, bge_dma_map_mbuf, &ctx,
2405                                              BUS_DMA_NOWAIT);
2406         }
2407         if (error == E2BIG || ctx.bge_maxsegs == 0) {
2408                 struct mbuf *m_new;
2409
2410                 m_new = m_defrag(m_head, MB_DONTWAIT);
2411                 if (m_new == NULL) {
2412                         if_printf(&sc->arpcom.ac_if,
2413                                   "could not defrag TX mbuf\n");
2414                         error = ENOBUFS;
2415                         goto back;
2416                 } else {
2417                         m_head = m_new;
2418                 }
2419
2420                 /*
2421                  * Manually pad short frames, and zero the pad space
2422                  * to avoid leaking data.
2423                  */
2424                 if ((csum_flags & BGE_TXBDFLAG_TCP_UDP_CSUM) &&
2425                     m_head->m_pkthdr.len < BGE_MIN_FRAME) {
2426                         int pad_len = BGE_MIN_FRAME - m_head->m_pkthdr.len;
2427
2428                         bzero(mtod(m_head, char *) + m_head->m_pkthdr.len,
2429                               pad_len);
2430                         m_head->m_pkthdr.len += pad_len;
2431                         m_head->m_len = m_head->m_pkthdr.len;
2432                 }
2433
2434                 ctx.bge_segs = segs;
2435                 ctx.bge_maxsegs = maxsegs;
2436                 error = bus_dmamap_load_mbuf(sc->bge_cdata.bge_mtag, map,
2437                                              m_head, bge_dma_map_mbuf, &ctx,
2438                                              BUS_DMA_NOWAIT);
2439                 if (error || ctx.bge_maxsegs == 0) {
2440                         if_printf(&sc->arpcom.ac_if,
2441                                   "could not map defragmented TX mbuf\n");
2442                         if (error == 0)
2443                                 error = E2BIG;
2444                         goto back;
2445                 }
2446         } else if (error) {
2447                 if_printf(&sc->arpcom.ac_if, "could not map TX mbuf\n");
2448                 goto back;
2449         }
2450
2451         bus_dmamap_sync(sc->bge_cdata.bge_mtag, map, BUS_DMASYNC_PREWRITE);
2452
2453         for (i = 0; ; i++) {
2454                 d = &sc->bge_ldata.bge_tx_ring[idx];
2455
2456                 d->bge_addr.bge_addr_lo = BGE_ADDR_LO(ctx.bge_segs[i].ds_addr);
2457                 d->bge_addr.bge_addr_hi = BGE_ADDR_HI(ctx.bge_segs[i].ds_addr);
2458                 d->bge_len = segs[i].ds_len;
2459                 d->bge_flags = csum_flags;
2460
2461                 if (i == ctx.bge_maxsegs - 1)
2462                         break;
2463                 BGE_INC(idx, BGE_TX_RING_CNT);
2464         }
2465         /* Mark the last segment as end of packet... */
2466         d->bge_flags |= BGE_TXBDFLAG_END;
2467
2468         /* Set vlan tag to the first segment of the packet. */
2469         d = &sc->bge_ldata.bge_tx_ring[*txidx];
2470         if (ifv != NULL) {
2471                 d->bge_flags |= BGE_TXBDFLAG_VLAN_TAG;
2472                 d->bge_vlan_tag = ifv->ifv_tag;
2473         } else {
2474                 d->bge_vlan_tag = 0;
2475         }
2476
2477         /*
2478          * Ensure that the map for this transmission is placed at
2479          * the array index of the last descriptor in this chain.
2480          */
2481         sc->bge_cdata.bge_tx_dmamap[*txidx] = sc->bge_cdata.bge_tx_dmamap[idx];
2482         sc->bge_cdata.bge_tx_dmamap[idx] = map;
2483         sc->bge_cdata.bge_tx_chain[idx] = m_head;
2484         sc->bge_txcnt += ctx.bge_maxsegs;
2485
2486         BGE_INC(idx, BGE_TX_RING_CNT);
2487         *txidx = idx;
2488 back:
2489         if (error)
2490                 m_freem(m_head);
2491         return error;
2492 }
2493
2494 /*
2495  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
2496  * to the mbuf data regions directly in the transmit descriptors.
2497  */
2498 static void
2499 bge_start(struct ifnet *ifp)
2500 {
2501         struct bge_softc *sc = ifp->if_softc;
2502         struct mbuf *m_head = NULL;
2503         uint32_t prodidx;
2504         int need_trans;
2505
2506         if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING ||
2507             !sc->bge_link)
2508                 return;
2509
2510         prodidx = sc->bge_tx_prodidx;
2511
2512         need_trans = 0;
2513         while (sc->bge_cdata.bge_tx_chain[prodidx] == NULL) {
2514                 m_head = ifq_poll(&ifp->if_snd);
2515                 if (m_head == NULL)
2516                         break;
2517
2518                 /*
2519                  * XXX
2520                  * The code inside the if() block is never reached since we
2521                  * must mark CSUM_IP_FRAGS in our if_hwassist to start getting
2522                  * requests to checksum TCP/UDP in a fragmented packet.
2523                  * 
2524                  * XXX
2525                  * safety overkill.  If this is a fragmented packet chain
2526                  * with delayed TCP/UDP checksums, then only encapsulate
2527                  * it if we have enough descriptors to handle the entire
2528                  * chain at once.
2529                  * (paranoia -- may not actually be needed)
2530                  */
2531                 if (m_head->m_flags & M_FIRSTFRAG &&
2532                     m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
2533                         if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
2534                             m_head->m_pkthdr.csum_data + 16) {
2535                                 ifp->if_flags |= IFF_OACTIVE;
2536                                 break;
2537                         }
2538                 }
2539
2540                 /*
2541                  * Sanity check: avoid coming within BGE_NSEG_RSVD
2542                  * descriptors of the end of the ring.  Also make
2543                  * sure there are BGE_NSEG_SPARE descriptors for
2544                  * jumbo buffers' defragmentation.
2545                  */
2546                 if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
2547                     (BGE_NSEG_RSVD + BGE_NSEG_SPARE)) {
2548                         ifp->if_flags |= IFF_OACTIVE;
2549                         break;
2550                 }
2551
2552                 /*
2553                  * Dequeue the packet before encapsulation, since
2554                  * bge_encap() may free the packet if an error occurs.
2555                  */
2556                 ifq_dequeue(&ifp->if_snd, m_head);
2557
2558                 /*
2559                  * Pack the data into the transmit ring. If we
2560                  * don't have room, set the OACTIVE flag and wait
2561                  * for the NIC to drain the ring.
2562                  */
2563                 if (bge_encap(sc, m_head, &prodidx)) {
2564                         ifp->if_flags |= IFF_OACTIVE;
2565                         break;
2566                 }
2567                 need_trans = 1;
2568
2569                 BPF_MTAP(ifp, m_head);
2570         }
2571
2572         if (!need_trans)
2573                 return;
2574
2575         /* Transmit */
2576         CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
2577         /* 5700 b2 errata */
2578         if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
2579                 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
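        /*
         * XXX The duplicated mailbox write above follows the 5700 BX
         * errata; a single producer-index write can apparently be lost
         * by those chips.
         */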
2580
2581         sc->bge_tx_prodidx = prodidx;
2582
2583         /*
2584          * Set a timeout in case the chip goes out to lunch.
2585          */
2586         ifp->if_timer = 5;
2587 }
2588
2589 static void
2590 bge_init(void *xsc)
2591 {
2592         struct bge_softc *sc = xsc;
2593         struct ifnet *ifp = &sc->arpcom.ac_if;
2594         uint16_t *m;
2595
2596         ASSERT_SERIALIZED(ifp->if_serializer);
2597
2598         if (ifp->if_flags & IFF_RUNNING)
2599                 return;
2600
2601         /* Cancel pending I/O and flush buffers. */
2602         bge_stop(sc);
2603         bge_reset(sc);
2604         bge_chipinit(sc);
2605
2606         /*
2607          * Init the various state machines, ring
2608          * control blocks and firmware.
2609          */
2610         if (bge_blockinit(sc)) {
2611                 if_printf(ifp, "initialization failure\n");
2612                 return;
2613         }
2614
2615         /* Specify MTU. */
2616         CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
2617             ETHER_HDR_LEN + ETHER_CRC_LEN + EVL_ENCAPLEN);
2618
2619         /* Load our MAC address. */
2620         m = (uint16_t *)&sc->arpcom.ac_enaddr[0];
2621         CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
2622         CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
2623
2624         /* Enable or disable promiscuous mode as needed. */
2625         bge_setpromisc(sc);
2626
2627         /* Program multicast filter. */
2628         bge_setmulti(sc);
2629
2630         /* Init RX ring. */
2631         bge_init_rx_ring_std(sc);
2632
2633         /*
2634          * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
2635          * memory to ensure that the chip has in fact read the first
2636          * entry of the ring.
2637          */
2638         if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
2639                 uint32_t                v, i;
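                /*
                 * bge_newbuf_std() programs each standard ring slot with
                 * an MCLBYTES - ETHER_ALIGN buffer length, so seeing that
                 * value in NIC memory means the chip has fetched the
                 * first descriptor.
                 */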
2640                 for (i = 0; i < 10; i++) {
2641                         DELAY(20);
2642                         v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
2643                         if (v == (MCLBYTES - ETHER_ALIGN))
2644                                 break;
2645                 }
2646                 if (i == 10)
2647                         if_printf(ifp, "5705 A0 chip failed to load RX ring\n");
2648         }
2649
2650         /* Init jumbo RX ring. */
2651         if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
2652                 bge_init_rx_ring_jumbo(sc);
2653
2654         /* Init our RX return ring index */
2655         sc->bge_rx_saved_considx = 0;
2656
2657         /* Init TX ring. */
2658         bge_init_tx_ring(sc);
2659
2660         /* Turn on transmitter */
2661         BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE);
2662
2663         /* Turn on receiver */
2664         BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
2665
2666         /* Tell firmware we're alive. */
2667         BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
2668
2669         /* Enable host interrupts. */
2670         BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
2671         BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
2672         CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
2673
2674         bge_ifmedia_upd(ifp);
2675
2676         ifp->if_flags |= IFF_RUNNING;
2677         ifp->if_flags &= ~IFF_OACTIVE;
2678
2679         callout_reset(&sc->bge_stat_timer, hz, bge_tick, sc);
2680 }
2681
2682 /*
2683  * Set media options.
2684  */
2685 static int
2686 bge_ifmedia_upd(struct ifnet *ifp)
2687 {
2688         struct bge_softc *sc = ifp->if_softc;
2689         struct ifmedia *ifm = &sc->bge_ifmedia;
2690         struct mii_data *mii;
2691
2692         /* If this is a 1000baseX NIC, enable the TBI port. */
2693         if (sc->bge_tbi) {
2694                 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2695                         return(EINVAL);
2696                 switch(IFM_SUBTYPE(ifm->ifm_media)) {
2697                 case IFM_AUTO:
2698                         /*
2699                          * The BCM5704 ASIC appears to have a special
2700                          * mechanism for programming the autoneg
2701                          * advertisement registers in TBI mode.
2702                          */
2703                         if (!bge_fake_autoneg &&
2704                             sc->bge_asicrev == BGE_ASICREV_BCM5704) {
2705                                 uint32_t sgdig;
2706
2707                                 CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
2708                                 sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
2709                                 sgdig |= BGE_SGDIGCFG_AUTO |
2710                                          BGE_SGDIGCFG_PAUSE_CAP |
2711                                          BGE_SGDIGCFG_ASYM_PAUSE;
2712                                 CSR_WRITE_4(sc, BGE_SGDIG_CFG,
2713                                             sgdig | BGE_SGDIGCFG_SEND);
2714                                 DELAY(5);
2715                                 CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig);
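				/*
				 * (The SEND bit looks like a commit
				 * strobe: the config is written once
				 * with BGE_SGDIGCFG_SEND set, held for
				 * 5us, then rewritten without it,
				 * which presumably latches the new
				 * advertisement into the autoneg
				 * logic.)
				 */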
2716                         }
2717                         break;
2718                 case IFM_1000_SX:
2719                         if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
2720                                 BGE_CLRBIT(sc, BGE_MAC_MODE,
2721                                     BGE_MACMODE_HALF_DUPLEX);
2722                         } else {
2723                                 BGE_SETBIT(sc, BGE_MAC_MODE,
2724                                     BGE_MACMODE_HALF_DUPLEX);
2725                         }
2726                         break;
2727                 default:
2728                         return(EINVAL);
2729                 }
2730                 return(0);
2731         }
2732
2733         mii = device_get_softc(sc->bge_miibus);
2734         sc->bge_link = 0;
2735         if (mii->mii_instance) {
2736                 struct mii_softc *miisc;
2737                 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
2738                         mii_phy_reset(miisc);
2739         }
2740         mii_mediachg(mii);
2741
2742         return(0);
2743 }
2744
2745 /*
2746  * Report current media status.
2747  */
2748 static void
2749 bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
2750 {
2751         struct bge_softc *sc = ifp->if_softc;
2752         struct mii_data *mii;
2753
2754         if (sc->bge_tbi) {
2755                 ifmr->ifm_status = IFM_AVALID;
2756                 ifmr->ifm_active = IFM_ETHER;
2757                 if (CSR_READ_4(sc, BGE_MAC_STS) &
2758                     BGE_MACSTAT_TBI_PCS_SYNCHED)
2759                         ifmr->ifm_status |= IFM_ACTIVE;
2760                 ifmr->ifm_active |= IFM_1000_SX;
2761                 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
2762                         ifmr->ifm_active |= IFM_HDX;
2763                 else
2764                         ifmr->ifm_active |= IFM_FDX;
2765                 return;
2766         }
2767
2768         mii = device_get_softc(sc->bge_miibus);
2769         mii_pollstat(mii);
2770         ifmr->ifm_active = mii->mii_media_active;
2771         ifmr->ifm_status = mii->mii_media_status;
2772 }
2773
2774 static int
2775 bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
2776 {
2777         struct bge_softc *sc = ifp->if_softc;
2778         struct ifreq *ifr = (struct ifreq *) data;
2779         int mask, error = 0;
2780         struct mii_data *mii;
2781
2782         ASSERT_SERIALIZED(ifp->if_serializer);
2783
2784         switch(command) {
2785         case SIOCSIFMTU:
2786                 /* Disallow jumbo frames on 5705/5750. */
2787                 if (((sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
2788                       sc->bge_asicrev == BGE_ASICREV_BCM5750) &&
2789                      ifr->ifr_mtu > ETHERMTU) || ifr->ifr_mtu > BGE_JUMBO_MTU)
2790                         error = EINVAL;
2791                 else {
2792                         ifp->if_mtu = ifr->ifr_mtu;
2793                         ifp->if_flags &= ~IFF_RUNNING;
2794                         bge_init(sc);
2795                 }
2796                 break;
2797         case SIOCSIFFLAGS:
2798                 if (ifp->if_flags & IFF_UP) {
2799                         if (ifp->if_flags & IFF_RUNNING) {
2800                 int flags = ifp->if_flags ^ sc->bge_if_flags;
2801
2802                                 /*
2803                                  * If only the state of the PROMISC flag
2804                                  * changed, then just use the 'set promisc
2805                                  * mode' command instead of reinitializing
2806                                  * the entire NIC. Doing a full re-init
2807                                  * means reloading the firmware and waiting
2808                                  * for it to start up, which may take a
2809                                  * second or two.  Similarly for ALLMULTI.
2810                                  */
2811                                 if (flags & IFF_PROMISC)
2812                                         bge_setpromisc(sc);
2813                                 if (flags & IFF_ALLMULTI)
2814                                         bge_setmulti(sc);
2815                         } else {
2816                                 bge_init(sc);
2817                         }
2818                 } else {
2819                         if (ifp->if_flags & IFF_RUNNING)
2820                                 bge_stop(sc);
2821                 }
2822                 sc->bge_if_flags = ifp->if_flags;
2823                 error = 0;
2824                 break;
2825         case SIOCADDMULTI:
2826         case SIOCDELMULTI:
2827                 if (ifp->if_flags & IFF_RUNNING) {
2828                         bge_setmulti(sc);
2829                         error = 0;
2830                 }
2831                 break;
2832         case SIOCSIFMEDIA:
2833         case SIOCGIFMEDIA:
2834                 if (sc->bge_tbi) {
2835                         error = ifmedia_ioctl(ifp, ifr,
2836                             &sc->bge_ifmedia, command);
2837                 } else {
2838                         mii = device_get_softc(sc->bge_miibus);
2839                         error = ifmedia_ioctl(ifp, ifr,
2840                             &mii->mii_media, command);
2841                 }
2842                 break;
2843         case SIOCSIFCAP:
2844                 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
2845                 if (mask & IFCAP_HWCSUM) {
2846                         ifp->if_capenable ^= IFCAP_HWCSUM;
2847                         if (IFCAP_HWCSUM & ifp->if_capenable)
2848                                 ifp->if_hwassist = BGE_CSUM_FEATURES;
2849                         else
2850                                 ifp->if_hwassist = 0;
2851                 }
2852                 error = 0;
2853                 break;
2854         default:
2855                 error = ether_ioctl(ifp, command, data);
2856                 break;
2857         }
2858         return(error);
2859 }
2860
2861 static void
2862 bge_watchdog(struct ifnet *ifp)
2863 {
2864         struct bge_softc *sc = ifp->if_softc;
2865
2866         if_printf(ifp, "watchdog timeout -- resetting\n");
2867
2868         ifp->if_flags &= ~IFF_RUNNING;
2869         bge_init(sc);
2870
2871         ifp->if_oerrors++;
2872
2873         if (!ifq_is_empty(&ifp->if_snd))
2874                 ifp->if_start(ifp);
2875 }
2876
2877 /*
2878  * Stop the adapter and free any mbufs allocated to the
2879  * RX and TX lists.
2880  */
2881 static void
2882 bge_stop(struct bge_softc *sc)
2883 {
2884         struct ifnet *ifp = &sc->arpcom.ac_if;
2885         struct ifmedia_entry *ifm;
2886         struct mii_data *mii = NULL;
2887         int mtmp, itmp;
2888
2889         ASSERT_SERIALIZED(ifp->if_serializer);
2890
2891         if (!sc->bge_tbi)
2892                 mii = device_get_softc(sc->bge_miibus);
2893
2894         callout_stop(&sc->bge_stat_timer);
2895
2896         /*
2897          * Disable all of the receiver blocks
2898          */
2899         BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
2900         BGE_CLRBIT(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
2901         BGE_CLRBIT(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
2902         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
2903             sc->bge_asicrev != BGE_ASICREV_BCM5750)
2904                 BGE_CLRBIT(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
2905         BGE_CLRBIT(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
2906         BGE_CLRBIT(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
2907         BGE_CLRBIT(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
2908
2909         /*
2910          * Disable all of the transmit blocks
2911          */
2912         BGE_CLRBIT(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
2913         BGE_CLRBIT(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
2914         BGE_CLRBIT(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
2915         BGE_CLRBIT(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
2916         BGE_CLRBIT(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
2917         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
2918             sc->bge_asicrev != BGE_ASICREV_BCM5750)
2919                 BGE_CLRBIT(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
2920         BGE_CLRBIT(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
2921
2922         /*
2923          * Shut down all of the memory managers and related
2924          * state machines.
2925          */
2926         BGE_CLRBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
2927         BGE_CLRBIT(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
2928         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
2929             sc->bge_asicrev != BGE_ASICREV_BCM5750)
2930                 BGE_CLRBIT(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
2931         CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
2932         CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
2933         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
2934             sc->bge_asicrev != BGE_ASICREV_BCM5750) {
2935                 BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
2936                 BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
2937         }
2938
2939         /* Disable host interrupts. */
2940         BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
2941         CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
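	/*
	 * (This is the inverse of the enable sequence in bge_init():
	 * mask the PCI interrupt line and write 1 to the IRQ0 mailbox
	 * so the chip raises no further interrupts while stopped.)
	 */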
2942
2943         /*
2944          * Tell firmware we're shutting down.
2945          */
2946         BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
2947
2948         /* Free the RX lists. */
2949         bge_free_rx_ring_std(sc);
2950
2951         /* Free jumbo RX list. */
2952         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
2953             sc->bge_asicrev != BGE_ASICREV_BCM5750)
2954                 bge_free_rx_ring_jumbo(sc);
2955
2956         /* Free TX buffers. */
2957         bge_free_tx_ring(sc);
2958
2959         /*
2960          * Isolate/power down the PHY, but leave the media selection
2961          * unchanged so that things will be put back to normal when
2962          * we bring the interface back up.
2963          */
2964         if (!sc->bge_tbi) {
2965                 itmp = ifp->if_flags;
2966                 ifp->if_flags |= IFF_UP;
2967                 ifm = mii->mii_media.ifm_cur;
2968                 mtmp = ifm->ifm_media;
2969                 ifm->ifm_media = IFM_ETHER|IFM_NONE;
2970                 mii_mediachg(mii);
2971                 ifm->ifm_media = mtmp;
2972                 ifp->if_flags = itmp;
2973         }
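	/*
	 * (Two non-obvious points in the dance above, as best the
	 * editor can tell: forcing the current media to IFM_ETHER |
	 * IFM_NONE and calling mii_mediachg() is what asks the PHY
	 * driver to isolate and power down the PHY, and IFF_UP is
	 * temporarily set because PHY drivers generally ignore media
	 * changes while the interface is down.)
	 */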
2974
2975         sc->bge_link = 0;
2976
2977         sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
2978
2979         ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2980 }
2981
2982 /*
2983  * Stop all chip I/O so that the kernel's probe routines don't
2984  * get confused by errant DMAs when rebooting.
2985  */
2986 static void
2987 bge_shutdown(device_t dev)
2988 {
2989         struct bge_softc *sc = device_get_softc(dev);
2990         struct ifnet *ifp = &sc->arpcom.ac_if;
2991
2992         lwkt_serialize_enter(ifp->if_serializer);
2993         bge_stop(sc);
2994         bge_reset(sc);
2995         lwkt_serialize_exit(ifp->if_serializer);
2996 }
2997
2998 static int
2999 bge_suspend(device_t dev)
3000 {
3001         struct bge_softc *sc = device_get_softc(dev);
3002         struct ifnet *ifp = &sc->arpcom.ac_if;
3003
3004         lwkt_serialize_enter(ifp->if_serializer);
3005         bge_stop(sc);
3006         lwkt_serialize_exit(ifp->if_serializer);
3007
3008         return 0;
3009 }
3010
3011 static int
3012 bge_resume(device_t dev)
3013 {
3014         struct bge_softc *sc = device_get_softc(dev);
3015         struct ifnet *ifp = &sc->arpcom.ac_if;
3016
3017         lwkt_serialize_enter(ifp->if_serializer);
3018
3019         if (ifp->if_flags & IFF_UP) {
3020                 bge_init(sc);
3021
3022                 if (!ifq_is_empty(&ifp->if_snd))
3023                         ifp->if_start(ifp);
3024         }
3025
3026         lwkt_serialize_exit(ifp->if_serializer);
3027
3028         return 0;
3029 }
3030
3031 static void
3032 bge_setpromisc(struct bge_softc *sc)
3033 {
3034         struct ifnet *ifp = &sc->arpcom.ac_if;
3035
3036         if (ifp->if_flags & IFF_PROMISC)
3037                 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
3038         else
3039                 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
3040 }
3041
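/*
 * Editor's note on the two callbacks below: bus_dmamap_load(9) does not
 * return the physical segment list directly; it hands it to a caller
 * supplied callback as (arg, segs, nsegs, error).  These callbacks just
 * copy the segments out through the bge_dmamap_arg cookie, and bail out
 * early on error so a failed or deferred load leaves the cookie alone.
 */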
3042 static void
3043 bge_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
3044 {
3045         struct bge_dmamap_arg *ctx = arg;
3046
3047         if (error)
3048                 return;
3049
3050         KASSERT(nsegs == 1 && ctx->bge_maxsegs == 1,
3051                 ("only one segment is allowed\n"));
3052
3053         ctx->bge_segs[0] = *segs;
3054 }
3055
3056 static void
3057 bge_dma_map_mbuf(void *arg, bus_dma_segment_t *segs, int nsegs,
3058                  bus_size_t mapsz __unused, int error)
3059 {
3060         struct bge_dmamap_arg *ctx = arg;
3061         int i;
3062
3063         if (error)
3064                 return;
3065
3066         if (nsegs > ctx->bge_maxsegs) {
3067                 ctx->bge_maxsegs = 0;
3068                 return;
3069         }
3070
3071         ctx->bge_maxsegs = nsegs;
3072         for (i = 0; i < nsegs; ++i)
3073                 ctx->bge_segs[i] = segs[i];
3074 }
3075
3076 static void
3077 bge_dma_free(struct bge_softc *sc)
3078 {
3079         int i;
3080
3081         /* Destroy RX/TX mbuf DMA maps and the mbuf DMA tag. */
3082         if (sc->bge_cdata.bge_mtag != NULL) {
3083                 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
3084                         if (sc->bge_cdata.bge_rx_std_dmamap[i]) {
3085                                 bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
3086                                     sc->bge_cdata.bge_rx_std_dmamap[i]);
3087                         }
3088                 }
3089
3090                 for (i = 0; i < BGE_TX_RING_CNT; i++) {
3091                         if (sc->bge_cdata.bge_tx_dmamap[i]) {
3092                                 bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
3093                                     sc->bge_cdata.bge_tx_dmamap[i]);
3094                         }
3095                 }
3096                 bus_dma_tag_destroy(sc->bge_cdata.bge_mtag);
3097         }
3098
3099         /* Destroy standard RX ring */
3100         bge_dma_block_free(sc->bge_cdata.bge_rx_std_ring_tag,
3101                            sc->bge_cdata.bge_rx_std_ring_map,
3102                            sc->bge_ldata.bge_rx_std_ring);
3103
3104         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
3105             sc->bge_asicrev != BGE_ASICREV_BCM5750)
3106                 bge_free_jumbo_mem(sc);
3107
3108         /* Destroy RX return ring */
3109         bge_dma_block_free(sc->bge_cdata.bge_rx_return_ring_tag,
3110                            sc->bge_cdata.bge_rx_return_ring_map,
3111                            sc->bge_ldata.bge_rx_return_ring);
3112
3113         /* Destroy TX ring */
3114         bge_dma_block_free(sc->bge_cdata.bge_tx_ring_tag,
3115                            sc->bge_cdata.bge_tx_ring_map,
3116                            sc->bge_ldata.bge_tx_ring);
3117
3118         /* Destroy status block */
3119         bge_dma_block_free(sc->bge_cdata.bge_status_tag,
3120                            sc->bge_cdata.bge_status_map,
3121                            sc->bge_ldata.bge_status_block);
3122
3123         /* Destroy statistics block */
3124         bge_dma_block_free(sc->bge_cdata.bge_stats_tag,
3125                            sc->bge_cdata.bge_stats_map,
3126                            sc->bge_ldata.bge_stats);
3127
3128         /* Destroy the parent tag */
3129         if (sc->bge_cdata.bge_parent_tag != NULL)
3130                 bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag);
3131 }
3132
3133 static int
3134 bge_dma_alloc(struct bge_softc *sc)
3135 {
3136         struct ifnet *ifp = &sc->arpcom.ac_if;
3137         int nseg, i, error;
3138
3139         /*
3140          * Allocate the parent bus DMA tag appropriate for PCI.
3141          */
3142         error = bus_dma_tag_create(NULL, 1, 0,
3143                                    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3144                                    NULL, NULL,
3145                                    MAXBSIZE, BGE_NSEG_NEW,
3146                                    BUS_SPACE_MAXSIZE_32BIT,
3147                                    0, &sc->bge_cdata.bge_parent_tag);
3148         if (error) {
3149                 if_printf(ifp, "could not allocate parent dma tag\n");
3150                 return error;
3151         }
3152
3153         /*
3154          * Create DMA tag for mbufs.
3155          */
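	/*
	 * (Tag parameters, for the record: a chain of up to nseg
	 * segments, each at most one mbuf cluster (MCLBYTES), for a
	 * total of MCLBYTES * nseg; per bus_dma(9), BUS_DMA_ALLOCNOW
	 * asks busdma to reserve bounce resources at tag creation time
	 * so that later map loads do not fail for lack of memory.)
	 */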
3156         nseg = BGE_NSEG_NEW;
3157         error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1, 0,
3158                                    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3159                                    NULL, NULL,
3160                                    MCLBYTES * nseg, nseg, MCLBYTES,
3161                                    BUS_DMA_ALLOCNOW, &sc->bge_cdata.bge_mtag);
3162         if (error) {
3163                 if_printf(ifp, "could not allocate mbuf dma tag\n");
3164                 return error;
3165         }
3166
3167         /*
3168          * Create DMA maps for TX/RX mbufs.
3169          */
3170         for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
3171                 error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0,
3172                                           &sc->bge_cdata.bge_rx_std_dmamap[i]);
3173                 if (error) {
3174                         int j;
3175
3176                         for (j = 0; j < i; ++j) {
3177                                 bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
3178                                         sc->bge_cdata.bge_rx_std_dmamap[j]);
3179                         }
3180                         bus_dma_tag_destroy(sc->bge_cdata.bge_mtag);
3181                         sc->bge_cdata.bge_mtag = NULL;
3182
3183                         if_printf(ifp, "could not create DMA map for RX\n");
3184                         return error;
3185                 }
3186         }
3187
3188         for (i = 0; i < BGE_TX_RING_CNT; i++) {
3189                 error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0,
3190                                           &sc->bge_cdata.bge_tx_dmamap[i]);
3191                 if (error) {
3192                         int j;
3193
3194                         for (j = 0; j < BGE_STD_RX_RING_CNT; ++j) {
3195                                 bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
3196                                         sc->bge_cdata.bge_rx_std_dmamap[j]);
3197                         }
3198                         for (j = 0; j < i; ++j) {
3199                                 bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
3200                                         sc->bge_cdata.bge_tx_dmamap[j]);
3201                         }
3202                         bus_dma_tag_destroy(sc->bge_cdata.bge_mtag);
3203                         sc->bge_cdata.bge_mtag = NULL;
3204
3205                         if_printf(ifp, "could not create DMA map for TX\n");
3206                         return error;
3207                 }
3208         }
3209
3210         /*
3211          * Create DMA tag, map and memory for the standard RX ring.
3212          */
3213         error = bge_dma_block_alloc(sc, BGE_STD_RX_RING_SZ,
3214                                     &sc->bge_cdata.bge_rx_std_ring_tag,
3215                                     &sc->bge_cdata.bge_rx_std_ring_map,
3216                                     (void **)&sc->bge_ldata.bge_rx_std_ring,
3217                                     &sc->bge_ldata.bge_rx_std_ring_paddr);
3218         if (error) {
3219                 if_printf(ifp, "could not create std RX ring\n");
3220                 return error;
3221         }
3222
3223         /*
3224          * Create jumbo buffer pool.
3225          */
3226         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
3227             sc->bge_asicrev != BGE_ASICREV_BCM5750) {
3228                 error = bge_alloc_jumbo_mem(sc);
3229                 if (error) {
3230                         if_printf(ifp, "could not create jumbo buffer pool\n");
3231                         return error;
3232                 }
3233         }
3234
3235         /*
3236          * Create DMA tag, map and memory for the RX return ring.
3237          */
3238         error = bge_dma_block_alloc(sc, BGE_RX_RTN_RING_SZ(sc),
3239                                     &sc->bge_cdata.bge_rx_return_ring_tag,
3240                                     &sc->bge_cdata.bge_rx_return_ring_map,
3241                                     (void **)&sc->bge_ldata.bge_rx_return_ring,
3242                                     &sc->bge_ldata.bge_rx_return_ring_paddr);
3243         if (error) {
3244                 if_printf(ifp, "could not create RX ret ring\n");
3245                 return error;
3246         }
3247
3248         /*
3249          * Create DMA tag, map and memory for the TX ring.
3250          */
3251         error = bge_dma_block_alloc(sc, BGE_TX_RING_SZ,
3252                                     &sc->bge_cdata.bge_tx_ring_tag,
3253                                     &sc->bge_cdata.bge_tx_ring_map,
3254                                     (void **)&sc->bge_ldata.bge_tx_ring,
3255                                     &sc->bge_ldata.bge_tx_ring_paddr);
3256         if (error) {
3257                 if_printf(ifp, "could not create TX ring\n");
3258                 return error;
3259         }
3260
3261         /*
3262          * Create DMA tag, map and memory for the status block.
3263          */
3264         error = bge_dma_block_alloc(sc, BGE_STATUS_BLK_SZ,
3265                                     &sc->bge_cdata.bge_status_tag,
3266                                     &sc->bge_cdata.bge_status_map,
3267                                     (void **)&sc->bge_ldata.bge_status_block,
3268                                     &sc->bge_ldata.bge_status_block_paddr);
3269         if (error) {
3270                 if_printf(ifp, "could not create status block\n");
3271                 return error;
3272         }
3273
3274         /*
3275          * Create DMA tag, map and memory for the statistics block.
3276          */
3277         error = bge_dma_block_alloc(sc, BGE_STATS_SZ,
3278                                     &sc->bge_cdata.bge_stats_tag,
3279                                     &sc->bge_cdata.bge_stats_map,
3280                                     (void **)&sc->bge_ldata.bge_stats,
3281                                     &sc->bge_ldata.bge_stats_paddr);
3282         if (error) {
3283                 if_printf(ifp, "could not create stats block\n");
3284                 return error;
3285         }
3286         return 0;
3287 }
3288
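/*
 * Editor's note: bge_dma_block_alloc() below is the canonical busdma
 * triple -- bus_dma_tag_create(), bus_dmamem_alloc(), bus_dmamap_load()
 * -- for a single physically contiguous block, with the physical
 * address recovered from the load callback's cookie and each error
 * path unwinding whatever the earlier steps created.
 */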
3289 static int
3290 bge_dma_block_alloc(struct bge_softc *sc, bus_size_t size, bus_dma_tag_t *tag,
3291                     bus_dmamap_t *map, void **addr, bus_addr_t *paddr)
3292 {
3293         struct ifnet *ifp = &sc->arpcom.ac_if;
3294         struct bge_dmamap_arg ctx;
3295         bus_dma_segment_t seg;
3296         int error;
3297
3298         /*
3299          * Create DMA tag
3300          */
3301         error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, PAGE_SIZE, 0,
3302                                    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3303                                    NULL, NULL, size, 1, size, 0, tag);
3304         if (error) {
3305                 if_printf(ifp, "could not allocate dma tag\n");
3306                 return error;
3307         }
3308
3309         /*
3310          * Allocate DMA'able memory
3311          */
3312         error = bus_dmamem_alloc(*tag, addr, BUS_DMA_WAITOK | BUS_DMA_ZERO,
3313                                  map);
3314         if (error) {
3315                 if_printf(ifp, "could not allocate dma memory\n");
3316                 bus_dma_tag_destroy(*tag);
3317                 *tag = NULL;
3318                 return error;
3319         }
3320
3321         /*
3322          * Load the DMA'able memory
3323          */
3324         ctx.bge_maxsegs = 1;
3325         ctx.bge_segs = &seg;
3326         error = bus_dmamap_load(*tag, *map, *addr, size, bge_dma_map_addr, &ctx,
3327                                 BUS_DMA_WAITOK);
3328         if (error) {
3329                 if_printf(ifp, "could not load dma memory\n");
3330                 bus_dmamem_free(*tag, *addr, *map);
3331                 bus_dma_tag_destroy(*tag);
3332                 *tag = NULL;
3333                 return error;
3334         }
3335         *paddr = ctx.bge_segs[0].ds_addr;
3336
3337         return 0;
3338 }
3339
3340 static void
3341 bge_dma_block_free(bus_dma_tag_t tag, bus_dmamap_t map, void *addr)
3342 {
3343         if (tag != NULL) {
3344                 bus_dmamap_unload(tag, map);
3345                 bus_dmamem_free(tag, addr, map);
3346                 bus_dma_tag_destroy(tag);
3347         }
3348 }
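
#if 0
/*
 * Editor's sketch, not part of the driver: how the block helpers above
 * pair up for a hypothetical DMA area.  It assumes bge_dma_alloc() has
 * already created sc->bge_cdata.bge_parent_tag; the function name and
 * the use of PAGE_SIZE are illustrative only.
 */
static int
bge_dma_block_example(struct bge_softc *sc)
{
	bus_dma_tag_t tag;
	bus_dmamap_t map;
	void *vaddr;
	bus_addr_t paddr;
	int error;

	/* One contiguous, zeroed block suitable for the chip. */
	error = bge_dma_block_alloc(sc, PAGE_SIZE, &tag, &map,
				    &vaddr, &paddr);
	if (error)
		return error;

	/* ... hand paddr to the hardware, touch vaddr from the host ... */

	/* Unload, free and destroy in one call. */
	bge_dma_block_free(tag, map, vaddr);
	return 0;
}
#endif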