Busdma(9)-fy, mainly obtained from FreeBSD: dragonfly.git / sys/dev/netif/bge/if_bge.c
/*
 * Copyright (c) 2001 Wind River Systems
 * Copyright (c) 1997, 1998, 1999, 2001
 *      Bill Paul <wpaul@windriver.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/bge/if_bge.c,v 1.3.2.39 2005/07/03 03:41:18 silby Exp $
 * $DragonFly: src/sys/dev/netif/bge/if_bge.c,v 1.68 2007/04/22 04:16:26 sephe Exp $
 *
 */

/*
 * Broadcom BCM570x family gigabit ethernet driver for FreeBSD.
 *
 * Written by Bill Paul <wpaul@windriver.com>
 * Senior Engineer, Wind River Systems
 */

/*
 * The Broadcom BCM5700 is based on technology originally developed by
 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
 * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
 * frames, highly configurable RX filtering, and 16 RX and TX queues
 * (which, along with RX filter rules, can be used for QOS applications).
 * Other features, such as TCP segmentation, may be available as part
 * of value-added firmware updates. Unlike the Tigon I and Tigon II,
 * firmware images can be stored in hardware and need not be compiled
 * into the driver.
 *
 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
 *
 * The BCM5701 is a single-chip solution incorporating both the BCM5700
 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
 * does not support external SSRAM.
 *
 * Broadcom also produces a variation of the BCM5700 under the "Altima"
 * brand name, which is functionally similar but lacks PCI-X support.
 *
 * Without external SSRAM, you can only have at most 4 TX rings,
 * and the use of the mini RX ring is disabled. This seems to imply
 * that these features are simply not available on the BCM5701. As a
 * result, this driver does not implement any support for the mini RX
 * ring.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/endian.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/queue.h>
#include <sys/serialize.h>
#include <sys/thread2.h>

#include <net/if.h>
#include <net/ifq_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <net/bpf.h>

#include <net/if_types.h>
#include <net/vlan/if_vlan_var.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>

#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/netif/mii_layer/mii.h>
#include <dev/netif/mii_layer/miivar.h>
#include <dev/netif/mii_layer/miidevs.h>
#include <dev/netif/mii_layer/brgphyreg.h>

#include <bus/pci/pcidevs.h>
#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>

#include "if_bgereg.h"

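/*
 * Checksum offload capabilities advertised to the network stack: the
 * hardware can compute IP, TCP and UDP checksums.
 */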
#define BGE_CSUM_FEATURES       (CSUM_IP | CSUM_TCP | CSUM_UDP)

/* "controller miibus0" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

/*
 * Various supported device vendors/types and their names. Note: the
 * spec seems to indicate that the hardware still has Alteon's vendor
 * ID burned into it, though it will always be overridden by the vendor
 * ID in the EEPROM. Just to be safe, we cover all possibilities.
 */
#define BGE_DEVDESC_MAX         64      /* Maximum device description length */

static struct bge_type bge_devs[] = {
        { PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_BCM5700,
                "Alteon BCM5700 Gigabit Ethernet" },
        { PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_BCM5701,
                "Alteon BCM5701 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5700,
                "Broadcom BCM5700 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5701,
                "Broadcom BCM5701 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702X,
                "Broadcom BCM5702X Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702_ALT,
                "Broadcom BCM5702 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703X,
                "Broadcom BCM5703X Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703A3,
                "Broadcom BCM5703 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704C,
                "Broadcom BCM5704C Dual Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704S,
                "Broadcom BCM5704S Dual Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705,
                "Broadcom BCM5705 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705K,
                "Broadcom BCM5705K Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705M,
                "Broadcom BCM5705M Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705M_ALT,
                "Broadcom BCM5705M Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5714,
                "Broadcom BCM5714C Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5721,
                "Broadcom BCM5721 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5750,
                "Broadcom BCM5750 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5750M,
                "Broadcom BCM5750M Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751,
                "Broadcom BCM5751 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751M,
                "Broadcom BCM5751M Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5752,
                "Broadcom BCM5752 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5782,
                "Broadcom BCM5782 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5788,
                "Broadcom BCM5788 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5789,
                "Broadcom BCM5789 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5901,
                "Broadcom BCM5901 Fast Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5901A2,
                "Broadcom BCM5901A2 Fast Ethernet" },
        { PCI_VENDOR_SCHNEIDERKOCH, PCI_PRODUCT_SCHNEIDERKOCH_SK_9DX1,
                "SysKonnect Gigabit Ethernet" },
        { PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1000,
                "Altima AC1000 Gigabit Ethernet" },
        { PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1001,
                "Altima AC1001 Gigabit Ethernet" },
        { PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC9100,
                "Altima AC9100 Gigabit Ethernet" },
        { 0, 0, NULL }
};

static int      bge_probe(device_t);
static int      bge_attach(device_t);
static int      bge_detach(device_t);
static void     bge_release_resources(struct bge_softc *);
static void     bge_txeof(struct bge_softc *);
static void     bge_rxeof(struct bge_softc *);

static void     bge_tick(void *);
static void     bge_tick_serialized(void *);
static void     bge_stats_update(struct bge_softc *);
static void     bge_stats_update_regs(struct bge_softc *);
static int      bge_encap(struct bge_softc *, struct mbuf *, uint32_t *);

static void     bge_intr(void *);
static void     bge_start(struct ifnet *);
static int      bge_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void     bge_init(void *);
static void     bge_stop(struct bge_softc *);
static void     bge_watchdog(struct ifnet *);
static void     bge_shutdown(device_t);
static int      bge_suspend(device_t);
static int      bge_resume(device_t);
static int      bge_ifmedia_upd(struct ifnet *);
static void     bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);

static uint8_t  bge_eeprom_getbyte(struct bge_softc *, uint32_t, uint8_t *);
static int      bge_read_eeprom(struct bge_softc *, caddr_t, uint32_t, size_t);

static void     bge_setmulti(struct bge_softc *);
static void     bge_setpromisc(struct bge_softc *);

static void     bge_handle_events(struct bge_softc *);
static int      bge_alloc_jumbo_mem(struct bge_softc *);
static void     bge_free_jumbo_mem(struct bge_softc *);
static struct bge_jslot
                *bge_jalloc(struct bge_softc *);
static void     bge_jfree(void *);
static void     bge_jref(void *);
static int      bge_newbuf_std(struct bge_softc *, int, struct mbuf *);
static int      bge_newbuf_jumbo(struct bge_softc *, int, struct mbuf *);
static int      bge_init_rx_ring_std(struct bge_softc *);
static void     bge_free_rx_ring_std(struct bge_softc *);
static int      bge_init_rx_ring_jumbo(struct bge_softc *);
static void     bge_free_rx_ring_jumbo(struct bge_softc *);
static void     bge_free_tx_ring(struct bge_softc *);
static int      bge_init_tx_ring(struct bge_softc *);

static int      bge_chipinit(struct bge_softc *);
static int      bge_blockinit(struct bge_softc *);

#ifdef notdef
static uint8_t  bge_vpd_readbyte(struct bge_softc *, uint32_t);
static void     bge_vpd_read_res(struct bge_softc *, struct vpd_res *, uint32_t);
static void     bge_vpd_read(struct bge_softc *);
#endif

static uint32_t bge_readmem_ind(struct bge_softc *, uint32_t);
static void     bge_writemem_ind(struct bge_softc *, uint32_t, uint32_t);
#ifdef notdef
static uint32_t bge_readreg_ind(struct bge_softc *, uint32_t);
#endif
static void     bge_writereg_ind(struct bge_softc *, uint32_t, uint32_t);

static int      bge_miibus_readreg(device_t, int, int);
static int      bge_miibus_writereg(device_t, int, int, int);
static void     bge_miibus_statchg(device_t);

static void     bge_reset(struct bge_softc *);

static void     bge_dma_map_addr(void *, bus_dma_segment_t *, int, int);
static void     bge_dma_map_mbuf(void *, bus_dma_segment_t *, int,
                                 bus_size_t, int);
static int      bge_dma_alloc(struct bge_softc *);
static void     bge_dma_free(struct bge_softc *);
static int      bge_dma_block_alloc(struct bge_softc *, bus_size_t,
                                    bus_dma_tag_t *, bus_dmamap_t *,
                                    void **, bus_addr_t *);
static void     bge_dma_block_free(bus_dma_tag_t, bus_dmamap_t, void *);

/*
 * Set the following tunable to 1 for some IBM blade servers with the DNLK
 * switch module. Auto negotiation is broken for those configurations.
 */
static int      bge_fake_autoneg = 0;
TUNABLE_INT("hw.bge.fake_autoneg", &bge_fake_autoneg);

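/*
 * For example, the tunable can be set from the loader via
 * /boot/loader.conf:
 *
 *      hw.bge.fake_autoneg="1"
 */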
static device_method_t bge_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe,         bge_probe),
        DEVMETHOD(device_attach,        bge_attach),
        DEVMETHOD(device_detach,        bge_detach),
        DEVMETHOD(device_shutdown,      bge_shutdown),
        DEVMETHOD(device_suspend,       bge_suspend),
        DEVMETHOD(device_resume,        bge_resume),

        /* bus interface */
        DEVMETHOD(bus_print_child,      bus_generic_print_child),
        DEVMETHOD(bus_driver_added,     bus_generic_driver_added),

        /* MII interface */
        DEVMETHOD(miibus_readreg,       bge_miibus_readreg),
        DEVMETHOD(miibus_writereg,      bge_miibus_writereg),
        DEVMETHOD(miibus_statchg,       bge_miibus_statchg),

        { 0, 0 }
};

static DEFINE_CLASS_0(bge, bge_driver, bge_methods, sizeof(struct bge_softc));
static devclass_t bge_devclass;

DECLARE_DUMMY_MODULE(if_bge);
DRIVER_MODULE(if_bge, pci, bge_driver, bge_devclass, 0, 0);
DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0);

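/*
 * Indirect access to NIC-local memory: the chip exposes a window into
 * its internal RAM through PCI config space.  bge_readmem_ind() and
 * bge_writemem_ind() point the window base at the target offset via
 * BGE_PCI_MEMWIN_BASEADDR and then move the data through
 * BGE_PCI_MEMWIN_DATA, so no BAR mapping is needed.
 */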
static uint32_t
bge_readmem_ind(struct bge_softc *sc, uint32_t off)
{
        device_t dev = sc->bge_dev;

        pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
        return(pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4));
}

static void
bge_writemem_ind(struct bge_softc *sc, uint32_t off, uint32_t val)
{
        device_t dev = sc->bge_dev;

        pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
        pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
}

#ifdef notdef
static uint32_t
bge_readreg_ind(struct bge_softc *sc, uint32_t off)
{
        device_t dev = sc->bge_dev;

        pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
        return(pci_read_config(dev, BGE_PCI_REG_DATA, 4));
}
#endif

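/*
 * bge_writereg_ind() uses the same trick through the register window
 * (BGE_PCI_REG_BASEADDR/BGE_PCI_REG_DATA) rather than the memory window.
 */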
static void
bge_writereg_ind(struct bge_softc *sc, uint32_t off, uint32_t val)
{
        device_t dev = sc->bge_dev;

        pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
        pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);
}

#ifdef notdef
static uint8_t
bge_vpd_readbyte(struct bge_softc *sc, uint32_t addr)
{
        device_t dev = sc->bge_dev;
        uint32_t val;
        int i;

        pci_write_config(dev, BGE_PCI_VPD_ADDR, addr, 2);
        for (i = 0; i < BGE_TIMEOUT * 10; i++) {
                DELAY(10);
                if (pci_read_config(dev, BGE_PCI_VPD_ADDR, 2) & BGE_VPD_FLAG)
                        break;
        }

        if (i == BGE_TIMEOUT * 10) {
                device_printf(sc->bge_dev, "VPD read timed out\n");
                return(0);
        }

        val = pci_read_config(dev, BGE_PCI_VPD_DATA, 4);

        return((val >> ((addr % 4) * 8)) & 0xFF);
}

static void
bge_vpd_read_res(struct bge_softc *sc, struct vpd_res *res, uint32_t addr)
{
        size_t i;
        uint8_t *ptr;

        ptr = (uint8_t *)res;
        for (i = 0; i < sizeof(struct vpd_res); i++)
                ptr[i] = bge_vpd_readbyte(sc, i + addr);
}

static void
bge_vpd_read(struct bge_softc *sc)
{
        int pos = 0, i;
        struct vpd_res res;

        if (sc->bge_vpd_prodname != NULL)
                kfree(sc->bge_vpd_prodname, M_DEVBUF);
        if (sc->bge_vpd_readonly != NULL)
                kfree(sc->bge_vpd_readonly, M_DEVBUF);
        sc->bge_vpd_prodname = NULL;
        sc->bge_vpd_readonly = NULL;

        bge_vpd_read_res(sc, &res, pos);

        if (res.vr_id != VPD_RES_ID) {
                device_printf(sc->bge_dev,
                              "bad VPD resource id: expected %x got %x\n",
                              VPD_RES_ID, res.vr_id);
                return;
        }

        pos += sizeof(res);
        sc->bge_vpd_prodname = kmalloc(res.vr_len + 1, M_DEVBUF, M_INTWAIT);
        for (i = 0; i < res.vr_len; i++)
                sc->bge_vpd_prodname[i] = bge_vpd_readbyte(sc, i + pos);
        sc->bge_vpd_prodname[i] = '\0';
        pos += i;

        bge_vpd_read_res(sc, &res, pos);

        if (res.vr_id != VPD_RES_READ) {
                device_printf(sc->bge_dev,
                              "bad VPD resource id: expected %x got %x\n",
                              VPD_RES_READ, res.vr_id);
                return;
        }

        pos += sizeof(res);
        sc->bge_vpd_readonly = kmalloc(res.vr_len, M_DEVBUF, M_INTWAIT);
        for (i = 0; i < res.vr_len; i++)
                sc->bge_vpd_readonly[i] = bge_vpd_readbyte(sc, i + pos);
}
#endif

/*
 * Read a byte of data stored in the EEPROM at address 'addr.' The
 * BCM570x supports both the traditional bitbang interface and an
 * auto access interface for reading the EEPROM. We use the auto
 * access method.
 */
static uint8_t
bge_eeprom_getbyte(struct bge_softc *sc, uint32_t addr, uint8_t *dest)
{
        int i;
        uint32_t byte = 0;

        /*
         * Enable use of auto EEPROM access so we can avoid
         * having to use the bitbang method.
         */
        BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);

        /* Reset the EEPROM, load the clock period. */
        CSR_WRITE_4(sc, BGE_EE_ADDR,
            BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
        DELAY(20);

        /* Issue the read EEPROM command. */
        CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);

        /* Wait for completion */
        for (i = 0; i < BGE_TIMEOUT * 10; i++) {
                DELAY(10);
                if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
                        break;
        }

        if (i == BGE_TIMEOUT * 10) {
                if_printf(&sc->arpcom.ac_if, "eeprom read timed out\n");
                return(1);
        }

        /* Get result. */
        byte = CSR_READ_4(sc, BGE_EE_DATA);

        *dest = (byte >> ((addr % 4) * 8)) & 0xFF;

        return(0);
}

/*
 * Read a sequence of bytes from the EEPROM.
 */
static int
bge_read_eeprom(struct bge_softc *sc, caddr_t dest, uint32_t off, size_t len)
{
        size_t i;
        int err;
        uint8_t byte;

        for (byte = 0, err = 0, i = 0; i < len; i++) {
                err = bge_eeprom_getbyte(sc, off + i, &byte);
                if (err)
                        break;
                *(dest + i) = byte;
        }

        return(err ? 1 : 0);
}

static int
bge_miibus_readreg(device_t dev, int phy, int reg)
{
        struct bge_softc *sc;
        struct ifnet *ifp;
        uint32_t val, autopoll;
        int i;

        sc = device_get_softc(dev);
        ifp = &sc->arpcom.ac_if;

        /*
         * Broadcom's own driver always assumes the internal
         * PHY is at GMII address 1. On some chips, the PHY responds
         * to accesses at all addresses, which could cause us to
         * bogusly attach the PHY 32 times at probe time. Always
         * restricting the lookup to address 1 is simpler than
         * trying to figure out which chip revisions should be
         * special-cased.
         */
        if (phy != 1)
                return(0);

        /* Reading with autopolling on may trigger PCI errors */
        autopoll = CSR_READ_4(sc, BGE_MI_MODE);
        if (autopoll & BGE_MIMODE_AUTOPOLL) {
                BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
                DELAY(40);
        }

        CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY|
            BGE_MIPHY(phy)|BGE_MIREG(reg));

        for (i = 0; i < BGE_TIMEOUT; i++) {
                val = CSR_READ_4(sc, BGE_MI_COMM);
                if (!(val & BGE_MICOMM_BUSY))
                        break;
        }

        if (i == BGE_TIMEOUT) {
                if_printf(ifp, "PHY read timed out\n");
                val = 0;
                goto done;
        }

        val = CSR_READ_4(sc, BGE_MI_COMM);

done:
        if (autopoll & BGE_MIMODE_AUTOPOLL) {
                BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
                DELAY(40);
        }

        if (val & BGE_MICOMM_READFAIL)
                return(0);

        return(val & 0xFFFF);
}

static int
bge_miibus_writereg(device_t dev, int phy, int reg, int val)
{
        struct bge_softc *sc;
        uint32_t autopoll;
        int i;

        sc = device_get_softc(dev);

        /* Writing with autopolling on may trigger PCI errors */
        autopoll = CSR_READ_4(sc, BGE_MI_MODE);
        if (autopoll & BGE_MIMODE_AUTOPOLL) {
                BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
                DELAY(40);
        }

        CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY|
            BGE_MIPHY(phy)|BGE_MIREG(reg)|val);

        for (i = 0; i < BGE_TIMEOUT; i++) {
                if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY))
                        break;
        }

        if (autopoll & BGE_MIMODE_AUTOPOLL) {
                BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
                DELAY(40);
        }

        if (i == BGE_TIMEOUT) {
                if_printf(&sc->arpcom.ac_if, "PHY write timed out\n");
                return(0);
        }

        return(0);
}

static void
bge_miibus_statchg(device_t dev)
{
        struct bge_softc *sc;
        struct mii_data *mii;

        sc = device_get_softc(dev);
        mii = device_get_softc(sc->bge_miibus);

        BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
        if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
                BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
        } else {
                BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
        }

        if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
                BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
        } else {
                BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
        }
}

/*
 * Handle events that have triggered interrupts.
 */
static void
bge_handle_events(struct bge_softc *sc)
{
620 }
621
/*
 * Memory management for jumbo frames.
 */
static int
bge_alloc_jumbo_mem(struct bge_softc *sc)
{
        struct ifnet *ifp = &sc->arpcom.ac_if;
        struct bge_jslot *entry;
        uint8_t *ptr;
        bus_addr_t paddr;
        int i, error;

        /*
         * Create tag for jumbo mbufs.
         * This is really a bit of a kludge. We allocate a special
         * jumbo buffer pool which (thanks to the way our DMA
         * memory allocation works) will consist of contiguous
         * pages. This means that even though a jumbo buffer might
         * be larger than a page size, we don't really need to
         * map it into more than one DMA segment. However, the
         * default mbuf tag will result in multi-segment mappings,
         * so we have to create a special jumbo mbuf tag that
         * lets us get away with mapping the jumbo buffers as
         * a single segment. I think eventually the driver should
         * be changed so that it uses ordinary mbufs and cluster
         * buffers, i.e. jumbo frames can span multiple DMA
         * descriptors. But that's a project for another day.
         */

        /*
         * Create DMA resources for the jumbo RX ring.
         */
        error = bge_dma_block_alloc(sc, BGE_JUMBO_RX_RING_SZ,
                                    &sc->bge_cdata.bge_rx_jumbo_ring_tag,
                                    &sc->bge_cdata.bge_rx_jumbo_ring_map,
                                    (void **)&sc->bge_ldata.bge_rx_jumbo_ring,
                                    &sc->bge_ldata.bge_rx_jumbo_ring_paddr);
        if (error) {
                if_printf(ifp, "could not create jumbo RX ring\n");
                return error;
        }

        /*
         * Create DMA resources for the jumbo buffer block.
         */
        error = bge_dma_block_alloc(sc, BGE_JMEM,
                                    &sc->bge_cdata.bge_jumbo_tag,
                                    &sc->bge_cdata.bge_jumbo_map,
                                    (void **)&sc->bge_ldata.bge_jumbo_buf,
                                    &paddr);
        if (error) {
                if_printf(ifp, "could not create jumbo buffer\n");
                return error;
        }

        SLIST_INIT(&sc->bge_jfree_listhead);

        /*
         * Now divide it up into 9K pieces and save the addresses
         * in an array. Note that we play an evil trick here by using
         * the first few bytes in the buffer to hold the address
         * of the softc structure for this interface. This is because
         * bge_jfree() needs it, but it is called by the mbuf management
         * code which will not pass it to us explicitly.
         */
        for (i = 0, ptr = sc->bge_ldata.bge_jumbo_buf; i < BGE_JSLOTS; i++) {
                entry = &sc->bge_cdata.bge_jslots[i];
                entry->bge_sc = sc;
                entry->bge_buf = ptr;
                entry->bge_paddr = paddr;
                entry->bge_inuse = 0;
                entry->bge_slot = i;
                SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, entry, jslot_link);

                ptr += BGE_JLEN;
                paddr += BGE_JLEN;
        }
        return 0;
}

static void
bge_free_jumbo_mem(struct bge_softc *sc)
{
        /* Destroy jumbo RX ring. */
        bge_dma_block_free(sc->bge_cdata.bge_rx_jumbo_ring_tag,
                           sc->bge_cdata.bge_rx_jumbo_ring_map,
                           sc->bge_ldata.bge_rx_jumbo_ring);

        /* Destroy jumbo buffer block. */
        bge_dma_block_free(sc->bge_cdata.bge_jumbo_tag,
                           sc->bge_cdata.bge_jumbo_map,
                           sc->bge_ldata.bge_jumbo_buf);
}

/*
 * Allocate a jumbo buffer.
 */
static struct bge_jslot *
bge_jalloc(struct bge_softc *sc)
{
        struct bge_jslot *entry;

        lwkt_serialize_enter(&sc->bge_jslot_serializer);
        entry = SLIST_FIRST(&sc->bge_jfree_listhead);
        if (entry) {
                SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jslot_link);
                entry->bge_inuse = 1;
        } else {
                if_printf(&sc->arpcom.ac_if, "no free jumbo buffers\n");
        }
        lwkt_serialize_exit(&sc->bge_jslot_serializer);
        return(entry);
}

/*
 * Adjust usage count on a jumbo buffer.
 */
static void
bge_jref(void *arg)
{
        struct bge_jslot *entry = (struct bge_jslot *)arg;
        struct bge_softc *sc = entry->bge_sc;

        if (sc == NULL)
                panic("bge_jref: can't find softc pointer!");

        if (&sc->bge_cdata.bge_jslots[entry->bge_slot] != entry) {
                panic("bge_jref: asked to reference buffer "
                    "that we don't manage!");
        } else if (entry->bge_inuse == 0) {
                panic("bge_jref: buffer already free!");
        } else {
                atomic_add_int(&entry->bge_inuse, 1);
        }
}

/*
 * Release a jumbo buffer.
 */
static void
bge_jfree(void *arg)
{
        struct bge_jslot *entry = (struct bge_jslot *)arg;
        struct bge_softc *sc = entry->bge_sc;

        if (sc == NULL)
                panic("bge_jfree: can't find softc pointer!");

        if (&sc->bge_cdata.bge_jslots[entry->bge_slot] != entry) {
                panic("bge_jfree: asked to free buffer that we don't manage!");
        } else if (entry->bge_inuse == 0) {
                panic("bge_jfree: buffer already free!");
        } else {
                /*
                 * Possible MP race to 0, use the serializer.  The atomic insn
                 * is still needed for races against bge_jref().
                 */
                lwkt_serialize_enter(&sc->bge_jslot_serializer);
                atomic_subtract_int(&entry->bge_inuse, 1);
                if (entry->bge_inuse == 0) {
                        SLIST_INSERT_HEAD(&sc->bge_jfree_listhead,
                                          entry, jslot_link);
                }
                lwkt_serialize_exit(&sc->bge_jslot_serializer);
        }
}


/*
 * Initialize a standard receive ring descriptor.
 */
static int
bge_newbuf_std(struct bge_softc *sc, int i, struct mbuf *m)
{
        struct mbuf *m_new = NULL;
        struct bge_dmamap_arg ctx;
        bus_dma_segment_t seg;
        struct bge_rx_bd *r;
        int error;

        if (m == NULL) {
                m_new = m_getcl(MB_DONTWAIT, MT_DATA, M_PKTHDR);
                if (m_new == NULL)
                        return ENOBUFS;
        } else {
                m_new = m;
                m_new->m_data = m_new->m_ext.ext_buf;
        }
        m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;

        if (!sc->bge_rx_alignment_bug)
                m_adj(m_new, ETHER_ALIGN);

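        /*
         * bge_dma_map_mbuf() is expected to copy the resulting DMA
         * segments into ctx; a bge_maxsegs of 0 after the load signals
         * that the mbuf did not fit in the single segment we allow.
         */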
        ctx.bge_maxsegs = 1;
        ctx.bge_segs = &seg;
        error = bus_dmamap_load_mbuf(sc->bge_cdata.bge_mtag,
                                     sc->bge_cdata.bge_rx_std_dmamap[i],
                                     m_new, bge_dma_map_mbuf, &ctx,
                                     BUS_DMA_NOWAIT);
        if (error || ctx.bge_maxsegs == 0) {
                if (m == NULL)
                        m_freem(m_new);
                return ENOMEM;
        }

        sc->bge_cdata.bge_rx_std_chain[i] = m_new;

        r = &sc->bge_ldata.bge_rx_std_ring[i];
        r->bge_addr.bge_addr_lo = BGE_ADDR_LO(ctx.bge_segs[0].ds_addr);
        r->bge_addr.bge_addr_hi = BGE_ADDR_HI(ctx.bge_segs[0].ds_addr);
        r->bge_flags = BGE_RXBDFLAG_END;
        r->bge_len = m_new->m_len;
        r->bge_idx = i;

        bus_dmamap_sync(sc->bge_cdata.bge_mtag,
                        sc->bge_cdata.bge_rx_std_dmamap[i],
                        BUS_DMASYNC_PREREAD);
        return 0;
}

/*
 * Initialize a jumbo receive ring descriptor. This allocates
 * a jumbo buffer from the pool managed internally by the driver.
 */
static int
bge_newbuf_jumbo(struct bge_softc *sc, int i, struct mbuf *m)
{
        struct mbuf *m_new = NULL;
        struct bge_jslot *buf;
        struct bge_rx_bd *r;
        bus_addr_t paddr;

        if (m == NULL) {
                /* Allocate the mbuf. */
                MGETHDR(m_new, MB_DONTWAIT, MT_DATA);
                if (m_new == NULL)
                        return(ENOBUFS);

                /* Allocate the jumbo buffer */
                buf = bge_jalloc(sc);
                if (buf == NULL) {
                        m_freem(m_new);
                        if_printf(&sc->arpcom.ac_if, "jumbo allocation failed "
                            "-- packet dropped!\n");
                        return ENOBUFS;
                }

                /* Attach the buffer to the mbuf. */
                m_new->m_ext.ext_arg = buf;
                m_new->m_ext.ext_buf = buf->bge_buf;
                m_new->m_ext.ext_free = bge_jfree;
                m_new->m_ext.ext_ref = bge_jref;
                m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN;

                m_new->m_flags |= M_EXT;
        } else {
                KKASSERT(m->m_flags & M_EXT);
                m_new = m;
                buf = m_new->m_ext.ext_arg;
        }
        m_new->m_data = m_new->m_ext.ext_buf;
        m_new->m_len = m_new->m_pkthdr.len = m_new->m_ext.ext_size;

        paddr = buf->bge_paddr;
        if (!sc->bge_rx_alignment_bug) {
                m_adj(m_new, ETHER_ALIGN);
                paddr += ETHER_ALIGN;
        }

        /* Set up the descriptor. */
        sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new;

        r = &sc->bge_ldata.bge_rx_jumbo_ring[i];
        r->bge_addr.bge_addr_lo = BGE_ADDR_LO(paddr);
        r->bge_addr.bge_addr_hi = BGE_ADDR_HI(paddr);
        r->bge_flags = BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING;
        r->bge_len = m_new->m_len;
        r->bge_idx = i;

        return 0;
}

/*
 * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
 * that's 1MB of memory, which is a lot. For now, we fill only the first
 * 256 ring entries and hope that our CPU is fast enough to keep up with
 * the NIC.
 */
static int
bge_init_rx_ring_std(struct bge_softc *sc)
{
        int i;

        for (i = 0; i < BGE_SSLOTS; i++) {
                if (bge_newbuf_std(sc, i, NULL) == ENOBUFS)
                        return(ENOBUFS);
        }

        bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
                        sc->bge_cdata.bge_rx_std_ring_map,
                        BUS_DMASYNC_PREWRITE);

        sc->bge_std = i - 1;
        CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);

        return(0);
}

static void
bge_free_rx_ring_std(struct bge_softc *sc)
{
        int i;

        for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
                if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
                        bus_dmamap_unload(sc->bge_cdata.bge_mtag,
                                          sc->bge_cdata.bge_rx_std_dmamap[i]);
                        m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
                        sc->bge_cdata.bge_rx_std_chain[i] = NULL;
                }
                bzero(&sc->bge_ldata.bge_rx_std_ring[i],
                    sizeof(struct bge_rx_bd));
        }
}

static int
bge_init_rx_ring_jumbo(struct bge_softc *sc)
{
        int i;
        struct bge_rcb *rcb;

        for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
                if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
                        return(ENOBUFS);
        }

        bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
                        sc->bge_cdata.bge_rx_jumbo_ring_map,
                        BUS_DMASYNC_PREWRITE);

        sc->bge_jumbo = i - 1;

        rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
        rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 0);
        CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);

        CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);

        return(0);
}

static void
bge_free_rx_ring_jumbo(struct bge_softc *sc)
{
        int i;

        for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
                if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
                        m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
                        sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
                }
                bzero(&sc->bge_ldata.bge_rx_jumbo_ring[i],
                    sizeof(struct bge_rx_bd));
        }
}

static void
bge_free_tx_ring(struct bge_softc *sc)
{
        int i;

        for (i = 0; i < BGE_TX_RING_CNT; i++) {
                if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
                        bus_dmamap_unload(sc->bge_cdata.bge_mtag,
                                          sc->bge_cdata.bge_tx_dmamap[i]);
                        m_freem(sc->bge_cdata.bge_tx_chain[i]);
                        sc->bge_cdata.bge_tx_chain[i] = NULL;
                }
                bzero(&sc->bge_ldata.bge_tx_ring[i],
                    sizeof(struct bge_tx_bd));
        }
}

static int
bge_init_tx_ring(struct bge_softc *sc)
{
        sc->bge_txcnt = 0;
        sc->bge_tx_saved_considx = 0;
        sc->bge_tx_prodidx = 0;

        /* Initialize transmit producer index for host-memory send ring. */
        CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);

        /* 5700 b2 errata */
        if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
                CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0);

        CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
        /* 5700 b2 errata */
        if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
                CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);

        return(0);
}

static void
bge_setmulti(struct bge_softc *sc)
{
        struct ifnet *ifp;
        struct ifmultiaddr *ifma;
        uint32_t hashes[4] = { 0, 0, 0, 0 };
        int h, i;

        ifp = &sc->arpcom.ac_if;

        if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
                for (i = 0; i < 4; i++)
                        CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
                return;
        }

        /* First, zot all the existing filters. */
        for (i = 0; i < 4; i++)
                CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);

        /* Now program new ones. */
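        /*
         * Each multicast address is hashed with CRC32; the low 7 bits
         * of the hash select one bit in a 128-bit filter spread across
         * the four 32-bit BGE_MAR registers.
         */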
        LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
                if (ifma->ifma_addr->sa_family != AF_LINK)
                        continue;
                h = ether_crc32_le(
                    LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
                    ETHER_ADDR_LEN) & 0x7f;
                hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
        }

        for (i = 0; i < 4; i++)
                CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
}

/*
 * Do endian, PCI and DMA initialization. Also check the on-board ROM
 * self-test results.
 */
static int
bge_chipinit(struct bge_softc *sc)
{
        int i;
        uint32_t dma_rw_ctl;

        /* Set endian type before we access any non-PCI registers. */
        pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, BGE_INIT, 4);

        /*
         * Check the 'ROM failed' bit on the RX CPU to see if
         * self-tests passed.
         */
        if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL) {
                if_printf(&sc->arpcom.ac_if,
                          "RX CPU self-diagnostics failed!\n");
                return(ENODEV);
        }

        /* Clear the MAC control register */
        CSR_WRITE_4(sc, BGE_MAC_MODE, 0);

        /*
         * Clear the MAC statistics block in the NIC's
         * internal memory.
         */
        for (i = BGE_STATS_BLOCK;
            i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t))
                BGE_MEMWIN_WRITE(sc, i, 0);

        for (i = BGE_STATUS_BLOCK;
            i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t))
                BGE_MEMWIN_WRITE(sc, i, 0);

        /* Set up the PCI DMA control register. */
        if (sc->bge_pcie) {
                /* PCI Express */
                dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
                    (0xf << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
                    (0x2 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
        } else if (pci_read_config(sc->bge_dev, BGE_PCI_PCISTATE, 4) &
                   BGE_PCISTATE_PCI_BUSMODE) {
                /* Conventional PCI bus */
                dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
                    (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
                    (0x7 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
                    (0x0F);
        } else {
                /* PCI-X bus */
                /*
                 * The 5704 uses a different encoding of read/write
                 * watermarks.
                 */
                if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
                        dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
                            (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
                            (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
                else
                        dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
                            (0x3 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
                            (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
                            (0x0F);

                /*
                 * 5703 and 5704 need ONEDMA_AT_ONCE as a workaround
                 * for hardware bugs.
                 */
                if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
                    sc->bge_asicrev == BGE_ASICREV_BCM5704) {
                        uint32_t tmp;

                        tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1f;
                        if (tmp == 0x6 || tmp == 0x7)
                                dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE;
                }
        }

        if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
            sc->bge_asicrev == BGE_ASICREV_BCM5704 ||
            sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
            sc->bge_asicrev == BGE_ASICREV_BCM5750)
                dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
        pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);

        /*
         * Set up general mode register.
         */
        CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS|
            BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS|
            BGE_MODECTL_TX_NO_PHDR_CSUM|BGE_MODECTL_RX_NO_PHDR_CSUM);

        /*
         * Disable memory write invalidate.  Apparently it is not supported
         * properly by these devices.
         */
        PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD, PCIM_CMD_MWIEN, 4);

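        /*
         * The prescaler field presumably holds (core clock in MHz - 1);
         * 65 << 1 places the value 65 in bits 1-7, matching the
         * BGE_32BITTIME_66MHZ hint for the fixed 66MHz core clock.
         */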
        /* Set the timer prescaler (always 66MHz) */
        CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/);

        return(0);
}

static int
bge_blockinit(struct bge_softc *sc)
{
        struct bge_rcb *rcb;
        bus_size_t vrcb;
        bge_hostaddr taddr;
        int i;

        /*
         * Initialize the memory window pointer register so that
         * we can access the first 32K of internal NIC RAM. This will
         * allow us to set up the TX send ring RCBs and the RX return
         * ring RCBs, plus other things which live in NIC memory.
         */
        CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);

        /* Note: the BCM5704 has a smaller mbuf space than other chips. */

        if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
            sc->bge_asicrev != BGE_ASICREV_BCM5750) {
                /* Configure mbuf memory pool */
                if (sc->bge_extram) {
                        CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
                            BGE_EXT_SSRAM);
                        if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
                                CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
                        else
                                CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
                } else {
                        CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
                            BGE_BUFFPOOL_1);
                        if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
                                CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
                        else
                                CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
                }

                /* Configure DMA resource pool */
                CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
                    BGE_DMA_DESCRIPTORS);
                CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
        }

        /* Configure mbuf pool watermarks */
        if (sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
            sc->bge_asicrev == BGE_ASICREV_BCM5750) {
                CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
                CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
        } else {
                CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
                CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
        }
        CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);

        /* Configure DMA resource watermarks */
        CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
        CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);

        /* Enable buffer manager */
        if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
            sc->bge_asicrev != BGE_ASICREV_BCM5750) {
                CSR_WRITE_4(sc, BGE_BMAN_MODE,
                    BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN);

                /* Poll for buffer manager start indication */
                for (i = 0; i < BGE_TIMEOUT; i++) {
                        if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
                                break;
                        DELAY(10);
                }

                if (i == BGE_TIMEOUT) {
                        if_printf(&sc->arpcom.ac_if,
                                  "buffer manager failed to start\n");
                        return(ENXIO);
                }
        }

        /* Enable flow-through queues */
        CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
        CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);

        /* Wait until queue initialization is complete */
        for (i = 0; i < BGE_TIMEOUT; i++) {
                if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
                        break;
                DELAY(10);
        }

        if (i == BGE_TIMEOUT) {
                if_printf(&sc->arpcom.ac_if,
                          "flow-through queue init failed\n");
                return(ENXIO);
        }

        /* Initialize the standard RX ring control block */
        rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb;
        rcb->bge_hostaddr.bge_addr_lo =
            BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr);
        rcb->bge_hostaddr.bge_addr_hi =
            BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr);
        bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
            sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREREAD);
        if (sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
            sc->bge_asicrev == BGE_ASICREV_BCM5750)
                rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
        else
                rcb->bge_maxlen_flags =
                    BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
        if (sc->bge_extram)
                rcb->bge_nicaddr = BGE_EXT_STD_RX_RINGS;
        else
                rcb->bge_nicaddr = BGE_STD_RX_RINGS;
        CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
        CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
        CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
        CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);

        /*
         * Initialize the jumbo RX ring control block
         * We set the 'ring disabled' bit in the flags
         * field until we're actually ready to start
         * using this ring (i.e. once we set the MTU
         * high enough to require it).
         */
        if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
            sc->bge_asicrev != BGE_ASICREV_BCM5750) {
                rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;

                rcb->bge_hostaddr.bge_addr_lo =
                    BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
                rcb->bge_hostaddr.bge_addr_hi =
                    BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
                bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
                    sc->bge_cdata.bge_rx_jumbo_ring_map,
                    BUS_DMASYNC_PREREAD);
                rcb->bge_maxlen_flags =
                    BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN,
                    BGE_RCB_FLAG_RING_DISABLED);
                if (sc->bge_extram)
                        rcb->bge_nicaddr = BGE_EXT_JUMBO_RX_RINGS;
                else
                        rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
                CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
                    rcb->bge_hostaddr.bge_addr_hi);
                CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
                    rcb->bge_hostaddr.bge_addr_lo);
                CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
                    rcb->bge_maxlen_flags);
                CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);

                /* Set up dummy disabled mini ring RCB */
                rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb;
                rcb->bge_maxlen_flags =
                    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
                CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
                    rcb->bge_maxlen_flags);
        }

        /*
         * Set the BD ring replenish thresholds. The recommended
         * values are 1/8th the number of descriptors allocated to
         * each ring.
         */
        CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, BGE_STD_RX_RING_CNT/8);
        CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8);

1335         /*
1336          * Disable all unused send rings by setting the 'ring disabled'
1337          * bit in the flags field of all the TX send ring control blocks.
1338          * These are located in NIC memory.
1339          */
1340         vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1341         for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) {
1342                 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1343                     BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
1344                 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1345                 vrcb += sizeof(struct bge_rcb);
1346         }
1347
1348         /* Configure TX RCB 0 (we use only the first ring) */
1349         vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1350         BGE_HOSTADDR(taddr, sc->bge_ldata.bge_tx_ring_paddr);
1351         RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1352         RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1353         RCB_WRITE_4(sc, vrcb, bge_nicaddr,
1354             BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
1355         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1356             sc->bge_asicrev != BGE_ASICREV_BCM5750) {
1357                 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1358                     BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
1359         }
1360
1361         /* Disable all unused RX return rings */
1362         vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1363         for (i = 0; i < BGE_RX_RINGS_MAX; i++) {
1364                 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0);
1365                 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0);
1366                 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1367                     BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt,
1368                     BGE_RCB_FLAG_RING_DISABLED));
1369                 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1370                 CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO +
1371                     (i * (sizeof(uint64_t))), 0);
1372                 vrcb += sizeof(struct bge_rcb);
1373         }
1374
1375         /* Initialize RX ring indexes */
1376         CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, 0);
1377         CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
1378         CSR_WRITE_4(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
1379
1380         /*
1381          * Set up RX return ring 0
1382          * Note that the NIC address for RX return rings is 0x00000000.
1383          * The return rings live entirely within the host, so the
1384          * nicaddr field in the RCB isn't used.
1385          */
1386         vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1387         BGE_HOSTADDR(taddr, sc->bge_ldata.bge_rx_return_ring_paddr);
1388         RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1389         RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1390         RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0x00000000);
1391         RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1392             BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));
1393
1394         /* Set random backoff seed for TX */
1395         CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
1396             sc->arpcom.ac_enaddr[0] + sc->arpcom.ac_enaddr[1] +
1397             sc->arpcom.ac_enaddr[2] + sc->arpcom.ac_enaddr[3] +
1398             sc->arpcom.ac_enaddr[4] + sc->arpcom.ac_enaddr[5] +
1399             BGE_TX_BACKOFF_SEED_MASK);
1400
1401         /* Set inter-packet gap */
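        /*
         * The magic 0x2620 appears to pack the defaults Broadcom's
         * own tg3 driver uses: IPG CRS 2 (bits 12-15), IPG 6
         * (bits 8-11), and a slot time of 0x20 (bits 0-7).
         */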
1402         CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);
1403
1404         /*
1405          * Specify which ring to use for packets that don't match
1406          * any RX rules.
1407          */
1408         CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
1409
1410         /*
1411          * Configure number of RX lists. One interrupt distribution
1412          * list, sixteen active lists, one bad frames class.
1413          */
1414         CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
1415
1416         /* Initialize RX list placement stats mask. */
1417         CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
1418         CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
1419
1420         /* Disable host coalescing until we get it set up */
1421         CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
1422
1423         /* Poll to make sure it's shut down. */
1424         for (i = 0; i < BGE_TIMEOUT; i++) {
1425                 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
1426                         break;
1427                 DELAY(10);
1428         }
1429
1430         if (i == BGE_TIMEOUT) {
1431                 if_printf(&sc->arpcom.ac_if,
1432                           "host coalescing engine failed to idle\n");
1433                 return(ENXIO);
1434         }
1435
1436         /* Set up host coalescing defaults */
1437         CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
1438         CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
1439         CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
1440         CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
1441         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1442             sc->bge_asicrev != BGE_ASICREV_BCM5750) {
1443                 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
1444                 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
1445         }
1446         CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0);
1447         CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0);
1448
1449         /* Set up address of statistics block */
1450         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1451             sc->bge_asicrev != BGE_ASICREV_BCM5750) {
1452                 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI,
1453                     BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr));
1454                 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
1455                     BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr));
1456
1457                 CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
1458                 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
1459                 CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
1460         }
1461
1462         /* Set up address of status block */
1463         CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI,
1464             BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr));
1465         CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
1466             BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr));
1467         sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx = 0;
1468         sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx = 0;
1469
1470         /* Turn on host coalescing state machine */
1471         CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
1472
1473         /* Turn on RX BD completion state machine and enable attentions */
1474         CSR_WRITE_4(sc, BGE_RBDC_MODE,
1475             BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);
1476
1477         /* Turn on RX list placement state machine */
1478         CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
1479
1480         /* Turn on RX list selector state machine. */
1481         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1482             sc->bge_asicrev != BGE_ASICREV_BCM5750)
1483                 CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
1484
1485         /* Turn on DMA, clear stats */
1486         CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB|
1487             BGE_MACMODE_RXDMA_ENB|BGE_MACMODE_RX_STATS_CLEAR|
1488             BGE_MACMODE_TX_STATS_CLEAR|BGE_MACMODE_RX_STATS_ENB|
1489             BGE_MACMODE_TX_STATS_ENB|BGE_MACMODE_FRMHDR_DMA_ENB|
1490             (sc->bge_tbi ? BGE_PORTMODE_TBI : BGE_PORTMODE_MII));
1491
1492         /* Set misc. local control, enable interrupts on attentions */
1493         CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);
1494
1495 #ifdef notdef
1496         /* Assert GPIO pins for PHY reset */
1497         BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
1498             BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
1499         BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
1500             BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
1501 #endif
1502
1503         /* Turn on DMA completion state machine */
1504         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1505             sc->bge_asicrev != BGE_ASICREV_BCM5750)
1506                 CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
1507
1508         /* Turn on write DMA state machine */
1509         CSR_WRITE_4(sc, BGE_WDMA_MODE,
1510             BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS);
1511         
1512         /* Turn on read DMA state machine */
1513         CSR_WRITE_4(sc, BGE_RDMA_MODE,
1514             BGE_RDMAMODE_ENABLE|BGE_RDMAMODE_ALL_ATTNS);
1515
1516         /* Turn on RX data completion state machine */
1517         CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
1518
1519         /* Turn on RX BD initiator state machine */
1520         CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
1521
1522         /* Turn on RX data and RX BD initiator state machine */
1523         CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
1524
1525         /* Turn on Mbuf cluster free state machine */
1526         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1527             sc->bge_asicrev != BGE_ASICREV_BCM5750)
1528                 CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
1529
1530         /* Turn on send BD completion state machine */
1531         CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
1532
1533         /* Turn on send data completion state machine */
1534         CSR_WRITE_4(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
1535
1536         /* Turn on send data initiator state machine */
1537         CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
1538
1539         /* Turn on send BD initiator state machine */
1540         CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
1541
1542         /* Turn on send BD selector state machine */
1543         CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
1544
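        /* Enable SDI statistics, mirroring the RX list placement setup. */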
1545         CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
1546         CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
1547             BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);
1548
1549         /* ack/clear link change events */
1550         CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
1551             BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
1552             BGE_MACSTAT_LINK_CHANGED);
1553         CSR_WRITE_4(sc, BGE_MI_STS, 0);
1554
1555         /* Enable PHY auto polling (for MII/GMII only) */
1556         if (sc->bge_tbi) {
1557                 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
1558         } else {
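                /*
                 * The 10<<16 presumably programs the MI clock count
                 * (MDIO clock divider) field that sits above the
                 * auto-poll bit; the layout isn't documented here.
                 */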
1559                 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL|10<<16);
1560                 if (sc->bge_asicrev == BGE_ASICREV_BCM5700)
1561                         CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
1562                             BGE_EVTENB_MI_INTERRUPT);
1563         }
1564
1565         /* Enable link state change attentions. */
1566         BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
1567
1568         return(0);
1569 }
1570
1571 /*
1572  * Probe for a Broadcom chip. Check the PCI vendor and device IDs
1573  * against our list and return its name if we find a match. The
1574  * controller has VPD support, so in principle the product name could
1575  * be read from the chip itself, but that path is currently disabled
1576  * (see the "notdef" block below); instead we report the compiled-in
1577  * name plus the ASIC revision.
1578  */
1579 static int
1580 bge_probe(device_t dev)
1581 {
1582         struct bge_softc *sc;
1583         struct bge_type *t;
1584         char *descbuf;
1585         uint16_t product, vendor;
1586
1587         product = pci_get_device(dev);
1588         vendor = pci_get_vendor(dev);
1589
1590         for (t = bge_devs; t->bge_name != NULL; t++) {
1591                 if (vendor == t->bge_vid && product == t->bge_did)
1592                         break;
1593         }
1594
1595         if (t->bge_name == NULL)
1596                 return(ENXIO);
1597
1598         sc = device_get_softc(dev);
1599 #ifdef notdef
1600         sc->bge_dev = dev;
1601
1602         bge_vpd_read(sc);
1603         device_set_desc(dev, sc->bge_vpd_prodname);
1604 #endif
1605         descbuf = kmalloc(BGE_DEVDESC_MAX, M_TEMP, M_WAITOK);
1606         ksnprintf(descbuf, BGE_DEVDESC_MAX, "%s, ASIC rev. %#04x", t->bge_name,
1607             pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >> 16);
1608         device_set_desc_copy(dev, descbuf);
1609         if (pci_get_subvendor(dev) == PCI_VENDOR_DELL)
1610                 sc->bge_no_3_led = 1;
1611         kfree(descbuf, M_TEMP);
1612         return(0);
1613 }
1614
1615 static int
1616 bge_attach(device_t dev)
1617 {
1618         struct ifnet *ifp;
1619         struct bge_softc *sc;
1620         uint32_t hwcfg = 0;
1621         uint32_t mac_addr = 0;
1622         int error = 0, rid;
1623         uint8_t ether_addr[ETHER_ADDR_LEN];
1624
1625         sc = device_get_softc(dev);
1626         sc->bge_dev = dev;
1627         callout_init(&sc->bge_stat_timer);
1628         lwkt_serialize_init(&sc->bge_jslot_serializer);
1629
1630         /*
1631          * Map control/status registers.
1632          */
1633         pci_enable_busmaster(dev);
1634
1635         rid = BGE_PCI_BAR0;
1636         sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
1637             RF_ACTIVE);
1638
1639         if (sc->bge_res == NULL) {
1640                 device_printf(dev, "couldn't map memory\n");
1641                 error = ENXIO;
1642                 return(error);
1643         }
1644
1645         sc->bge_btag = rman_get_bustag(sc->bge_res);
1646         sc->bge_bhandle = rman_get_bushandle(sc->bge_res);
1647
1648         /* Allocate interrupt */
1649         rid = 0;
1650
1651         sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1652             RF_SHAREABLE | RF_ACTIVE);
1653
1654         if (sc->bge_irq == NULL) {
1655                 device_printf(dev, "couldn't map interrupt\n");
1656                 error = ENXIO;
1657                 goto fail;
1658         }
1659
1660         /* Save ASIC rev. */
1661         sc->bge_chipid =
1662             pci_read_config(dev, BGE_PCI_MISC_CTL, 4) &
1663             BGE_PCIMISCCTL_ASICREV;
1664         sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid);
1665         sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid);
1666
1667         /*
1668          * Treat the 5714 and the 5752 like the 5750 until we have more info
1669          * on these chips.
1670          */
1671         if (sc->bge_asicrev == BGE_ASICREV_BCM5714 ||
1672             sc->bge_asicrev == BGE_ASICREV_BCM5752)
1673                 sc->bge_asicrev = BGE_ASICREV_BCM5750;
1674
1675         /*
1676          * XXX: Broadcom Linux driver.  Not in specs or errata.
1677          * PCI-Express?
1678          */
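        /*
         * Bits 8-15 of a PCI capability header hold the pointer to
         * the next capability, so this checks whether the MSI
         * capability chains to the expected PCIe capability offset
         * and then verifies the capability ID found there.
         */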
1679         if (sc->bge_asicrev == BGE_ASICREV_BCM5750) {
1680                 uint32_t v;
1681
1682                 v = pci_read_config(dev, BGE_PCI_MSI_CAPID, 4);
1683                 if (((v >> 8) & 0xff) == BGE_PCIE_MSI_CAPID) {
1684                         v = pci_read_config(dev, BGE_PCIE_MSI_CAPID, 4);
1685                         if ((v & 0xff) == BGE_PCIE_MSI_CAPID_VAL)
1686                                 sc->bge_pcie = 1;
1687                 }
1688         }
1689
1690         ifp = &sc->arpcom.ac_if;
1691         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1692
1693         /* Try to reset the chip. */
1694         bge_reset(sc);
1695
1696         if (bge_chipinit(sc)) {
1697                 device_printf(dev, "chip initialization failed\n");
1698                 error = ENXIO;
1699                 goto fail;
1700         }
1701
1702         /*
1703          * Get the station address from NIC SRAM, or failing that, the EEPROM.
1704          */
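        /*
         * 0x484b is ASCII "HK"; finding it in the top half of the
         * word at SRAM offset 0x0c14 marks a valid MAC address
         * mailbox (the same signature Broadcom's tg3 driver checks).
         */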
1705         mac_addr = bge_readmem_ind(sc, 0x0c14);
1706         if ((mac_addr >> 16) == 0x484b) {
1707                 ether_addr[0] = (uint8_t)(mac_addr >> 8);
1708                 ether_addr[1] = (uint8_t)mac_addr;
1709                 mac_addr = bge_readmem_ind(sc, 0x0c18);
1710                 ether_addr[2] = (uint8_t)(mac_addr >> 24);
1711                 ether_addr[3] = (uint8_t)(mac_addr >> 16);
1712                 ether_addr[4] = (uint8_t)(mac_addr >> 8);
1713                 ether_addr[5] = (uint8_t)mac_addr;
1714         } else if (bge_read_eeprom(sc, ether_addr,
1715             BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) {
1716                 device_printf(dev, "failed to read station address\n");
1717                 error = ENXIO;
1718                 goto fail;
1719         }
1720
1721         /* 5705/5750 limits RX return ring to 512 entries. */
1722         if (sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
1723             sc->bge_asicrev == BGE_ASICREV_BCM5750)
1724                 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
1725         else
1726                 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
1727
1728         error = bge_dma_alloc(sc);
1729         if (error)
1730                 goto fail;
1731
1732         /* Set default tuneable values. */
1733         sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
1734         sc->bge_rx_coal_ticks = 150;
1735         sc->bge_tx_coal_ticks = 150;
1736         sc->bge_rx_max_coal_bds = 64;
1737         sc->bge_tx_max_coal_bds = 128;
1738
1739         /* Set up ifnet structure */
1740         ifp->if_softc = sc;
1741         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1742         ifp->if_ioctl = bge_ioctl;
1743         ifp->if_start = bge_start;
1744         ifp->if_watchdog = bge_watchdog;
1745         ifp->if_init = bge_init;
1746         ifp->if_mtu = ETHERMTU;
1747         ifq_set_maxlen(&ifp->if_snd, BGE_TX_RING_CNT - 1);
1748         ifq_set_ready(&ifp->if_snd);
1749         ifp->if_hwassist = BGE_CSUM_FEATURES;
1750         ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING |
1751             IFCAP_VLAN_MTU;
1752         ifp->if_capenable = ifp->if_capabilities;
1753
1754         /*
1755          * Figure out what sort of media we have by checking the
1756          * hardware config word in the first 32k of NIC internal memory,
1757          * or fall back to examining the EEPROM if necessary.
1758          * Note: on some BCM5700 cards, this value appears to be unset.
1759          * If that's the case, we have to rely on identifying the NIC
1760          * by its PCI subsystem ID, as we do below for the SysKonnect
1761          * SK-9D41.
1762          */
1763         if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER)
1764                 hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
1765         else {
1766                 if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
1767                                     sizeof(hwcfg))) {
1768                         device_printf(dev, "failed to read EEPROM\n");
1769                         error = ENXIO;
1770                         goto fail;
1771                 }
1772                 hwcfg = ntohl(hwcfg);
1773         }
1774
1775         if ((hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER)
1776                 sc->bge_tbi = 1;
1777
1778         /* The SysKonnect SK-9D41 is a 1000baseSX card. */
1779         if (pci_get_subvendor(dev) == PCI_PRODUCT_SCHNEIDERKOCH_SK_9D41)
1780                 sc->bge_tbi = 1;
1781
1782         if (sc->bge_tbi) {
1783                 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK,
1784                     bge_ifmedia_upd, bge_ifmedia_sts);
1785                 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
1786                 ifmedia_add(&sc->bge_ifmedia,
1787                     IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL);
1788                 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
1789                 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO);
1790                 sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
1791         } else {
1792                 /*
1793                  * Do transceiver setup.
1794                  */
1795                 if (mii_phy_probe(dev, &sc->bge_miibus,
1796                     bge_ifmedia_upd, bge_ifmedia_sts)) {
1797                         device_printf(dev, "MII without any PHY!\n");
1798                         error = ENXIO;
1799                         goto fail;
1800                 }
1801         }
1802
1803         /*
1804          * When using the BCM5701 in PCI-X mode, data corruption has
1805          * been observed in the first few bytes of some received packets.
1806          * Aligning the packet buffer in memory eliminates the corruption.
1807          * Unfortunately, this misaligns the packet payloads.  On platforms
1808          * which do not support unaligned accesses, we will realign the
1809          * payloads by copying the received packets.
1810          */
1811         switch (sc->bge_chipid) {
1812         case BGE_CHIPID_BCM5701_A0:
1813         case BGE_CHIPID_BCM5701_B0:
1814         case BGE_CHIPID_BCM5701_B2:
1815         case BGE_CHIPID_BCM5701_B5:
1816                 /* If in PCI-X mode, work around the alignment bug. */
1817                 if ((pci_read_config(dev, BGE_PCI_PCISTATE, 4) &
1818                     (BGE_PCISTATE_PCI_BUSMODE | BGE_PCISTATE_PCI_BUSSPEED)) ==
1819                     BGE_PCISTATE_PCI_BUSSPEED)
1820                         sc->bge_rx_alignment_bug = 1;
1821                 break;
1822         }
1823
1824         /*
1825          * Call MI attach routine.
1826          */
1827         ether_ifattach(ifp, ether_addr, NULL);
1828
1829         error = bus_setup_intr(dev, sc->bge_irq, INTR_NETSAFE,
1830                                bge_intr, sc, &sc->bge_intrhand, 
1831                                ifp->if_serializer);
1832         if (error) {
1833                 ether_ifdetach(ifp);
1834                 device_printf(dev, "couldn't set up irq\n");
1835                 goto fail;
1836         }
1837         return(0);
1838 fail:
1839         bge_detach(dev);
1840         return(error);
1841 }
1842
1843 static int
1844 bge_detach(device_t dev)
1845 {
1846         struct bge_softc *sc = device_get_softc(dev);
1847         struct ifnet *ifp = &sc->arpcom.ac_if;
1848
1849         if (device_is_attached(dev)) {
1850                 lwkt_serialize_enter(ifp->if_serializer);
1851                 bge_stop(sc);
1852                 bge_reset(sc);
1853                 bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand);
1854                 lwkt_serialize_exit(ifp->if_serializer);
1855
1856                 ether_ifdetach(ifp);
1857         }
1858         if (sc->bge_tbi)
1859                 ifmedia_removeall(&sc->bge_ifmedia);
1860         if (sc->bge_miibus)
1861                 device_delete_child(dev, sc->bge_miibus);
1862         bus_generic_detach(dev);
1863
1864         bge_release_resources(sc);
1865         bge_dma_free(sc);
1866
1867         return 0;
1868 }
1869
1870 static void
1871 bge_release_resources(struct bge_softc *sc)
1872 {
1873         device_t dev;
1874
1875         dev = sc->bge_dev;
1876
1877         if (sc->bge_vpd_prodname != NULL)
1878                 kfree(sc->bge_vpd_prodname, M_DEVBUF);
1879
1880         if (sc->bge_vpd_readonly != NULL)
1881                 kfree(sc->bge_vpd_readonly, M_DEVBUF);
1882
1883         if (sc->bge_irq != NULL)
1884                 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->bge_irq);
1885
1886         if (sc->bge_res != NULL)
1887                 bus_release_resource(dev, SYS_RES_MEMORY,
1888                     BGE_PCI_BAR0, sc->bge_res);
1889 }
1890
1891 static void
1892 bge_reset(struct bge_softc *sc)
1893 {
1894         device_t dev;
1895         uint32_t cachesize, command, pcistate, reset;
1896         int i, val = 0;
1897
1898         dev = sc->bge_dev;
1899
1900         /* Save some important PCI state. */
1901         cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
1902         command = pci_read_config(dev, BGE_PCI_CMD, 4);
1903         pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);
1904
1905         pci_write_config(dev, BGE_PCI_MISC_CTL,
1906             BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
1907             BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW, 4);
1908
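        /*
         * The 65 << 1 presumably loads the timer prescaler field
         * (bits 1-7 of BGE_MISC_CFG) for a 66MHz core clock, giving
         * 1us timer ticks.
         */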
1909         reset = BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1);
1910
1911         /* XXX: Broadcom Linux driver. */
1912         if (sc->bge_pcie) {
1913                 if (CSR_READ_4(sc, 0x7e2c) == 0x60)     /* PCIE 1.0 */
1914                         CSR_WRITE_4(sc, 0x7e2c, 0x20);
1915                 if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
1916                         /* Prevent PCIE link training during global reset */
1917                         CSR_WRITE_4(sc, BGE_MISC_CFG, (1<<29));
1918                         reset |= (1<<29);
1919                 }
1920         }
1921
1922         /* Issue global reset */
1923         bge_writereg_ind(sc, BGE_MISC_CFG, reset);
1924
1925         DELAY(1000);
1926
1927         /* XXX: Broadcom Linux driver. */
1928         if (sc->bge_pcie) {
1929                 if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
1930                         uint32_t v;
1931
1932                         DELAY(500000); /* wait for link training to complete */
1933                         v = pci_read_config(dev, 0xc4, 4);
1934                         pci_write_config(dev, 0xc4, v | (1<<15), 4);
1935                 }
1936                 /* Set PCIE max payload size and clear error status. */
1937                 pci_write_config(dev, 0xd8, 0xf5000, 4);
1938         }
1939
1940         /* Restore some of the PCI state that got zapped by the reset */
1941         pci_write_config(dev, BGE_PCI_MISC_CTL,
1942             BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
1943             BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW, 4);
1944         pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
1945         pci_write_config(dev, BGE_PCI_CMD, command, 4);
1946         bge_writereg_ind(sc, BGE_MISC_CFG, (65 << 1));
1947
1948         /* Enable memory arbiter. */
1949         if (sc->bge_asicrev != BGE_ASICREV_BCM5705)
1950                 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
1951
1952         /*
1953          * Prevent PXE restart: write a magic number to the
1954          * general communications memory at 0xB50.
1955          */
1956         bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
1957         /*
1958          * Poll the location we just wrote until
1959          * we see the 1's complement of the magic number.
1960          * This indicates that the firmware initialization
1961          * is complete.
1962          */
1963         for (i = 0; i < BGE_TIMEOUT; i++) {
1964                 val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
1965                 if (val == ~BGE_MAGIC_NUMBER)
1966                         break;
1967                 DELAY(10);
1968         }
1969         
1970         if (i == BGE_TIMEOUT) {
1971                 if_printf(&sc->arpcom.ac_if, "firmware handshake timed out\n");
1972                 return;
1973         }
1974
1975         /*
1976          * XXX Wait for the value of the PCISTATE register to
1977          * return to its original pre-reset state. This is a
1978          * fairly good indicator of reset completion. If we don't
1979          * wait for the reset to fully complete, trying to read
1980          * from the device's non-PCI registers may yield garbage
1981          * results.
1982          */
1983         for (i = 0; i < BGE_TIMEOUT; i++) {
1984                 if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate)
1985                         break;
1986                 DELAY(10);
1987         }
1988
1989         /* Fix up byte swapping */
1990         CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS |
1991             BGE_MODECTL_BYTESWAP_DATA);
1992
1993         CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
1994
1995         /*
1996          * The 5704 in TBI mode apparently needs some special
1997          * adjustment to ensure the SERDES drive level is set
1998          * to 1.2V.
1999          */
2000         if (sc->bge_asicrev == BGE_ASICREV_BCM5704 && sc->bge_tbi) {
2001                 uint32_t serdescfg;
2002
2003                 serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG);
2004                 serdescfg = (serdescfg & ~0xFFF) | 0x880;
2005                 CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg);
2006         }
2007
2008         /* XXX: Broadcom Linux driver. */
2009         if (sc->bge_pcie && sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
2010                 uint32_t v;
2011
2012                 v = CSR_READ_4(sc, 0x7c00);
2013                 CSR_WRITE_4(sc, 0x7c00, v | (1<<25));
2014         }
2015
2016         DELAY(10000);
2017 }
2018
2019 /*
2020  * Frame reception handling. This is called if there's a frame
2021  * on the receive return list.
2022  *
2023  * Note: we have to be able to handle two possibilities here:
2024  * 1) the frame is from the jumbo receive ring
2025  * 2) the frame is from the standard receive ring
2026  */
2027
2028 static void
2029 bge_rxeof(struct bge_softc *sc)
2030 {
2031         struct ifnet *ifp;
2032         int stdcnt = 0, jumbocnt = 0;
2033
2034         if (sc->bge_rx_saved_considx ==
2035             sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx)
2036                 return;
2037
2038         ifp = &sc->arpcom.ac_if;
2039
2040         bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
2041                         sc->bge_cdata.bge_rx_return_ring_map,
2042                         BUS_DMASYNC_POSTREAD);
2043         bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
2044                         sc->bge_cdata.bge_rx_std_ring_map,
2045                         BUS_DMASYNC_POSTREAD);
2046         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
2047             sc->bge_asicrev != BGE_ASICREV_BCM5750) {
2048                 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2049                                 sc->bge_cdata.bge_rx_jumbo_ring_map,
2050                                 BUS_DMASYNC_POSTREAD);
2051         }
2052
2053         while (sc->bge_rx_saved_considx !=
2054                sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx) {
2055                 struct bge_rx_bd        *cur_rx;
2056                 uint32_t                rxidx;
2057                 struct mbuf             *m = NULL;
2058                 uint16_t                vlan_tag = 0;
2059                 int                     have_tag = 0;
2060
2061                 cur_rx =
2062                     &sc->bge_ldata.bge_rx_return_ring[sc->bge_rx_saved_considx];
2063
2064                 rxidx = cur_rx->bge_idx;
2065                 BGE_INC(sc->bge_rx_saved_considx, sc->bge_return_ring_cnt);
2066
2067                 if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
2068                         have_tag = 1;
2069                         vlan_tag = cur_rx->bge_vlan_tag;
2070                 }
2071
2072                 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
2073                         BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
2074                         m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
2075                         sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL;
2076                         jumbocnt++;
2077                         if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2078                                 ifp->if_ierrors++;
2079                                 bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
2080                                 continue;
2081                         }
2082                         if (bge_newbuf_jumbo(sc,
2083                             sc->bge_jumbo, NULL) == ENOBUFS) {
2084                                 ifp->if_ierrors++;
2085                                 bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
2086                                 continue;
2087                         }
2088                 } else {
2089                         BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
2090                         bus_dmamap_sync(sc->bge_cdata.bge_mtag,
2091                                         sc->bge_cdata.bge_rx_std_dmamap[rxidx],
2092                                         BUS_DMASYNC_POSTREAD);
2093                         bus_dmamap_unload(sc->bge_cdata.bge_mtag,
2094                                 sc->bge_cdata.bge_rx_std_dmamap[rxidx]);
2095                         m = sc->bge_cdata.bge_rx_std_chain[rxidx];
2096                         sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL;
2097                         stdcnt++;
2098                         if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2099                                 ifp->if_ierrors++;
2100                                 bge_newbuf_std(sc, sc->bge_std, m);
2101                                 continue;
2102                         }
2103                         if (bge_newbuf_std(sc, sc->bge_std,
2104                             NULL) == ENOBUFS) {
2105                                 ifp->if_ierrors++;
2106                                 bge_newbuf_std(sc, sc->bge_std, m);
2107                                 continue;
2108                         }
2109                 }
2110
2111                 ifp->if_ipackets++;
2112 #ifndef __i386__
2113                 /*
2114                  * The i386 allows unaligned accesses, but for other
2115                  * platforms we must make sure the payload is aligned.
2116                  */
2117                 if (sc->bge_rx_alignment_bug) {
2118                         bcopy(m->m_data, m->m_data + ETHER_ALIGN,
2119                             cur_rx->bge_len);
2120                         m->m_data += ETHER_ALIGN;
2121                 }
2122 #endif
2123                 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
2124                 m->m_pkthdr.rcvif = ifp;
2125
2126 #if 0 /* currently broken for some packets, possibly related to TCP options */
2127                 if (ifp->if_hwassist) {
2128                         m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2129                         if ((cur_rx->bge_ip_csum ^ 0xffff) == 0)
2130                                 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2131                         if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) {
2132                                 m->m_pkthdr.csum_data =
2133                                     cur_rx->bge_tcp_udp_csum;
2134                                 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
2135                         }
2136                 }
2137 #endif
2138
2139                 /*
2140                  * If we received a packet with a vlan tag, pass it
2141                  * to vlan_input() instead of ether_input().
2142                  */
2143                 if (have_tag) {
2144                         VLAN_INPUT_TAG(m, vlan_tag);
2145                         have_tag = vlan_tag = 0;
2146                 } else {
2147                         ifp->if_input(ifp, m);
2148                 }
2149         }
2150
2151         if (stdcnt > 0) {
2152                 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
2153                                 sc->bge_cdata.bge_rx_std_ring_map,
2154                                 BUS_DMASYNC_PREWRITE);
2155         }
2156
2157         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
2158             sc->bge_asicrev != BGE_ASICREV_BCM5750) {
2159                 if (jumbocnt > 0) {
2160                         bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2161                                         sc->bge_cdata.bge_rx_jumbo_ring_map,
2162                                         BUS_DMASYNC_PREWRITE);
2163                 }
2164         }
2165
2166         CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
2167         if (stdcnt)
2168                 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
2169         if (jumbocnt)
2170                 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
2171 }
2172
2173 static void
2174 bge_txeof(struct bge_softc *sc)
2175 {
2176         struct bge_tx_bd *cur_tx = NULL;
2177         struct ifnet *ifp;
2178
2179         if (sc->bge_tx_saved_considx ==
2180             sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx)
2181                 return;
2182
2183         ifp = &sc->arpcom.ac_if;
2184
2185         bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
2186                         sc->bge_cdata.bge_tx_ring_map,
2187                         BUS_DMASYNC_POSTREAD);
2188
2189         /*
2190          * Go through our tx ring and free mbufs for those
2191          * frames that have been sent.
2192          */
2193         while (sc->bge_tx_saved_considx !=
2194                sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx) {
2195                 uint32_t idx = 0;
2196
2197                 idx = sc->bge_tx_saved_considx;
2198                 cur_tx = &sc->bge_ldata.bge_tx_ring[idx];
2199                 if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
2200                         ifp->if_opackets++;
2201                 if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
2202                         bus_dmamap_sync(sc->bge_cdata.bge_mtag,
2203                                         sc->bge_cdata.bge_tx_dmamap[idx],
2204                                         BUS_DMASYNC_POSTWRITE);
2205                         bus_dmamap_unload(sc->bge_cdata.bge_mtag,
2206                             sc->bge_cdata.bge_tx_dmamap[idx]);
2207                         m_freem(sc->bge_cdata.bge_tx_chain[idx]);
2208                         sc->bge_cdata.bge_tx_chain[idx] = NULL;
2209                 }
2210                 sc->bge_txcnt--;
2211                 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
2212                 ifp->if_timer = 0;
2213         }
2214
2215         if (cur_tx != NULL &&
2216             (BGE_TX_RING_CNT - sc->bge_txcnt) >=
2217             (BGE_NSEG_RSVD + BGE_NSEG_SPARE))
2218                 ifp->if_flags &= ~IFF_OACTIVE;
2219
2220         if (!ifq_is_empty(&ifp->if_snd))
2221                 ifp->if_start(ifp);
2222 }
2223
2224 static void
2225 bge_intr(void *xsc)
2226 {
2227         struct bge_softc *sc = xsc;
2228         struct ifnet *ifp = &sc->arpcom.ac_if;
2229         uint32_t status, statusword, mimode;
2230
2231         bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
2232                         sc->bge_cdata.bge_status_map,
2233                         BUS_DMASYNC_POSTREAD);
2234
2235         /* XXX */
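        /*
         * loadandclear() atomically fetches the status word and
         * zeroes it, so an update the NIC posts between the read and
         * the clear cannot be lost.
         */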
2236         statusword = loadandclear(&sc->bge_ldata.bge_status_block->bge_status);
2237
2238 #ifdef notdef
2239         /* Avoid this for now -- checking this register is expensive. */
2240         /* Make sure this is really our interrupt. */
2241         if (!(CSR_READ_4(sc, BGE_MISC_LOCAL_CTL) & BGE_MLC_INTR_STATE))
2242                 return;
2243 #endif
2244         /* Ack interrupt and stop others from occurring. */
2245         CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
2246
2247         /*
2248          * Process link state changes.
2249          * Grrr. The link status word in the status block does
2250          * not work correctly on the BCM5700 rev AX and BX chips,
2251          * according to all available information. Hence, we have
2252          * to enable MII interrupts in order to properly obtain
2253          * async link changes. Unfortunately, this also means that
2254          * we have to read the MAC status register to detect link
2255          * changes, thereby adding an additional register access to
2256          * the interrupt handler.
2257          */
2258
2259         if (sc->bge_asicrev == BGE_ASICREV_BCM5700) {
2260                 status = CSR_READ_4(sc, BGE_MAC_STS);
2261                 if (status & BGE_MACSTAT_MI_INTERRUPT) {
2262                         sc->bge_link = 0;
2263                         callout_stop(&sc->bge_stat_timer);
2264                         bge_tick_serialized(sc);
2265                         /* Clear the interrupt */
2266                         CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
2267                             BGE_EVTENB_MI_INTERRUPT);
2268                         bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR);
2269                         bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR,
2270                             BRGPHY_INTRS);
2271                 }
2272         } else {
2273                 if (statusword & BGE_STATFLAG_LINKSTATE_CHANGED) {
2274                         /*
2275                          * Sometimes PCS encoding errors are detected in
2276                          * TBI mode (on fiber NICs), and for some reason
2277                          * the chip will signal them as link changes.
2278                          * If we get a link change event, but the 'PCS
2279                          * encoding error' bit in the MAC status register
2280                          * is set, don't bother doing a link check.
2281                          * This avoids spurious "gigabit link up" messages
2282                          * that sometimes appear on fiber NICs during
2283                          * periods of heavy traffic. (There should be no
2284                          * effect on copper NICs.)
2285                          *
2286                          * If we do have a copper NIC (bge_tbi == 0) then
2287                          * check that the AUTOPOLL bit is set before
2288                          * processing the event as a real link change.
2289                          * Turning AUTOPOLL on and off in the MII read/write
2290                          * functions will often trigger a link status
2291                          * interrupt for no reason.
2292                          */
2293                         status = CSR_READ_4(sc, BGE_MAC_STS);
2294                         mimode = CSR_READ_4(sc, BGE_MI_MODE);
2295                         if (!(status & (BGE_MACSTAT_PORT_DECODE_ERROR |
2296                                         BGE_MACSTAT_MI_COMPLETE)) &&
2297                             (sc->bge_tbi || (mimode & BGE_MIMODE_AUTOPOLL))) {
2298                                 sc->bge_link = 0;
2299                                 callout_stop(&sc->bge_stat_timer);
2300                                 bge_tick_serialized(sc);
2301                         }
2305                         /* Clear the interrupt */
2306                         CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
2307                             BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
2308                             BGE_MACSTAT_LINK_CHANGED);
2309
2310                         /* Force flush the status block cached by PCI bridge */
2311                         CSR_READ_4(sc, BGE_MBX_IRQ0_LO);
2312                 }
2313         }
2314
2315         if (ifp->if_flags & IFF_RUNNING) {
2316                 /* Check RX return ring producer/consumer */
2317                 bge_rxeof(sc);
2318
2319                 /* Check TX ring producer/consumer */
2320                 bge_txeof(sc);
2321         }
2322
2323         bge_handle_events(sc);
2324
2325         /* Re-enable interrupts. */
2326         CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
2327 }
2328
2329 static void
2330 bge_tick(void *xsc)
2331 {
2332         struct bge_softc *sc = xsc;
2333         struct ifnet *ifp = &sc->arpcom.ac_if;
2334
2335         lwkt_serialize_enter(ifp->if_serializer);
2336         bge_tick_serialized(xsc);
2337         lwkt_serialize_exit(ifp->if_serializer);
2338 }
2339
2340 static void
2341 bge_tick_serialized(void *xsc)
2342 {
2343         struct bge_softc *sc = xsc;
2344         struct ifnet *ifp = &sc->arpcom.ac_if;
2345         struct mii_data *mii = NULL;
2346         struct ifmedia *ifm = NULL;
2347
2348         if (sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
2349             sc->bge_asicrev == BGE_ASICREV_BCM5750)
2350                 bge_stats_update_regs(sc);
2351         else
2352                 bge_stats_update(sc);
2353
2354         callout_reset(&sc->bge_stat_timer, hz, bge_tick, sc);
2355
2356         if (sc->bge_link) {
2357                 return;
2358         }
2359
2360         if (sc->bge_tbi) {
2361                 ifm = &sc->bge_ifmedia;
2362                 if (CSR_READ_4(sc, BGE_MAC_STS) &
2363                     BGE_MACSTAT_TBI_PCS_SYNCHED) {
2364                         sc->bge_link++;
2365                         if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
2366                                 BGE_CLRBIT(sc, BGE_MAC_MODE,
2367                                            BGE_MACMODE_TBI_SEND_CFGS);
2368                         }
2369                         CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
2370                         if_printf(ifp, "gigabit link up\n");
2371                         if (!ifq_is_empty(&ifp->if_snd))
2372                                 ifp->if_start(ifp);
2373                 }
2374                 return;
2375         }
2376
2377         mii = device_get_softc(sc->bge_miibus);
2378         mii_tick(mii);
2379  
2380         if (!sc->bge_link) {
2381                 mii_pollstat(mii);
2382                 if (mii->mii_media_status & IFM_ACTIVE &&
2383                     IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
2384                         sc->bge_link++;
2385                         if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
2386                             IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
2387                                 if_printf(ifp, "gigabit link up\n");
2388                         if (!ifq_is_empty(&ifp->if_snd))
2389                                 ifp->if_start(ifp);
2390                 }
2391         }
2392 }
2393
2394 static void
2395 bge_stats_update_regs(struct bge_softc *sc)
2396 {
2397         struct ifnet *ifp = &sc->arpcom.ac_if;
2398         struct bge_mac_stats_regs stats;
2399         uint32_t *s;
2400         int i;
2401
2402         s = (uint32_t *)&stats;
2403         for (i = 0; i < sizeof(struct bge_mac_stats_regs); i += 4) {
2404                 *s = CSR_READ_4(sc, BGE_RX_STATS + i);
2405                 s++;
2406         }
2407
2408         ifp->if_collisions +=
2409            (stats.dot3StatsSingleCollisionFrames +
2410            stats.dot3StatsMultipleCollisionFrames +
2411            stats.dot3StatsExcessiveCollisions +
2412            stats.dot3StatsLateCollisions) -
2413            ifp->if_collisions;
2414 }
2415
2416 static void
2417 bge_stats_update(struct bge_softc *sc)
2418 {
2419         struct ifnet *ifp = &sc->arpcom.ac_if;
2420         bus_size_t stats;
2421
2422         stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;
2423
2424 #define READ_STAT(sc, stats, stat)      \
2425         CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))
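/*
 * Each counter in the NIC statistics block is a 64-bit bge_addr pair;
 * only the low 32 bits are read here on the assumption that the
 * collision counters never wrap between updates.
 */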
2426
2427         ifp->if_collisions +=
2428            (READ_STAT(sc, stats,
2429                 txstats.dot3StatsSingleCollisionFrames.bge_addr_lo) +
2430             READ_STAT(sc, stats,
2431                 txstats.dot3StatsMultipleCollisionFrames.bge_addr_lo) +
2432             READ_STAT(sc, stats,
2433                 txstats.dot3StatsExcessiveCollisions.bge_addr_lo) +
2434             READ_STAT(sc, stats,
2435                 txstats.dot3StatsLateCollisions.bge_addr_lo)) -
2436            ifp->if_collisions;
2437
2438 #undef READ_STAT
2439
2440 #ifdef notdef
2441         ifp->if_collisions +=
2442            (sc->bge_rdata->bge_info.bge_stats.dot3StatsSingleCollisionFrames +
2443            sc->bge_rdata->bge_info.bge_stats.dot3StatsMultipleCollisionFrames +
2444            sc->bge_rdata->bge_info.bge_stats.dot3StatsExcessiveCollisions +
2445            sc->bge_rdata->bge_info.bge_stats.dot3StatsLateCollisions) -
2446            ifp->if_collisions;
2447 #endif
2448 }
2449
2450 /*
2451  * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
2452  * pointers to descriptors.
2453  */
2454 static int
2455 bge_encap(struct bge_softc *sc, struct mbuf *m_head, uint32_t *txidx)
2456 {
2457         struct bge_tx_bd *d = NULL;
2458         uint16_t csum_flags = 0;
2459         struct ifvlan *ifv = NULL;
2460         struct bge_dmamap_arg ctx;
2461         bus_dma_segment_t segs[BGE_NSEG_NEW];
2462         bus_dmamap_t map;
2463         int error, maxsegs, idx, i;
2464
2465         if ((m_head->m_flags & (M_PROTO1|M_PKTHDR)) == (M_PROTO1|M_PKTHDR) &&
2466             m_head->m_pkthdr.rcvif != NULL &&
2467             m_head->m_pkthdr.rcvif->if_type == IFT_L2VLAN)
2468                 ifv = m_head->m_pkthdr.rcvif->if_softc;
2469
2470         if (m_head->m_pkthdr.csum_flags) {
2471                 if (m_head->m_pkthdr.csum_flags & CSUM_IP)
2472                         csum_flags |= BGE_TXBDFLAG_IP_CSUM;
2473                 if (m_head->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
2474                         csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
2475                 if (m_head->m_flags & M_LASTFRAG)
2476                         csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
2477                 else if (m_head->m_flags & M_FRAG)
2478                         csum_flags |= BGE_TXBDFLAG_IP_FRAG;
2479         }
2480
2481         idx = *txidx;
2482         map = sc->bge_cdata.bge_tx_dmamap[idx];
2483
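        /*
         * Cap the segment count at what is left in the ring, keeping
         * BGE_NSEG_RSVD descriptors in reserve; bge_start() has
         * already guaranteed at least BGE_NSEG_SPARE of those remain.
         */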
2484         maxsegs = (BGE_TX_RING_CNT - sc->bge_txcnt) - BGE_NSEG_RSVD;
2485         KASSERT(maxsegs >= BGE_NSEG_SPARE,
2486                 ("not enough segments %d\n", maxsegs));
2487
2488         if (maxsegs > BGE_NSEG_NEW)
2489                 maxsegs = BGE_NSEG_NEW;
2490
2491         ctx.bge_segs = segs;
2492         ctx.bge_maxsegs = maxsegs;
2493         error = bus_dmamap_load_mbuf(sc->bge_cdata.bge_mtag, map, m_head,
2494                                      bge_dma_map_mbuf, &ctx, BUS_DMA_NOWAIT);
2495         if (error == E2BIG || ctx.bge_maxsegs == 0) {
2496                 struct mbuf *m_new;
2497
2498                 m_new = m_defrag(m_head, MB_DONTWAIT);
2499                 if (m_new == NULL) {
2500                         if_printf(&sc->arpcom.ac_if,
2501                                   "could not defrag TX mbuf\n");
2502                         error = ENOBUFS;
2503                         goto back;
2504                 } else {
2505                         m_head = m_new;
2506                 }
2507
2508                 ctx.bge_segs = segs;
2509                 ctx.bge_maxsegs = maxsegs;
2510                 error = bus_dmamap_load_mbuf(sc->bge_cdata.bge_mtag, map,
2511                                              m_head, bge_dma_map_mbuf, &ctx,
2512                                              BUS_DMA_NOWAIT);
2513                 if (error || ctx.bge_maxsegs == 0) {
2514                         if_printf(&sc->arpcom.ac_if,
2515                                   "could not defrag TX mbuf\n");
2516                         if (error == 0)
2517                                 error = E2BIG;
2518                         goto back;
2519                 }
2520         } else if (error) {
2521                 if_printf(&sc->arpcom.ac_if, "could not map TX mbuf\n");
2522                 goto back;
2523         }
2524
2525         bus_dmamap_sync(sc->bge_cdata.bge_mtag, map, BUS_DMASYNC_PREWRITE);
2526
2527         for (i = 0; ; i++) {
2528                 d = &sc->bge_ldata.bge_tx_ring[idx];
2529
2530                 d->bge_addr.bge_addr_lo = BGE_ADDR_LO(ctx.bge_segs[i].ds_addr);
2531                 d->bge_addr.bge_addr_hi = BGE_ADDR_HI(ctx.bge_segs[i].ds_addr);
2532                 d->bge_len = segs[i].ds_len;
2533                 d->bge_flags = csum_flags;
2534
2535                 if (i == ctx.bge_maxsegs - 1)
2536                         break;
2537                 BGE_INC(idx, BGE_TX_RING_CNT);
2538         }
2539         /* Mark the last segment as end of packet... */
2540         d->bge_flags |= BGE_TXBDFLAG_END;
2541
2542         /* Set vlan tag to the first segment of the packet. */
2543         d = &sc->bge_ldata.bge_tx_ring[*txidx];
2544         if (ifv != NULL) {
2545                 d->bge_flags |= BGE_TXBDFLAG_VLAN_TAG;
2546                 d->bge_vlan_tag = ifv->ifv_tag;
2547         } else {
2548                 d->bge_vlan_tag = 0;
2549         }
2550
2551         /*
2552          * Ensure that the map for this transmission is placed at
2553          * the array index of the last descriptor in this chain.
2554          */
2555         sc->bge_cdata.bge_tx_dmamap[*txidx] = sc->bge_cdata.bge_tx_dmamap[idx];
2556         sc->bge_cdata.bge_tx_dmamap[idx] = map;
2557         sc->bge_cdata.bge_tx_chain[idx] = m_head;
2558         sc->bge_txcnt += ctx.bge_maxsegs;
2559
2560         BGE_INC(idx, BGE_TX_RING_CNT);
2561         *txidx = idx;
2562 back:
2563         if (error)
2564                 m_freem(m_head);
2565         return error;
2566 }
2567
2568 /*
2569  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
2570  * to the mbuf data regions directly in the transmit descriptors.
2571  */
2572 static void
2573 bge_start(struct ifnet *ifp)
2574 {
2575         struct bge_softc *sc = ifp->if_softc;
2576         struct mbuf *m_head = NULL;
2577         uint32_t prodidx;
2578         int need_trans;
2579
2580         if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING ||
2581             !sc->bge_link)
2582                 return;
2583
2584         prodidx = sc->bge_tx_prodidx;
2585
2586         need_trans = 0;
2587         while(sc->bge_cdata.bge_tx_chain[prodidx] == NULL) {
2588                 m_head = ifq_poll(&ifp->if_snd);
2589                 if (m_head == NULL)
2590                         break;
2591
2592                 /*
2593                  * XXX
2594                  * safety overkill.  If this is a fragmented packet chain
2595                  * with delayed TCP/UDP checksums, then only encapsulate
2596                  * it if we have enough descriptors to handle the entire
2597                  * chain at once.
2598                  * (paranoia -- may not actually be needed)
2599                  */
2600                 if (m_head->m_flags & M_FIRSTFRAG &&
2601                     m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
2602                         if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
2603                             m_head->m_pkthdr.csum_data + 16) {
2604                                 ifp->if_flags |= IFF_OACTIVE;
2605                                 break;
2606                         }
2607                 }
2608
2609                 /*
2610                  * Sanity check: avoid coming within BGE_NSEG_RSVD
2611                  * descriptors of the end of the ring.  Also make
2612                  * sure there are BGE_NSEG_SPARE descriptors for
2613                  * jumbo buffers' defragmentation.
2614                  */
2615                 if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
2616                     (BGE_NSEG_RSVD + BGE_NSEG_SPARE)) {
2617                         ifp->if_flags |= IFF_OACTIVE;
2618                         break;
2619                 }
2620
2621                 /*
2622                  * Dequeue the packet before encapsulation, since
2623                  * bge_encap() may free the packet if error happens.
2624                  */
2625                 ifq_dequeue(&ifp->if_snd, m_head);
2626
2627                 /*
2628                  * Pack the data into the transmit ring. If we
2629                  * don't have room, set the OACTIVE flag and wait
2630                  * for the NIC to drain the ring.
2631                  */
2632                 if (bge_encap(sc, m_head, &prodidx)) {
2633                         ifp->if_flags |= IFF_OACTIVE;
2634                         break;
2635                 }
2636                 need_trans = 1;
2637
2638                 BPF_MTAP(ifp, m_head);
2639         }
2640
2641         if (!need_trans)
2642                 return;
2643
2644         /* Transmit */
2645         CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
2646         /* 5700 b2 errata */
2647         if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
2648                 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
2649
2650         sc->bge_tx_prodidx = prodidx;
2651
2652         /*
2653          * Set a timeout in case the chip goes out to lunch.
2654          */
2655         ifp->if_timer = 5;
2656 }
2657
2658 static void
2659 bge_init(void *xsc)
2660 {
2661         struct bge_softc *sc = xsc;
2662         struct ifnet *ifp = &sc->arpcom.ac_if;
2663         uint16_t *m;
2664
2665         ASSERT_SERIALIZED(ifp->if_serializer);
2666
2667         if (ifp->if_flags & IFF_RUNNING)
2668                 return;
2669
2670         /* Cancel pending I/O and flush buffers. */
2671         bge_stop(sc);
2672         bge_reset(sc);
2673         bge_chipinit(sc);
2674
2675         /*
2676          * Init the various state machines, ring
2677          * control blocks and firmware.
2678          */
2679         if (bge_blockinit(sc)) {
2680                 if_printf(ifp, "initialization failure\n");
2681                 return;
2682         }
2683
2684         /* Specify MTU. */
2685         CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
2686             ETHER_HDR_LEN + ETHER_CRC_LEN + EVL_ENCAPLEN);
2687
2688         /* Load our MAC address. */
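        /*
         * The MAC address registers evidently take the station
         * address as big-endian 16-bit halfwords, hence the htons()
         * conversions below.
         */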
2689         m = (uint16_t *)&sc->arpcom.ac_enaddr[0];
2690         CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
2691         CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
2692
2693         /* Enable or disable promiscuous mode as needed. */
2694         bge_setpromisc(sc);
2695
2696         /* Program multicast filter. */
2697         bge_setmulti(sc);
2698
2699         /* Init RX ring. */
2700         bge_init_rx_ring_std(sc);
2701
2702         /*
2703          * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
2704          * memory to ensure that the chip has in fact read the first
2705          * entry of the ring.
2706          */
2707         if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
2708                 uint32_t                v, i;
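                /*
                 * Offset 8 into the NIC's copy of ring entry 0 should
                 * be the buffer length word (assumption from the
                 * bge_rx_bd layout); once it reads back as
                 * MCLBYTES - ETHER_ALIGN the chip has fetched the BD.
                 */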
2709                 for (i = 0; i < 10; i++) {
2710                         DELAY(20);
2711                         v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
2712                         if (v == (MCLBYTES - ETHER_ALIGN))
2713                                 break;
2714                 }
2715                 if (i == 10)
2716                         if_printf(ifp, "5705 A0 chip failed to load RX ring\n");
2717         }
2718
2719         /* Init jumbo RX ring. */
2720         if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
2721                 bge_init_rx_ring_jumbo(sc);
2722
2723         /* Init our RX return ring index */
2724         sc->bge_rx_saved_considx = 0;
2725
2726         /* Init TX ring. */
2727         bge_init_tx_ring(sc);
2728
2729         /* Turn on transmitter */
2730         BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE);
2731
2732         /* Turn on receiver */
2733         BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
2734
2735         /* Tell firmware we're alive. */
2736         BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
2737
2738         /* Enable host interrupts. */
2739         BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
2740         BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
2741         CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
2742
2743         bge_ifmedia_upd(ifp);
2744
2745         ifp->if_flags |= IFF_RUNNING;
2746         ifp->if_flags &= ~IFF_OACTIVE;
2747
2748         callout_reset(&sc->bge_stat_timer, hz, bge_tick, sc);
2749 }
2750
2751 /*
2752  * Set media options.
2753  */
2754 static int
2755 bge_ifmedia_upd(struct ifnet *ifp)
2756 {
2757         struct bge_softc *sc = ifp->if_softc;
2758         struct ifmedia *ifm = &sc->bge_ifmedia;
2759         struct mii_data *mii;
2760
2761         /* If this is a 1000baseX NIC, enable the TBI port. */
2762         if (sc->bge_tbi) {
2763                 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2764                         return(EINVAL);
2765                 switch(IFM_SUBTYPE(ifm->ifm_media)) {
2766                 case IFM_AUTO:
2767                         /*
2768                          * The BCM5704 ASIC appears to have a special
2769                          * mechanism for programming the autoneg
2770                          * advertisement registers in TBI mode.
2771                          */
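                        /*
                         * Writing the config word with BGE_SGDIGCFG_SEND
                         * set, then rewriting it with the bit clear,
                         * appears to pulse the new advertisement out to
                         * the link partner.
                         */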
2772                         if (!bge_fake_autoneg &&
2773                             sc->bge_asicrev == BGE_ASICREV_BCM5704) {
2774                                 uint32_t sgdig;
2775
2776                                 CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
2777                                 sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
2778                                 sgdig |= BGE_SGDIGCFG_AUTO |
2779                                          BGE_SGDIGCFG_PAUSE_CAP |
2780                                          BGE_SGDIGCFG_ASYM_PAUSE;
2781                                 CSR_WRITE_4(sc, BGE_SGDIG_CFG,
2782                                             sgdig | BGE_SGDIGCFG_SEND);
2783                                 DELAY(5);
2784                                 CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig);
2785                         }
2786                         break;
2787                 case IFM_1000_SX:
2788                         if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
2789                                 BGE_CLRBIT(sc, BGE_MAC_MODE,
2790                                     BGE_MACMODE_HALF_DUPLEX);
2791                         } else {
2792                                 BGE_SETBIT(sc, BGE_MAC_MODE,
2793                                     BGE_MACMODE_HALF_DUPLEX);
2794                         }
2795                         break;
2796                 default:
2797                         return(EINVAL);
2798                 }
2799                 return(0);
2800         }
2801
2802         mii = device_get_softc(sc->bge_miibus);
2803         sc->bge_link = 0;
2804         if (mii->mii_instance) {
2805                 struct mii_softc *miisc;
2806                 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
2807                         mii_phy_reset(miisc);
2808         }
2809         mii_mediachg(mii);
2810
2811         return(0);
2812 }
2813
2814 /*
2815  * Report current media status.
2816  */
2817 static void
2818 bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
2819 {
2820         struct bge_softc *sc = ifp->if_softc;
2821         struct mii_data *mii;
2822
2823         if (sc->bge_tbi) {
2824                 ifmr->ifm_status = IFM_AVALID;
2825                 ifmr->ifm_active = IFM_ETHER;
2826                 if (CSR_READ_4(sc, BGE_MAC_STS) &
2827                     BGE_MACSTAT_TBI_PCS_SYNCHED)
2828                         ifmr->ifm_status |= IFM_ACTIVE;
2829                 ifmr->ifm_active |= IFM_1000_SX;
2830                 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
2831                         ifmr->ifm_active |= IFM_HDX;
2832                 else
2833                         ifmr->ifm_active |= IFM_FDX;
2834                 return;
2835         }
2836
2837         mii = device_get_softc(sc->bge_miibus);
2838         mii_pollstat(mii);
2839         ifmr->ifm_active = mii->mii_media_active;
2840         ifmr->ifm_status = mii->mii_media_status;
2841 }
2842
2843 static int
2844 bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
2845 {
2846         struct bge_softc *sc = ifp->if_softc;
2847         struct ifreq *ifr = (struct ifreq *) data;
2848         int mask, error = 0;
2849         struct mii_data *mii;
2850
2851         ASSERT_SERIALIZED(ifp->if_serializer);
2852
2853         switch(command) {
2854         case SIOCSIFMTU:
2855                 /* Disallow jumbo frames on 5705/5750. */
2856                 if (((sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
2857                       sc->bge_asicrev == BGE_ASICREV_BCM5750) &&
2858                      ifr->ifr_mtu > ETHERMTU) || ifr->ifr_mtu > BGE_JUMBO_MTU)
2859                         error = EINVAL;
2860                 else {
2861                         ifp->if_mtu = ifr->ifr_mtu;
2862                         ifp->if_flags &= ~IFF_RUNNING;
2863                         bge_init(sc);
2864                 }
2865                 break;
2866         case SIOCSIFFLAGS:
2867                 if (ifp->if_flags & IFF_UP) {
2868                         if (ifp->if_flags & IFF_RUNNING) {
2869                                 int flags = ifp->if_flags ^ sc->bge_if_flags;
2870
2871                                 /*
2872                                  * If only the state of the PROMISC flag
2873                                  * changed, then just use the 'set promisc
2874                                  * mode' command instead of reinitializing
2875                                  * the entire NIC. Doing a full re-init
2876                                  * means reloading the firmware and waiting
2877                                  * for it to start up, which may take a
2878                                  * second or two.  Similarly for ALLMULTI.
2879                                  */
2880                                 if (flags & IFF_PROMISC)
2881                                         bge_setpromisc(sc);
2882                                 if (flags & IFF_ALLMULTI)
2883                                         bge_setmulti(sc);
2884                         } else {
2885                                 bge_init(sc);
2886                         }
2887                 } else {
2888                         if (ifp->if_flags & IFF_RUNNING)
2889                                 bge_stop(sc);
2890                 }
2891                 sc->bge_if_flags = ifp->if_flags;
2892                 error = 0;
2893                 break;
2894         case SIOCADDMULTI:
2895         case SIOCDELMULTI:
2896                 if (ifp->if_flags & IFF_RUNNING) {
2897                         bge_setmulti(sc);
2898                         error = 0;
2899                 }
2900                 break;
2901         case SIOCSIFMEDIA:
2902         case SIOCGIFMEDIA:
2903                 if (sc->bge_tbi) {
2904                         error = ifmedia_ioctl(ifp, ifr,
2905                             &sc->bge_ifmedia, command);
2906                 } else {
2907                         mii = device_get_softc(sc->bge_miibus);
2908                         error = ifmedia_ioctl(ifp, ifr,
2909                             &mii->mii_media, command);
2910                 }
2911                 break;
2912         case SIOCSIFCAP:
2913                 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
2914                 if (mask & IFCAP_HWCSUM) {
2915                         if (IFCAP_HWCSUM & ifp->if_capenable)
2916                                 ifp->if_capenable &= ~IFCAP_HWCSUM;
2917                         else
2918                                 ifp->if_capenable |= IFCAP_HWCSUM;
2919                 }
2920                 error = 0;
2921                 break;
2922         default:
2923                 error = ether_ioctl(ifp, command, data);
2924                 break;
2925         }
2926         return(error);
2927 }
2928
2929 static void
2930 bge_watchdog(struct ifnet *ifp)
2931 {
2932         struct bge_softc *sc = ifp->if_softc;
2933
2934         if_printf(ifp, "watchdog timeout -- resetting\n");
2935
2936         ifp->if_flags &= ~IFF_RUNNING;
2937         bge_init(sc);
2938
2939         ifp->if_oerrors++;
2940
2941         if (!ifq_is_empty(&ifp->if_snd))
2942                 ifp->if_start(ifp);
2943 }
2944
2945 /*
2946  * Stop the adapter and free any mbufs allocated to the
2947  * RX and TX lists.
2948  */
2949 static void
2950 bge_stop(struct bge_softc *sc)
2951 {
2952         struct ifnet *ifp = &sc->arpcom.ac_if;
2953         struct ifmedia_entry *ifm;
2954         struct mii_data *mii = NULL;
2955         int mtmp, itmp;
2956
2957         ASSERT_SERIALIZED(ifp->if_serializer);
2958
2959         if (!sc->bge_tbi)
2960                 mii = device_get_softc(sc->bge_miibus);
2961
2962         callout_stop(&sc->bge_stat_timer);
2963
2964         /*
2965          * Disable all of the receiver blocks
2966          */
2967         BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
2968         BGE_CLRBIT(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
2969         BGE_CLRBIT(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
2970         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
2971             sc->bge_asicrev != BGE_ASICREV_BCM5750)
2972                 BGE_CLRBIT(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
2973         BGE_CLRBIT(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
2974         BGE_CLRBIT(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
2975         BGE_CLRBIT(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
2976
2977         /*
2978          * Disable all of the transmit blocks
2979          */
2980         BGE_CLRBIT(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
2981         BGE_CLRBIT(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
2982         BGE_CLRBIT(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
2983         BGE_CLRBIT(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
2984         BGE_CLRBIT(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
2985         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
2986             sc->bge_asicrev != BGE_ASICREV_BCM5750)
2987                 BGE_CLRBIT(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
2988         BGE_CLRBIT(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
2989
2990         /*
2991          * Shut down all of the memory managers and related
2992          * state machines.
2993          */
2994         BGE_CLRBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
2995         BGE_CLRBIT(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
2996         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
2997             sc->bge_asicrev != BGE_ASICREV_BCM5750)
2998                 BGE_CLRBIT(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
2999         CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
3000         CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
3001         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
3002             sc->bge_asicrev != BGE_ASICREV_BCM5750) {
3003                 BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
3004                 BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
3005         }
3006
3007         /* Disable host interrupts. */
3008         BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
3009         CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
3010
3011         /*
3012          * Tell firmware we're shutting down.
3013          */
3014         BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3015
3016         /* Free the RX lists. */
3017         bge_free_rx_ring_std(sc);
3018
3019         /* Free jumbo RX list. */
3020         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
3021             sc->bge_asicrev != BGE_ASICREV_BCM5750)
3022                 bge_free_rx_ring_jumbo(sc);
3023
3024         /* Free TX buffers. */
3025         bge_free_tx_ring(sc);
3026
3027         /*
3028          * Isolate/power down the PHY, but leave the media selection
3029          * unchanged so that things will be put back to normal when
3030          * we bring the interface back up.
3031          */
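        /*
         * Selecting IFM_ETHER|IFM_NONE makes mii_mediachg() isolate the
         * PHY; restoring the saved media word afterwards lets the next
         * bge_init() reselect the original media.
         */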
3032         if (!sc->bge_tbi) {
3033                 itmp = ifp->if_flags;
3034                 ifp->if_flags |= IFF_UP;
3035                 ifm = mii->mii_media.ifm_cur;
3036                 mtmp = ifm->ifm_media;
3037                 ifm->ifm_media = IFM_ETHER|IFM_NONE;
3038                 mii_mediachg(mii);
3039                 ifm->ifm_media = mtmp;
3040                 ifp->if_flags = itmp;
3041         }
3042
3043         sc->bge_link = 0;
3044
3045         sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
3046
3047         ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
3048 }
3049
3050 /*
3051  * Stop all chip I/O so that the kernel's probe routines don't
3052  * get confused by errant DMAs when rebooting.
3053  */
3054 static void
3055 bge_shutdown(device_t dev)
3056 {
3057         struct bge_softc *sc = device_get_softc(dev);
3058         struct ifnet *ifp = &sc->arpcom.ac_if;
3059
3060         lwkt_serialize_enter(ifp->if_serializer);
3061         bge_stop(sc);
3062         bge_reset(sc);
3063         lwkt_serialize_exit(ifp->if_serializer);
3064 }
3065
3066 static int
3067 bge_suspend(device_t dev)
3068 {
3069         struct bge_softc *sc = device_get_softc(dev);
3070         struct ifnet *ifp = &sc->arpcom.ac_if;
3071
3072         lwkt_serialize_enter(ifp->if_serializer);
3073         bge_stop(sc);
3074         lwkt_serialize_exit(ifp->if_serializer);
3075
3076         return 0;
3077 }
3078
3079 static int
3080 bge_resume(device_t dev)
3081 {
3082         struct bge_softc *sc = device_get_softc(dev);
3083         struct ifnet *ifp = &sc->arpcom.ac_if;
3084
3085         lwkt_serialize_enter(ifp->if_serializer);
3086
3087         if (ifp->if_flags & IFF_UP) {
3088                 bge_init(sc);
3089
3090                 if (!ifq_is_empty(&ifp->if_snd))
3091                         ifp->if_start(ifp);
3092         }
3093
3094         lwkt_serialize_exit(ifp->if_serializer);
3095
3096         return 0;
3097 }
3098
3099 static void
3100 bge_setpromisc(struct bge_softc *sc)
3101 {
3102         struct ifnet *ifp = &sc->arpcom.ac_if;
3103
3104         if (ifp->if_flags & IFF_PROMISC)
3105                 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
3106         else
3107                 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
3108 }
3109
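/*
 * busdma callback for ring/block loads: bus_dmamap_load() reports the
 * resolved physical segment list here.  Blocks allocated by
 * bge_dma_block_alloc() must fit in a single segment, hence the
 * assertion.
 */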
3110 static void
3111 bge_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
3112 {
3113         struct bge_dmamap_arg *ctx = arg;
3114
3115         if (error)
3116                 return;
3117
3118         KASSERT(nsegs == 1 && ctx->bge_maxsegs == 1,
3119                 ("only one segment is allowed\n"));
3120
3121         ctx->bge_segs[0] = *segs;
3122 }
3123
3124 static void
3125 bge_dma_map_mbuf(void *arg, bus_dma_segment_t *segs, int nsegs,
3126                  bus_size_t mapsz __unused, int error)
3127 {
3128         struct bge_dmamap_arg *ctx = arg;
3129         int i;
3130
3131         if (error)
3132                 return;
3133
3134         if (nsegs > ctx->bge_maxsegs) {
3135                 ctx->bge_maxsegs = 0;
3136                 return;
3137         }
3138
3139         ctx->bge_maxsegs = nsegs;
3140         for (i = 0; i < nsegs; ++i)
3141                 ctx->bge_segs[i] = segs[i];
3142 }
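
/*
 * A minimal usage sketch for the callback above (illustrative only, not
 * driver code): an mbuf chain is loaded via bus_dmamap_load_mbuf(), and
 * the caller then checks bge_maxsegs, which the callback zeroes when
 * the chain needs more segments than the caller allowed:
 *
 *	struct bge_dmamap_arg ctx;
 *	bus_dma_segment_t segs[BGE_NSEG_NEW];
 *	int error;
 *
 *	ctx.bge_maxsegs = BGE_NSEG_NEW;
 *	ctx.bge_segs = segs;
 *	error = bus_dmamap_load_mbuf(sc->bge_cdata.bge_mtag, map, m,
 *	    bge_dma_map_mbuf, &ctx, BUS_DMA_NOWAIT);
 *	if (error || ctx.bge_maxsegs == 0)
 *		... defragment the chain or drop the packet ...
 */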
3143
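/*
 * Release everything bge_dma_alloc() set up.  bge_dma_block_free() and
 * the mbuf-tag teardown both tolerate never-allocated (NULL) tags, so
 * this also serves as the unwind path when bge_dma_alloc() fails
 * partway through.
 */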
3144 static void
3145 bge_dma_free(struct bge_softc *sc)
3146 {
3147         int i;
3148
3149         /* Destroy RX/TX mbuf DMA resources. */
3150         if (sc->bge_cdata.bge_mtag != NULL) {
3151                 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
3152                         if (sc->bge_cdata.bge_rx_std_dmamap[i]) {
3153                                 bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
3154                                     sc->bge_cdata.bge_rx_std_dmamap[i]);
3155                         }
3156                 }
3157
3158                 for (i = 0; i < BGE_TX_RING_CNT; i++) {
3159                         if (sc->bge_cdata.bge_tx_dmamap[i]) {
3160                                 bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
3161                                     sc->bge_cdata.bge_tx_dmamap[i]);
3162                         }
3163                 }
3164                 bus_dma_tag_destroy(sc->bge_cdata.bge_mtag);
3165         }
3166
3167         /* Destroy standard RX ring */
3168         bge_dma_block_free(sc->bge_cdata.bge_rx_std_ring_tag,
3169                            sc->bge_cdata.bge_rx_std_ring_map,
3170                            sc->bge_ldata.bge_rx_std_ring);
3171
3172         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
3173             sc->bge_asicrev != BGE_ASICREV_BCM5750)
3174                 bge_free_jumbo_mem(sc);
3175
3176         /* Destroy RX return ring */
3177         bge_dma_block_free(sc->bge_cdata.bge_rx_return_ring_tag,
3178                            sc->bge_cdata.bge_rx_return_ring_map,
3179                            sc->bge_ldata.bge_rx_return_ring);
3180
3181         /* Destroy TX ring */
3182         bge_dma_block_free(sc->bge_cdata.bge_tx_ring_tag,
3183                            sc->bge_cdata.bge_tx_ring_map,
3184                            sc->bge_ldata.bge_tx_ring);
3185
3186         /* Destroy status block */
3187         bge_dma_block_free(sc->bge_cdata.bge_status_tag,
3188                            sc->bge_cdata.bge_status_map,
3189                            sc->bge_ldata.bge_status_block);
3190
3191         /* Destroy statistics block */
3192         bge_dma_block_free(sc->bge_cdata.bge_stats_tag,
3193                            sc->bge_cdata.bge_stats_map,
3194                            sc->bge_ldata.bge_stats);
3195
3196         /* Destroy the parent tag */
3197         if (sc->bge_cdata.bge_parent_tag != NULL)
3198                 bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag);
3199 }
3200
3201 static int
3202 bge_dma_alloc(struct bge_softc *sc)
3203 {
3204         struct ifnet *ifp = &sc->arpcom.ac_if;
3205         int nseg, i, error;
3206
3207         /*
3208          * Allocate the parent bus DMA tag appropriate for PCI.
3209          */
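        /*
         * The tags created below are derived from this one and inherit
         * its address-range restrictions, so they only need to specify
         * their own size, alignment and segment limits.
         */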
3210         error = bus_dma_tag_create(NULL, 1, 0,
3211                                    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3212                                    NULL, NULL,
3213                                    MAXBSIZE, BGE_NSEG_NEW,
3214                                    BUS_SPACE_MAXSIZE_32BIT,
3215                                    0, &sc->bge_cdata.bge_parent_tag);
3216         if (error) {
3217                 if_printf(ifp, "could not allocate parent dma tag\n");
3218                 return error;
3219         }
3220
3221         /*
3222          * Create DMA tag for mbufs.
3223          */
3224         nseg = BGE_NSEG_NEW;
3225         error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1, 0,
3226                                    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3227                                    NULL, NULL,
3228                                    MCLBYTES * nseg, nseg, MCLBYTES,
3229                                    BUS_DMA_ALLOCNOW, &sc->bge_cdata.bge_mtag);
3230         if (error) {
3231                 if_printf(ifp, "could not allocate mbuf dma tag\n");
3232                 return error;
3233         }
3234
3235         /*
3236          * Create DMA maps for TX/RX mbufs.
3237          */
3238         for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
3239                 error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0,
3240                                           &sc->bge_cdata.bge_rx_std_dmamap[i]);
3241                 if (error) {
3242                         int j;
3243
3244                         for (j = 0; j < i; ++j) {
3245                                 bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
3246                                         sc->bge_cdata.bge_rx_std_dmamap[j]);
3247                         }
3248                         bus_dma_tag_destroy(sc->bge_cdata.bge_mtag);
3249                         sc->bge_cdata.bge_mtag = NULL;
3250
3251                         if_printf(ifp, "could not create DMA map for RX\n");
3252                         return error;
3253                 }
3254         }
3255
3256         for (i = 0; i < BGE_TX_RING_CNT; i++) {
3257                 error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0,
3258                                           &sc->bge_cdata.bge_tx_dmamap[i]);
3259                 if (error) {
3260                         int j;
3261
3262                         for (j = 0; j < BGE_STD_RX_RING_CNT; ++j) {
3263                                 bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
3264                                         sc->bge_cdata.bge_rx_std_dmamap[j]);
3265                         }
3266                         for (j = 0; j < i; ++j) {
3267                                 bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
3268                                         sc->bge_cdata.bge_tx_dmamap[j]);
3269                         }
3270                         bus_dma_tag_destroy(sc->bge_cdata.bge_mtag);
3271                         sc->bge_cdata.bge_mtag = NULL;
3272
3273                         if_printf(ifp, "could not create DMA map for TX\n");
3274                         return error;
3275                 }
3276         }
3277
3278         /*
3279          * Create DMA resources for the standard RX ring.
3280          */
3281         error = bge_dma_block_alloc(sc, BGE_STD_RX_RING_SZ,
3282                                     &sc->bge_cdata.bge_rx_std_ring_tag,
3283                                     &sc->bge_cdata.bge_rx_std_ring_map,
3284                                     (void **)&sc->bge_ldata.bge_rx_std_ring,
3285                                     &sc->bge_ldata.bge_rx_std_ring_paddr);
3286         if (error) {
3287                 if_printf(ifp, "could not create std RX ring\n");
3288                 return error;
3289         }
3290
3291         /*
3292          * Create jumbo buffer pool.
3293          */
3294         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
3295             sc->bge_asicrev != BGE_ASICREV_BCM5750) {
3296                 error = bge_alloc_jumbo_mem(sc);
3297                 if (error) {
3298                         if_printf(ifp, "could not create jumbo buffer pool\n");
3299                         return error;
3300                 }
3301         }
3302
3303         /*
3304          * Create DMA resources for the RX return ring.
3305          */
3306         error = bge_dma_block_alloc(sc, BGE_RX_RTN_RING_SZ(sc),
3307                                     &sc->bge_cdata.bge_rx_return_ring_tag,
3308                                     &sc->bge_cdata.bge_rx_return_ring_map,
3309                                     (void **)&sc->bge_ldata.bge_rx_return_ring,
3310                                     &sc->bge_ldata.bge_rx_return_ring_paddr);
3311         if (error) {
3312                 if_printf(ifp, "could not create RX ret ring\n");
3313                 return error;
3314         }
3315
3316         /*
3317          * Create DMA resources for the TX ring.
3318          */
3319         error = bge_dma_block_alloc(sc, BGE_TX_RING_SZ,
3320                                     &sc->bge_cdata.bge_tx_ring_tag,
3321                                     &sc->bge_cdata.bge_tx_ring_map,
3322                                     (void **)&sc->bge_ldata.bge_tx_ring,
3323                                     &sc->bge_ldata.bge_tx_ring_paddr);
3324         if (error) {
3325                 if_printf(ifp, "could not create TX ring\n");
3326                 return error;
3327         }
3328
3329         /*
3330          * Create DMA resources for the status block.
3331          */
3332         error = bge_dma_block_alloc(sc, BGE_STATUS_BLK_SZ,
3333                                     &sc->bge_cdata.bge_status_tag,
3334                                     &sc->bge_cdata.bge_status_map,
3335                                     (void **)&sc->bge_ldata.bge_status_block,
3336                                     &sc->bge_ldata.bge_status_block_paddr);
3337         if (error) {
3338                 if_printf(ifp, "could not create status block\n");
3339                 return error;
3340         }
3341
3342         /*
3343          * Create DMA resources for the statistics block.
3344          */
3345         error = bge_dma_block_alloc(sc, BGE_STATS_SZ,
3346                                     &sc->bge_cdata.bge_stats_tag,
3347                                     &sc->bge_cdata.bge_stats_map,
3348                                     (void **)&sc->bge_ldata.bge_stats,
3349                                     &sc->bge_ldata.bge_stats_paddr);
3350         if (error) {
3351                 if_printf(ifp, "could not create stats block\n");
3352                 return error;
3353         }
3354         return 0;
3355 }
3356
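/*
 * Allocate a DMA'able block for a ring, status or statistics area:
 * create a page-aligned, single-segment tag, allocate zeroed memory on
 * it, load the map, and return the KVA along with the single physical
 * address that gets programmed into the chip.
 */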
3357 static int
3358 bge_dma_block_alloc(struct bge_softc *sc, bus_size_t size, bus_dma_tag_t *tag,
3359                     bus_dmamap_t *map, void **addr, bus_addr_t *paddr)
3360 {
3361         struct ifnet *ifp = &sc->arpcom.ac_if;
3362         struct bge_dmamap_arg ctx;
3363         bus_dma_segment_t seg;
3364         int error;
3365
3366         /*
3367          * Create DMA tag
3368          */
3369         error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, PAGE_SIZE, 0,
3370                                    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3371                                    NULL, NULL, size, 1, size, 0, tag);
3372         if (error) {
3373                 if_printf(ifp, "could not allocate dma tag\n");
3374                 return error;
3375         }
3376
3377         /*
3378          * Allocate DMA'able memory
3379          */
3380         error = bus_dmamem_alloc(*tag, addr, BUS_DMA_WAITOK | BUS_DMA_ZERO,
3381                                  map);
3382         if (error) {
3383                 if_printf(ifp, "could not allocate dma memory\n");
3384                 bus_dma_tag_destroy(*tag);
3385                 *tag = NULL;
3386                 return error;
3387         }
3388
3389         /*
3390          * Load the DMA'able memory
3391          */
3392         ctx.bge_maxsegs = 1;
3393         ctx.bge_segs = &seg;
3394         error = bus_dmamap_load(*tag, *map, *addr, size, bge_dma_map_addr, &ctx,
3395                                 BUS_DMA_WAITOK);
3396         if (error) {
3397                 if_printf(ifp, "could not load dma memory\n");
3398                 bus_dmamem_free(*tag, *addr, *map);
3399                 bus_dma_tag_destroy(*tag);
3400                 *tag = NULL;
3401                 return error;
3402         }
3403         *paddr = ctx.bge_segs[0].ds_addr;
3404
3405         return 0;
3406 }
3407
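/*
 * Undo bge_dma_block_alloc().  A NULL tag means the block was never
 * allocated (or was already torn down), so callers such as
 * bge_dma_free() may invoke this unconditionally.
 */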
3408 static void
3409 bge_dma_block_free(bus_dma_tag_t tag, bus_dmamap_t map, void *addr)
3410 {
3411         if (tag != NULL) {
3412                 bus_dmamap_unload(tag, map);
3413                 bus_dmamem_free(tag, addr, map);
3414                 bus_dma_tag_destroy(tag);
3415         }
3416 }