Correct hardware csum offload support for bge(4)
dragonfly.git: sys/dev/netif/bge/if_bge.c
1 /*
2  * Copyright (c) 2001 Wind River Systems
3  * Copyright (c) 1997, 1998, 1999, 2001
4  *      Bill Paul <wpaul@windriver.com>.  All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. All advertising materials mentioning features or use of this software
15  *    must display the following acknowledgement:
16  *      This product includes software developed by Bill Paul.
17  * 4. Neither the name of the author nor the names of any co-contributors
18  *    may be used to endorse or promote products derived from this software
19  *    without specific prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
22  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
25  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31  * THE POSSIBILITY OF SUCH DAMAGE.
32  *
33  * $FreeBSD: src/sys/dev/bge/if_bge.c,v 1.3.2.39 2005/07/03 03:41:18 silby Exp $
34  * $DragonFly: src/sys/dev/netif/bge/if_bge.c,v 1.69 2007/04/23 15:14:37 sephe Exp $
35  *
36  */
37
38 /*
39  * Broadcom BCM570x family gigabit ethernet driver for FreeBSD.
40  * 
41  * Written by Bill Paul <wpaul@windriver.com>
42  * Senior Engineer, Wind River Systems
43  */
44
45 /*
46  * The Broadcom BCM5700 is based on technology originally developed by
47  * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
48  * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
49  * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
50  * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
51  * frames, highly configurable RX filtering, and 16 RX and TX queues
52  * (which, along with RX filter rules, can be used for QOS applications).
53  * Other features, such as TCP segmentation, may be available as part
54  * of value-added firmware updates. Unlike the Tigon I and Tigon II,
55  * firmware images can be stored in hardware and need not be compiled
56  * into the driver.
57  *
58  * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
59  * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
60  * 
61  * The BCM5701 is a single-chip solution incorporating both the BCM5700
62  * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
63  * does not support external SSRAM.
64  *
65  * Broadcom also produces a variation of the BCM5700 under the "Altima"
66  * brand name, which is functionally similar but lacks PCI-X support.
67  *
68  * Without external SSRAM, you can have at most 4 TX rings,
69  * and the use of the mini RX ring is disabled. This seems to imply
70  * that these features are simply not available on the BCM5701. As a
71  * result, this driver does not implement any support for the mini RX
72  * ring.
73  */
74
75 #include <sys/param.h>
76 #include <sys/systm.h>
77 #include <sys/endian.h>
78 #include <sys/sockio.h>
79 #include <sys/mbuf.h>
80 #include <sys/malloc.h>
81 #include <sys/kernel.h>
82 #include <sys/socket.h>
83 #include <sys/queue.h>
84 #include <sys/serialize.h>
85 #include <sys/thread2.h>
86
87 #include <net/if.h>
88 #include <net/ifq_var.h>
89 #include <net/if_arp.h>
90 #include <net/ethernet.h>
91 #include <net/if_dl.h>
92 #include <net/if_media.h>
93
94 #include <net/bpf.h>
95
96 #include <net/if_types.h>
97 #include <net/vlan/if_vlan_var.h>
98
99 #include <netinet/in_systm.h>
100 #include <netinet/in.h>
101 #include <netinet/ip.h>
102
103 #include <sys/bus.h>
104 #include <sys/rman.h>
105
106 #include <dev/netif/mii_layer/mii.h>
107 #include <dev/netif/mii_layer/miivar.h>
108 #include <dev/netif/mii_layer/miidevs.h>
109 #include <dev/netif/mii_layer/brgphyreg.h>
110
111 #include <bus/pci/pcidevs.h>
112 #include <bus/pci/pcireg.h>
113 #include <bus/pci/pcivar.h>
114
115 #include "if_bgereg.h"
116
117 #define BGE_CSUM_FEATURES       (CSUM_IP | CSUM_TCP | CSUM_UDP)
118 #define BGE_MIN_FRAME           60
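
/*
 * Illustrative sketch (not compiled): how BGE_CSUM_FEATURES requests on
 * an outbound mbuf map onto send BD flags.  The BGE_TXBDFLAG_* names
 * come from if_bgereg.h; the helper itself is hypothetical and only
 * demonstrates the translation the TX encap path performs.
 */
#ifdef notdef
static uint16_t
bge_example_csum_flags(const struct mbuf *m)
{
        uint16_t csum_flags = 0;

        /* Request IP header checksum generation if the stack asked for it. */
        if (m->m_pkthdr.csum_flags & CSUM_IP)
                csum_flags |= BGE_TXBDFLAG_IP_CSUM;
        /* TCP and UDP payload checksums share one BD flag. */
        if (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
                csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
        return(csum_flags);
}
#endif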
119
120 /* "controller miibus0" required.  See GENERIC if you get errors here. */
121 #include "miibus_if.h"
122
123 /*
124  * Various supported device vendors/types and their names. Note: the
125  * spec seems to indicate that the hardware still has Alteon's vendor
126  * ID burned into it, though it will always be overridden by the vendor
127  * ID in the EEPROM. Just to be safe, we cover all possibilities.
128  */
129 #define BGE_DEVDESC_MAX         64      /* Maximum device description length */
130
131 static struct bge_type bge_devs[] = {
132         { PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_BCM5700,
133                 "Alteon BCM5700 Gigabit Ethernet" },
134         { PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_BCM5701,
135                 "Alteon BCM5701 Gigabit Ethernet" },
136         { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5700,
137                 "Broadcom BCM5700 Gigabit Ethernet" },
138         { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5701,
139                 "Broadcom BCM5701 Gigabit Ethernet" },
140         { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702X,
141                 "Broadcom BCM5702X Gigabit Ethernet" },
142         { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702_ALT,
143                 "Broadcom BCM5702 Gigabit Ethernet" },
144         { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703X,
145                 "Broadcom BCM5703X Gigabit Ethernet" },
146         { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703A3,
147                 "Broadcom BCM5703 Gigabit Ethernet" },
148         { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704C,
149                 "Broadcom BCM5704C Dual Gigabit Ethernet" },
150         { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704S,
151                 "Broadcom BCM5704S Dual Gigabit Ethernet" },
152         { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705,
153                 "Broadcom BCM5705 Gigabit Ethernet" },
154         { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705K,
155                 "Broadcom BCM5705K Gigabit Ethernet" },
156         { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705M,
157                 "Broadcom BCM5705M Gigabit Ethernet" },
158         { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705M_ALT,
159                 "Broadcom BCM5705M Gigabit Ethernet" },
160         { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5714,
161                 "Broadcom BCM5714C Gigabit Ethernet" },
162         { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5721,
163                 "Broadcom BCM5721 Gigabit Ethernet" },
164         { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5750,
165                 "Broadcom BCM5750 Gigabit Ethernet" },
166         { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5750M,
167                 "Broadcom BCM5750M Gigabit Ethernet" },
168         { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751,
169                 "Broadcom BCM5751 Gigabit Ethernet" },
170         { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751M,
171                 "Broadcom BCM5751M Gigabit Ethernet" },
172         { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5752,
173                 "Broadcom BCM5752 Gigabit Ethernet" },
174         { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5782,
175                 "Broadcom BCM5782 Gigabit Ethernet" },
176         { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5788,
177                 "Broadcom BCM5788 Gigabit Ethernet" },
178         { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5789,
179                 "Broadcom BCM5789 Gigabit Ethernet" },
180         { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5901,
181                 "Broadcom BCM5901 Fast Ethernet" },
182         { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5901A2,
183                 "Broadcom BCM5901A2 Fast Ethernet" },
184         { PCI_VENDOR_SCHNEIDERKOCH, PCI_PRODUCT_SCHNEIDERKOCH_SK_9DX1,
185                 "SysKonnect Gigabit Ethernet" },
186         { PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1000,
187                 "Altima AC1000 Gigabit Ethernet" },
188         { PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1001,
189                 "Altima AC1002 Gigabit Ethernet" },
190         { PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC9100,
191                 "Altima AC9100 Gigabit Ethernet" },
192         { 0, 0, NULL }
193 };
194
195 static int      bge_probe(device_t);
196 static int      bge_attach(device_t);
197 static int      bge_detach(device_t);
198 static void     bge_release_resources(struct bge_softc *);
199 static void     bge_txeof(struct bge_softc *);
200 static void     bge_rxeof(struct bge_softc *);
201
202 static void     bge_tick(void *);
203 static void     bge_tick_serialized(void *);
204 static void     bge_stats_update(struct bge_softc *);
205 static void     bge_stats_update_regs(struct bge_softc *);
206 static int      bge_encap(struct bge_softc *, struct mbuf *, uint32_t *);
207
208 static void     bge_intr(void *);
209 static void     bge_start(struct ifnet *);
210 static int      bge_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
211 static void     bge_init(void *);
212 static void     bge_stop(struct bge_softc *);
213 static void     bge_watchdog(struct ifnet *);
214 static void     bge_shutdown(device_t);
215 static int      bge_suspend(device_t);
216 static int      bge_resume(device_t);
217 static int      bge_ifmedia_upd(struct ifnet *);
218 static void     bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
219
220 static uint8_t  bge_eeprom_getbyte(struct bge_softc *, uint32_t, uint8_t *);
221 static int      bge_read_eeprom(struct bge_softc *, caddr_t, uint32_t, size_t);
222
223 static void     bge_setmulti(struct bge_softc *);
224 static void     bge_setpromisc(struct bge_softc *);
225
226 static void     bge_handle_events(struct bge_softc *);
227 static int      bge_alloc_jumbo_mem(struct bge_softc *);
228 static void     bge_free_jumbo_mem(struct bge_softc *);
229 static struct bge_jslot
230                 *bge_jalloc(struct bge_softc *);
231 static void     bge_jfree(void *);
232 static void     bge_jref(void *);
233 static int      bge_newbuf_std(struct bge_softc *, int, struct mbuf *);
234 static int      bge_newbuf_jumbo(struct bge_softc *, int, struct mbuf *);
235 static int      bge_init_rx_ring_std(struct bge_softc *);
236 static void     bge_free_rx_ring_std(struct bge_softc *);
237 static int      bge_init_rx_ring_jumbo(struct bge_softc *);
238 static void     bge_free_rx_ring_jumbo(struct bge_softc *);
239 static void     bge_free_tx_ring(struct bge_softc *);
240 static int      bge_init_tx_ring(struct bge_softc *);
241
242 static int      bge_chipinit(struct bge_softc *);
243 static int      bge_blockinit(struct bge_softc *);
244
245 #ifdef notdef
246 static uint8_t  bge_vpd_readbyte(struct bge_softc *, uint32_t);
247 static void     bge_vpd_read_res(struct bge_softc *, struct vpd_res *, uint32_t);
248 static void     bge_vpd_read(struct bge_softc *);
249 #endif
250
251 static uint32_t bge_readmem_ind(struct bge_softc *, uint32_t);
252 static void     bge_writemem_ind(struct bge_softc *, uint32_t, uint32_t);
253 #ifdef notdef
254 static uint32_t bge_readreg_ind(struct bge_softc *, uint32_t);
255 #endif
256 static void     bge_writereg_ind(struct bge_softc *, uint32_t, uint32_t);
257
258 static int      bge_miibus_readreg(device_t, int, int);
259 static int      bge_miibus_writereg(device_t, int, int, int);
260 static void     bge_miibus_statchg(device_t);
261
262 static void     bge_reset(struct bge_softc *);
263
264 static void     bge_dma_map_addr(void *, bus_dma_segment_t *, int, int);
265 static void     bge_dma_map_mbuf(void *, bus_dma_segment_t *, int,
266                                  bus_size_t, int);
267 static int      bge_dma_alloc(struct bge_softc *);
268 static void     bge_dma_free(struct bge_softc *);
269 static int      bge_dma_block_alloc(struct bge_softc *, bus_size_t,
270                                     bus_dma_tag_t *, bus_dmamap_t *,
271                                     void **, bus_addr_t *);
272 static void     bge_dma_block_free(bus_dma_tag_t, bus_dmamap_t, void *);
273
274 /*
275  * Set following tunable to 1 for some IBM blade servers with the DNLK
276  * switch module. Auto negotiation is broken for those configurations.
277  */
278 static int      bge_fake_autoneg = 0;
279 TUNABLE_INT("hw.bge.fake_autoneg", &bge_fake_autoneg);
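
/*
 * Example (illustrative): the tunable is set from the boot loader,
 * e.g. by adding the following line to /boot/loader.conf:
 *
 *      hw.bge.fake_autoneg="1"
 */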
280
281 static device_method_t bge_methods[] = {
282         /* Device interface */
283         DEVMETHOD(device_probe,         bge_probe),
284         DEVMETHOD(device_attach,        bge_attach),
285         DEVMETHOD(device_detach,        bge_detach),
286         DEVMETHOD(device_shutdown,      bge_shutdown),
287         DEVMETHOD(device_suspend,       bge_suspend),
288         DEVMETHOD(device_resume,        bge_resume),
289
290         /* bus interface */
291         DEVMETHOD(bus_print_child,      bus_generic_print_child),
292         DEVMETHOD(bus_driver_added,     bus_generic_driver_added),
293
294         /* MII interface */
295         DEVMETHOD(miibus_readreg,       bge_miibus_readreg),
296         DEVMETHOD(miibus_writereg,      bge_miibus_writereg),
297         DEVMETHOD(miibus_statchg,       bge_miibus_statchg),
298
299         { 0, 0 }
300 };
301
302 static DEFINE_CLASS_0(bge, bge_driver, bge_methods, sizeof(struct bge_softc));
303 static devclass_t bge_devclass;
304
305 DECLARE_DUMMY_MODULE(if_bge);
306 DRIVER_MODULE(if_bge, pci, bge_driver, bge_devclass, 0, 0);
307 DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0);
308
309 static uint32_t
310 bge_readmem_ind(struct bge_softc *sc, uint32_t off)
311 {
312         device_t dev = sc->bge_dev;
313
314         pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
315         return(pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4));
316 }
317
318 static void
319 bge_writemem_ind(struct bge_softc *sc, uint32_t off, uint32_t val)
320 {
321         device_t dev = sc->bge_dev;
322
323         pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
324         pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
325 }
326
327 #ifdef notdef
328 static uint32_t
329 bge_readreg_ind(struct bge_softc *sc, uint32_t off)
330 {
331         device_t dev = sc->bge_dev;
332
333         pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
334         return(pci_read_config(dev, BGE_PCI_REG_DATA, 4));
335 }
336 #endif
337
338 static void
339 bge_writereg_ind(struct bge_softc *sc, uint32_t off, uint32_t val)
340 {
341         device_t dev = sc->bge_dev;
342
343         pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
344         pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);
345 }
346
347 #ifdef notdef
348 static uint8_t
349 bge_vpd_readbyte(struct bge_softc *sc, uint32_t addr)
350 {
351         device_t dev = sc->bge_dev;
352         uint32_t val;
353         int i;
354
355         pci_write_config(dev, BGE_PCI_VPD_ADDR, addr, 2);
356         for (i = 0; i < BGE_TIMEOUT * 10; i++) {
357                 DELAY(10);
358                 if (pci_read_config(dev, BGE_PCI_VPD_ADDR, 2) & BGE_VPD_FLAG)
359                         break;
360         }
361
362         if (i == BGE_TIMEOUT * 10) {
363                 device_printf(sc->bge_dev, "VPD read timed out\n");
364                 return(0);
365         }
366
367         val = pci_read_config(dev, BGE_PCI_VPD_DATA, 4);
368
369         return((val >> ((addr % 4) * 8)) & 0xFF);
370 }
371
372 static void
373 bge_vpd_read_res(struct bge_softc *sc, struct vpd_res *res, uint32_t addr)
374 {
375         size_t i;
376         uint8_t *ptr;
377
378         ptr = (uint8_t *)res;
379         for (i = 0; i < sizeof(struct vpd_res); i++)
380                 ptr[i] = bge_vpd_readbyte(sc, i + addr);
381
382         return;
383 }
384
385 static void
386 bge_vpd_read(struct bge_softc *sc)
387 {
388         int pos = 0, i;
389         struct vpd_res res;
390
391         if (sc->bge_vpd_prodname != NULL)
392                 kfree(sc->bge_vpd_prodname, M_DEVBUF);
393         if (sc->bge_vpd_readonly != NULL)
394                 kfree(sc->bge_vpd_readonly, M_DEVBUF);
395         sc->bge_vpd_prodname = NULL;
396         sc->bge_vpd_readonly = NULL;
397
398         bge_vpd_read_res(sc, &res, pos);
399
400         if (res.vr_id != VPD_RES_ID) {
401                 device_printf(sc->bge_dev,
402                               "bad VPD resource id: expected %x got %x\n",
403                               VPD_RES_ID, res.vr_id);
404                 return;
405         }
406
407         pos += sizeof(res);
408         sc->bge_vpd_prodname = kmalloc(res.vr_len + 1, M_DEVBUF, M_INTWAIT);
409         for (i = 0; i < res.vr_len; i++)
410                 sc->bge_vpd_prodname[i] = bge_vpd_readbyte(sc, i + pos);
411         sc->bge_vpd_prodname[i] = '\0';
412         pos += i;
413
414         bge_vpd_read_res(sc, &res, pos);
415
416         if (res.vr_id != VPD_RES_READ) {
417                 device_printf(sc->bge_dev,
418                               "bad VPD resource id: expected %x got %x\n",
419                               VPD_RES_READ, res.vr_id);
420                 return;
421         }
422
423         pos += sizeof(res);
424         sc->bge_vpd_readonly = kmalloc(res.vr_len, M_DEVBUF, M_INTWAIT);
425         for (i = 0; i < res.vr_len; i++)
426                 sc->bge_vpd_readonly[i] = bge_vpd_readbyte(sc, i + pos);
427 }
428 #endif
429
430 /*
431  * Read a byte of data stored in the EEPROM at address 'addr.' The
432  * BCM570x supports both the traditional bitbang interface and an
433  * auto access interface for reading the EEPROM. We use the auto
434  * access method.
435  */
436 static uint8_t
437 bge_eeprom_getbyte(struct bge_softc *sc, uint32_t addr, uint8_t *dest)
438 {
439         int i;
440         uint32_t byte = 0;
441
442         /*
443          * Enable use of auto EEPROM access so we can avoid
444          * having to use the bitbang method.
445          */
446         BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);
447
448         /* Reset the EEPROM, load the clock period. */
449         CSR_WRITE_4(sc, BGE_EE_ADDR,
450             BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
451         DELAY(20);
452
453         /* Issue the read EEPROM command. */
454         CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);
455
456         /* Wait for completion */
457         for (i = 0; i < BGE_TIMEOUT * 10; i++) {
458                 DELAY(10);
459                 if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
460                         break;
461         }
462
463         if (i == BGE_TIMEOUT * 10) {
464                 if_printf(&sc->arpcom.ac_if, "eeprom read timed out\n");
465                 return(1);
466         }
467
468         /* Get result. */
469         byte = CSR_READ_4(sc, BGE_EE_DATA);
470
471         *dest = (byte >> ((addr % 4) * 8)) & 0xFF;
472
473         return(0);
474 }
475
476 /*
477  * Read a sequence of bytes from the EEPROM.
478  */
479 static int
480 bge_read_eeprom(struct bge_softc *sc, caddr_t dest, uint32_t off, size_t len)
481 {
482         size_t i;
483         int err;
484         uint8_t byte;
485
486         for (byte = 0, err = 0, i = 0; i < len; i++) {
487                 err = bge_eeprom_getbyte(sc, off + i, &byte);
488                 if (err)
489                         break;
490                 *(dest + i) = byte;
491         }
492
493         return(err ? 1 : 0);
494 }
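
/*
 * Usage sketch: the attach path recovers the station address from the
 * EEPROM with a call of this form (BGE_EE_MAC_OFFSET comes from
 * if_bgereg.h; error handling elided):
 *
 *      uint8_t ether_addr[ETHER_ADDR_LEN];
 *
 *      bge_read_eeprom(sc, (caddr_t)ether_addr,
 *          BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN);
 */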
495
496 static int
497 bge_miibus_readreg(device_t dev, int phy, int reg)
498 {
499         struct bge_softc *sc;
500         struct ifnet *ifp;
501         uint32_t val, autopoll;
502         int i;
503
504         sc = device_get_softc(dev);
505         ifp = &sc->arpcom.ac_if;
506
507         /*
508          * Broadcom's own driver always assumes the internal
509          * PHY is at GMII address 1. On some chips, the PHY responds
510          * to accesses at all addresses, which could cause us to
511          * bogusly attach the PHY 32 times at probe time. Always
512          * restricting the lookup to address 1 is simpler than
513          * trying to figure out which chip revisions should be
514          * special-cased.
515          */
516         if (phy != 1)
517                 return(0);
518
519         /* Reading with autopolling on may trigger PCI errors */
520         autopoll = CSR_READ_4(sc, BGE_MI_MODE);
521         if (autopoll & BGE_MIMODE_AUTOPOLL) {
522                 BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
523                 DELAY(40);
524         }
525
526         CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY|
527             BGE_MIPHY(phy)|BGE_MIREG(reg));
528
529         for (i = 0; i < BGE_TIMEOUT; i++) {
530                 val = CSR_READ_4(sc, BGE_MI_COMM);
531                 if (!(val & BGE_MICOMM_BUSY))
532                         break;
533         }
534
535         if (i == BGE_TIMEOUT) {
536                 if_printf(ifp, "PHY read timed out\n");
537                 val = 0;
538                 goto done;
539         }
540
541         val = CSR_READ_4(sc, BGE_MI_COMM);
542
543 done:
544         if (autopoll & BGE_MIMODE_AUTOPOLL) {
545                 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
546                 DELAY(40);
547         }
548
549         if (val & BGE_MICOMM_READFAIL)
550                 return(0);
551
552         return(val & 0xFFFF);
553 }
554
555 static int
556 bge_miibus_writereg(device_t dev, int phy, int reg, int val)
557 {
558         struct bge_softc *sc;
559         uint32_t autopoll;
560         int i;
561
562         sc = device_get_softc(dev);
563
564         /* Writing with autopolling on may trigger PCI errors */
565         autopoll = CSR_READ_4(sc, BGE_MI_MODE);
566         if (autopoll & BGE_MIMODE_AUTOPOLL) {
567                 BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
568                 DELAY(40);
569         }
570
571         CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY|
572             BGE_MIPHY(phy)|BGE_MIREG(reg)|val);
573
574         for (i = 0; i < BGE_TIMEOUT; i++) {
575                 if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY))
576                         break;
577         }
578
579         if (autopoll & BGE_MIMODE_AUTOPOLL) {
580                 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
581                 DELAY(40);
582         }
583
584         if (i == BGE_TIMEOUT) {
585                 if_printf(&sc->arpcom.ac_if, "PHY write timed out\n");
586                 return(0);
587         }
588
589         return(0);
590 }
591
592 static void
593 bge_miibus_statchg(device_t dev)
594 {
595         struct bge_softc *sc;
596         struct mii_data *mii;
597
598         sc = device_get_softc(dev);
599         mii = device_get_softc(sc->bge_miibus);
600
601         BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
602         if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
603                 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
604         } else {
605                 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
606         }
607
608         if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
609                 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
610         } else {
611                 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
612         }
613 }
614
615 /*
616  * Handle events that have triggered interrupts.
617  */
618 static void
619 bge_handle_events(struct bge_softc *sc)
620 {
621 }
622
623 /*
624  * Memory management for jumbo frames.
625  */
626 static int
627 bge_alloc_jumbo_mem(struct bge_softc *sc)
628 {
629         struct ifnet *ifp = &sc->arpcom.ac_if;
630         struct bge_jslot *entry;
631         uint8_t *ptr;
632         bus_addr_t paddr;
633         int i, error;
634
635         /*
636          * Create tag for jumbo mbufs.
637          * This is really a bit of a kludge. We allocate a special
638          * jumbo buffer pool which (thanks to the way our DMA
639          * memory allocation works) will consist of contiguous
640          * pages. This means that even though a jumbo buffer might
641          * be larger than a page size, we don't really need to
642          * map it into more than one DMA segment. However, the
643          * default mbuf tag will result in multi-segment mappings,
644          * so we have to create a special jumbo mbuf tag that
645          * lets us get away with mapping the jumbo buffers as
646          * a single segment. I think eventually the driver should
647          * be changed so that it uses ordinary mbufs and cluster
648          * buffers, i.e. jumbo frames can span multiple DMA
649          * descriptors. But that's a project for another day.
650          */
651
652         /*
653          * Create DMA stuffs for jumbo RX ring.
654          */
655         error = bge_dma_block_alloc(sc, BGE_JUMBO_RX_RING_SZ,
656                                     &sc->bge_cdata.bge_rx_jumbo_ring_tag,
657                                     &sc->bge_cdata.bge_rx_jumbo_ring_map,
658                                     (void **)&sc->bge_ldata.bge_rx_jumbo_ring,
659                                     &sc->bge_ldata.bge_rx_jumbo_ring_paddr);
660         if (error) {
661                 if_printf(ifp, "could not create jumbo RX ring\n");
662                 return error;
663         }
664
665         /*
666          * Create DMA stuffs for jumbo buffer block.
667          */
668         error = bge_dma_block_alloc(sc, BGE_JMEM,
669                                     &sc->bge_cdata.bge_jumbo_tag,
670                                     &sc->bge_cdata.bge_jumbo_map,
671                                     (void **)&sc->bge_ldata.bge_jumbo_buf,
672                                     &paddr);
673         if (error) {
674                 if_printf(ifp, "could not create jumbo buffer\n");
675                 return error;
676         }
677
678         SLIST_INIT(&sc->bge_jfree_listhead);
679
680         /*
681          * Now divide it up into 9K pieces and save the addresses
682          * in an array. Note that we play an evil trick here by using
683  * the first few bytes in the buffer to hold the address
684          * of the softc structure for this interface. This is because
685          * bge_jfree() needs it, but it is called by the mbuf management
686          * code which will not pass it to us explicitly.
687          */
688         for (i = 0, ptr = sc->bge_ldata.bge_jumbo_buf; i < BGE_JSLOTS; i++) {
689                 entry = &sc->bge_cdata.bge_jslots[i];
690                 entry->bge_sc = sc;
691                 entry->bge_buf = ptr;
692                 entry->bge_paddr = paddr;
693                 entry->bge_inuse = 0;
694                 entry->bge_slot = i;
695                 SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, entry, jslot_link);
696
697                 ptr += BGE_JLEN;
698                 paddr += BGE_JLEN;
699         }
700         return 0;
701 }
702
703 static void
704 bge_free_jumbo_mem(struct bge_softc *sc)
705 {
706         /* Destroy jumbo RX ring. */
707         bge_dma_block_free(sc->bge_cdata.bge_rx_jumbo_ring_tag,
708                            sc->bge_cdata.bge_rx_jumbo_ring_map,
709                            sc->bge_ldata.bge_rx_jumbo_ring);
710
711         /* Destroy jumbo buffer block. */
712         bge_dma_block_free(sc->bge_cdata.bge_jumbo_tag,
713                            sc->bge_cdata.bge_jumbo_map,
714                            sc->bge_ldata.bge_jumbo_buf);
715 }
716
717 /*
718  * Allocate a jumbo buffer.
719  */
720 static struct bge_jslot *
721 bge_jalloc(struct bge_softc *sc)
722 {
723         struct bge_jslot *entry;
724
725         lwkt_serialize_enter(&sc->bge_jslot_serializer);
726         entry = SLIST_FIRST(&sc->bge_jfree_listhead);
727         if (entry) {
728                 SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jslot_link);
729                 entry->bge_inuse = 1;
730         } else {
731                 if_printf(&sc->arpcom.ac_if, "no free jumbo buffers\n");
732         }
733         lwkt_serialize_exit(&sc->bge_jslot_serializer);
734         return(entry);
735 }
736
737 /*
738  * Adjust usage count on a jumbo buffer.
739  */
740 static void
741 bge_jref(void *arg)
742 {
743         struct bge_jslot *entry = (struct bge_jslot *)arg;
744         struct bge_softc *sc = entry->bge_sc;
745
746         if (sc == NULL)
747                 panic("bge_jref: can't find softc pointer!");
748
749         if (&sc->bge_cdata.bge_jslots[entry->bge_slot] != entry) {
750                 panic("bge_jref: asked to reference buffer "
751                     "that we don't manage!");
752         } else if (entry->bge_inuse == 0) {
753                 panic("bge_jref: buffer already free!");
754         } else {
755                 atomic_add_int(&entry->bge_inuse, 1);
756         }
757 }
758
759 /*
760  * Release a jumbo buffer.
761  */
762 static void
763 bge_jfree(void *arg)
764 {
765         struct bge_jslot *entry = (struct bge_jslot *)arg;
766         struct bge_softc *sc = entry->bge_sc;
767
768         if (sc == NULL)
769                 panic("bge_jfree: can't find softc pointer!");
770
771         if (&sc->bge_cdata.bge_jslots[entry->bge_slot] != entry) {
772                 panic("bge_jfree: asked to free buffer that we don't manage!");
773         } else if (entry->bge_inuse == 0) {
774                 panic("bge_jfree: buffer already free!");
775         } else {
776                 /*
777                  * Possible MP race to 0, use the serializer.  The atomic insn
778                  * is still needed for races against bge_jref().
779                  */
780                 lwkt_serialize_enter(&sc->bge_jslot_serializer);
781                 atomic_subtract_int(&entry->bge_inuse, 1);
782                 if (entry->bge_inuse == 0) {
783                         SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, 
784                                           entry, jslot_link);
785                 }
786                 lwkt_serialize_exit(&sc->bge_jslot_serializer);
787         }
788 }
789
790
791 /*
792  * Initialize a standard receive ring descriptor.
793  */
794 static int
795 bge_newbuf_std(struct bge_softc *sc, int i, struct mbuf *m)
796 {
797         struct mbuf *m_new = NULL;
798         struct bge_dmamap_arg ctx;
799         bus_dma_segment_t seg;
800         struct bge_rx_bd *r;
801         int error;
802
803         if (m == NULL) {
804                 m_new = m_getcl(MB_DONTWAIT, MT_DATA, M_PKTHDR);
805                 if (m_new == NULL)
806                         return ENOBUFS;
807         } else {
808                 m_new = m;
809                 m_new->m_data = m_new->m_ext.ext_buf;
810         }
811         m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
812
813         if (!sc->bge_rx_alignment_bug)
814                 m_adj(m_new, ETHER_ALIGN);
815
816         ctx.bge_maxsegs = 1;
817         ctx.bge_segs = &seg;
818         error = bus_dmamap_load_mbuf(sc->bge_cdata.bge_mtag,
819                                      sc->bge_cdata.bge_rx_std_dmamap[i],
820                                      m_new, bge_dma_map_mbuf, &ctx,
821                                      BUS_DMA_NOWAIT);
822         if (error || ctx.bge_maxsegs == 0) {
823                 if (m == NULL)
824                         m_freem(m_new);
825                 return ENOMEM;
826         }
827
828         sc->bge_cdata.bge_rx_std_chain[i] = m_new;
829
830         r = &sc->bge_ldata.bge_rx_std_ring[i];
831         r->bge_addr.bge_addr_lo = BGE_ADDR_LO(ctx.bge_segs[0].ds_addr);
832         r->bge_addr.bge_addr_hi = BGE_ADDR_HI(ctx.bge_segs[0].ds_addr);
833         r->bge_flags = BGE_RXBDFLAG_END;
834         r->bge_len = m_new->m_len;
835         r->bge_idx = i;
836
837         bus_dmamap_sync(sc->bge_cdata.bge_mtag,
838                         sc->bge_cdata.bge_rx_std_dmamap[i],
839                         BUS_DMASYNC_PREREAD);
840         return 0;
841 }
842
843 /*
844  * Initialize a jumbo receive ring descriptor. This allocates
845  * a jumbo buffer from the pool managed internally by the driver.
846  */
847 static int
848 bge_newbuf_jumbo(struct bge_softc *sc, int i, struct mbuf *m)
849 {
850         struct mbuf *m_new = NULL;
851         struct bge_jslot *buf;
852         struct bge_rx_bd *r;
853         bus_addr_t paddr;
854
855         if (m == NULL) {
856                 /* Allocate the mbuf. */
857                 MGETHDR(m_new, MB_DONTWAIT, MT_DATA);
858                 if (m_new == NULL)
859                         return(ENOBUFS);
860
861                 /* Allocate the jumbo buffer */
862                 buf = bge_jalloc(sc);
863                 if (buf == NULL) {
864                         m_freem(m_new);
865                         if_printf(&sc->arpcom.ac_if, "jumbo allocation failed "
866                             "-- packet dropped!\n");
867                         return ENOBUFS;
868                 }
869
870                 /* Attach the buffer to the mbuf. */
871                 m_new->m_ext.ext_arg = buf;
872                 m_new->m_ext.ext_buf = buf->bge_buf;
873                 m_new->m_ext.ext_free = bge_jfree;
874                 m_new->m_ext.ext_ref = bge_jref;
875                 m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN;
876
877                 m_new->m_flags |= M_EXT;
878         } else {
879                 KKASSERT(m->m_flags & M_EXT);
880                 m_new = m;
881                 buf = m_new->m_ext.ext_arg;
882         }
883         m_new->m_data = m_new->m_ext.ext_buf;
884         m_new->m_len = m_new->m_pkthdr.len = m_new->m_ext.ext_size;
885
886         paddr = buf->bge_paddr;
887         if (!sc->bge_rx_alignment_bug) {
888                 m_adj(m_new, ETHER_ALIGN);
889                 paddr += ETHER_ALIGN;
890         }
891
892         /* Set up the descriptor. */
893         sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new;
894
895         r = &sc->bge_ldata.bge_rx_jumbo_ring[i];
896         r->bge_addr.bge_addr_lo = BGE_ADDR_LO(paddr);
897         r->bge_addr.bge_addr_hi = BGE_ADDR_HI(paddr);
898         r->bge_flags = BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING;
899         r->bge_len = m_new->m_len;
900         r->bge_idx = i;
901
902         return 0;
903 }
904
905 /*
906  * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
907  * that's 1MB of memory, which is a lot. For now, we fill only the first
908  * 256 ring entries and hope that our CPU is fast enough to keep up with
909  * the NIC.
910  */
911 static int
912 bge_init_rx_ring_std(struct bge_softc *sc)
913 {
914         int i;
915
916         for (i = 0; i < BGE_SSLOTS; i++) {
917                 if (bge_newbuf_std(sc, i, NULL) == ENOBUFS)
918                         return(ENOBUFS);
919         }
920
921         bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
922                         sc->bge_cdata.bge_rx_std_ring_map,
923                         BUS_DMASYNC_PREWRITE);
924
925         sc->bge_std = i - 1;
926         CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
927
928         return(0);
929 }
930
931 static void
932 bge_free_rx_ring_std(struct bge_softc *sc)
933 {
934         int i;
935
936         for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
937                 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
938                         bus_dmamap_unload(sc->bge_cdata.bge_mtag,
939                                           sc->bge_cdata.bge_rx_std_dmamap[i]);
940                         m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
941                         sc->bge_cdata.bge_rx_std_chain[i] = NULL;
942                 }
943                 bzero(&sc->bge_ldata.bge_rx_std_ring[i],
944                     sizeof(struct bge_rx_bd));
945         }
946 }
947
948 static int
949 bge_init_rx_ring_jumbo(struct bge_softc *sc)
950 {
951         int i;
952         struct bge_rcb *rcb;
953
954         for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
955                 if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
956                         return(ENOBUFS);
957         }
958
959         bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
960                         sc->bge_cdata.bge_rx_jumbo_ring_map,
961                         BUS_DMASYNC_PREWRITE);
962
963         sc->bge_jumbo = i - 1;
964
965         rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
966         rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 0);
967         CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
968
969         CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
970
971         return(0);
972 }
973
974 static void
975 bge_free_rx_ring_jumbo(struct bge_softc *sc)
976 {
977         int i;
978
979         for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
980                 if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
981                         m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
982                         sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
983                 }
984                 bzero(&sc->bge_ldata.bge_rx_jumbo_ring[i],
985                     sizeof(struct bge_rx_bd));
986         }
987 }
988
989 static void
990 bge_free_tx_ring(struct bge_softc *sc)
991 {
992         int i;
993
994         for (i = 0; i < BGE_TX_RING_CNT; i++) {
995                 if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
996                         bus_dmamap_unload(sc->bge_cdata.bge_mtag,
997                                           sc->bge_cdata.bge_tx_dmamap[i]);
998                         m_freem(sc->bge_cdata.bge_tx_chain[i]);
999                         sc->bge_cdata.bge_tx_chain[i] = NULL;
1000                 }
1001                 bzero(&sc->bge_ldata.bge_tx_ring[i],
1002                     sizeof(struct bge_tx_bd));
1003         }
1004 }
1005
1006 static int
1007 bge_init_tx_ring(struct bge_softc *sc)
1008 {
1009         sc->bge_txcnt = 0;
1010         sc->bge_tx_saved_considx = 0;
1011         sc->bge_tx_prodidx = 0;
1012
1013         /* Initialize transmit producer index for host-memory send ring. */
1014         CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
1015
1016         /* 5700 b2 errata */
1017         if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
1018                 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0);
1019
1020         CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1021         /* 5700 b2 errata */
1022         if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
1023                 CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1024
1025         return(0);
1026 }
1027
1028 static void
1029 bge_setmulti(struct bge_softc *sc)
1030 {
1031         struct ifnet *ifp;
1032         struct ifmultiaddr *ifma;
1033         uint32_t hashes[4] = { 0, 0, 0, 0 };
1034         int h, i;
1035
1036         ifp = &sc->arpcom.ac_if;
1037
1038         if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
1039                 for (i = 0; i < 4; i++)
1040                         CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
1041                 return;
1042         }
1043
1044         /* First, zot all the existing filters. */
1045         for (i = 0; i < 4; i++)
1046                 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);
1047
1048         /* Now program new ones. */
1049         LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1050                 if (ifma->ifma_addr->sa_family != AF_LINK)
1051                         continue;
1052                 h = ether_crc32_le(
1053                     LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
1054                     ETHER_ADDR_LEN) & 0x7f;
1055                 hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
1056         }
1057
1058         for (i = 0; i < 4; i++)
1059                 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
1060 }
1061
1062 /*
1063  * Do endian, PCI and DMA initialization. Also check the on-board ROM
1064  * self-test results.
1065  */
1066 static int
1067 bge_chipinit(struct bge_softc *sc)
1068 {
1069         int i;
1070         uint32_t dma_rw_ctl;
1071
1072         /* Set endian type before we access any non-PCI registers. */
1073         pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, BGE_INIT, 4);
1074
1075         /*
1076          * Check the 'ROM failed' bit on the RX CPU to see if
1077          * self-tests passed.
1078          */
1079         if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL) {
1080                 if_printf(&sc->arpcom.ac_if,
1081                           "RX CPU self-diagnostics failed!\n");
1082                 return(ENODEV);
1083         }
1084
1085         /* Clear the MAC control register */
1086         CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
1087
1088         /*
1089          * Clear the MAC statistics block in the NIC's
1090          * internal memory.
1091          */
1092         for (i = BGE_STATS_BLOCK;
1093             i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t))
1094                 BGE_MEMWIN_WRITE(sc, i, 0);
1095
1096         for (i = BGE_STATUS_BLOCK;
1097             i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t))
1098                 BGE_MEMWIN_WRITE(sc, i, 0);
1099
1100         /* Set up the PCI DMA control register. */
1101         if (sc->bge_pcie) {
1102                 /* PCI Express */
1103                 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
1104                     (0xf << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1105                     (0x2 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
1106         } else if (pci_read_config(sc->bge_dev, BGE_PCI_PCISTATE, 4) &
1107                    BGE_PCISTATE_PCI_BUSMODE) {
1108                 /* Conventional PCI bus */
1109                 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
1110                     (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1111                     (0x7 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
1112                     (0x0F);
1113         } else {
1114                 /* PCI-X bus */
1115                 /*
1116                  * The 5704 uses a different encoding of read/write
1117                  * watermarks.
1118                  */
1119                 if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
1120                         dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
1121                             (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1122                             (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
1123                 else
1124                         dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
1125                             (0x3 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1126                             (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
1127                             (0x0F);
1128
1129                 /*
1130                  * 5703 and 5704 need ONEDMA_AT_ONCE as a workaround
1131                  * for hardware bugs.
1132                  */
1133                 if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1134                     sc->bge_asicrev == BGE_ASICREV_BCM5704) {
1135                         uint32_t tmp;
1136
1137                         tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1f;
1138                         if (tmp == 0x6 || tmp == 0x7)
1139                                 dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE;
1140                 }
1141         }
1142
1143         if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1144             sc->bge_asicrev == BGE_ASICREV_BCM5704 ||
1145             sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
1146             sc->bge_asicrev == BGE_ASICREV_BCM5750)
1147                 dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
1148         pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);
1149
1150         /*
1151          * Set up general mode register.
1152          */
1153         CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS|
1154             BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS|
1155             BGE_MODECTL_TX_NO_PHDR_CSUM);
1156
1157         /*
1158          * Disable memory write invalidate.  Apparently it is not supported
1159          * properly by these devices.
1160          */
1161         PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD, PCIM_CMD_MWIEN, 4);
1162
1163         /* Set the timer prescaler (always 66MHz) */
1164         CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/);
1165
1166         return(0);
1167 }
1168
1169 static int
1170 bge_blockinit(struct bge_softc *sc)
1171 {
1172         struct bge_rcb *rcb;
1173         bus_size_t vrcb;
1174         bge_hostaddr taddr;
1175         int i;
1176
1177         /*
1178          * Initialize the memory window pointer register so that
1179          * we can access the first 32K of internal NIC RAM. This will
1180          * allow us to set up the TX send ring RCBs and the RX return
1181          * ring RCBs, plus other things which live in NIC memory.
1182          */
1183         CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);
1184
1185         /* Note: the BCM5704 has a smaller mbuf space than other chips. */
1186
1187         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1188             sc->bge_asicrev != BGE_ASICREV_BCM5750) {
1189                 /* Configure mbuf memory pool */
1190                 if (sc->bge_extram) {
1191                         CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
1192                             BGE_EXT_SSRAM);
1193                         if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
1194                                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
1195                         else
1196                                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
1197                 } else {
1198                         CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
1199                             BGE_BUFFPOOL_1);
1200                         if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
1201                                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
1202                         else
1203                                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
1204                 }
1205
1206                 /* Configure DMA resource pool */
1207                 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
1208                     BGE_DMA_DESCRIPTORS);
1209                 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
1210         }
1211
1212         /* Configure mbuf pool watermarks */
1213         if (sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
1214             sc->bge_asicrev == BGE_ASICREV_BCM5750) {
1215                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1216                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
1217         } else {
1218                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
1219                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
1220         }
1221         CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1222
1223         /* Configure DMA resource watermarks */
1224         CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
1225         CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);
1226
1227         /* Enable buffer manager */
1228         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1229             sc->bge_asicrev != BGE_ASICREV_BCM5750) {
1230                 CSR_WRITE_4(sc, BGE_BMAN_MODE,
1231                     BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN);
1232
1233                 /* Poll for buffer manager start indication */
1234                 for (i = 0; i < BGE_TIMEOUT; i++) {
1235                         if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
1236                                 break;
1237                         DELAY(10);
1238                 }
1239
1240                 if (i == BGE_TIMEOUT) {
1241                         if_printf(&sc->arpcom.ac_if,
1242                                   "buffer manager failed to start\n");
1243                         return(ENXIO);
1244                 }
1245         }
1246
1247         /* Enable flow-through queues */
1248         CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
1249         CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
1250
1251         /* Wait until queue initialization is complete */
1252         for (i = 0; i < BGE_TIMEOUT; i++) {
1253                 if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
1254                         break;
1255                 DELAY(10);
1256         }
1257
1258         if (i == BGE_TIMEOUT) {
1259                 if_printf(&sc->arpcom.ac_if,
1260                           "flow-through queue init failed\n");
1261                 return(ENXIO);
1262         }
1263
1264         /* Initialize the standard RX ring control block */
1265         rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb;
1266         rcb->bge_hostaddr.bge_addr_lo =
1267             BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr);
1268         rcb->bge_hostaddr.bge_addr_hi =
1269             BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr);
1270         bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
1271             sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREREAD);
1272         if (sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
1273             sc->bge_asicrev == BGE_ASICREV_BCM5750)
1274                 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
1275         else
1276                 rcb->bge_maxlen_flags =
1277                     BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
1278         if (sc->bge_extram)
1279                 rcb->bge_nicaddr = BGE_EXT_STD_RX_RINGS;
1280         else
1281                 rcb->bge_nicaddr = BGE_STD_RX_RINGS;
1282         CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
1283         CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
1284         CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1285         CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
1286
1287         /*
1288          * Initialize the jumbo RX ring control block
1289          * We set the 'ring disabled' bit in the flags
1290          * field until we're actually ready to start
1291          * using this ring (i.e. once we set the MTU
1292          * high enough to require it).
1293          */
1294         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1295             sc->bge_asicrev != BGE_ASICREV_BCM5750) {
1296                 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
1297
1298                 rcb->bge_hostaddr.bge_addr_lo =
1299                     BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1300                 rcb->bge_hostaddr.bge_addr_hi =
1301                     BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1302                 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1303                     sc->bge_cdata.bge_rx_jumbo_ring_map,
1304                     BUS_DMASYNC_PREREAD);
1305                 rcb->bge_maxlen_flags =
1306                     BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN,
1307                     BGE_RCB_FLAG_RING_DISABLED);
1308                 if (sc->bge_extram)
1309                         rcb->bge_nicaddr = BGE_EXT_JUMBO_RX_RINGS;
1310                 else
1311                         rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
1312                 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
1313                     rcb->bge_hostaddr.bge_addr_hi);
1314                 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
1315                     rcb->bge_hostaddr.bge_addr_lo);
1316                 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
1317                     rcb->bge_maxlen_flags);
1318                 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
1319
1320                 /* Set up dummy disabled mini ring RCB */
1321                 rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb;
1322                 rcb->bge_maxlen_flags =
1323                     BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
1324                 CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
1325                     rcb->bge_maxlen_flags);
1326         }
1327
1328         /*
1329  * Set the BD ring replenish thresholds. The recommended
1330          * values are 1/8th the number of descriptors allocated to
1331          * each ring.
1332          */
1333         CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, BGE_STD_RX_RING_CNT/8);
1334         CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8);
1335
1336         /*
1337          * Disable all unused send rings by setting the 'ring disabled'
1338          * bit in the flags field of all the TX send ring control blocks.
1339          * These are located in NIC memory.
1340          */
1341         vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1342         for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) {
1343                 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1344                     BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
1345                 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1346                 vrcb += sizeof(struct bge_rcb);
1347         }
1348
1349         /* Configure TX RCB 0 (we use only the first ring) */
1350         vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1351         BGE_HOSTADDR(taddr, sc->bge_ldata.bge_tx_ring_paddr);
1352         RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1353         RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1354         RCB_WRITE_4(sc, vrcb, bge_nicaddr,
1355             BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
1356         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1357             sc->bge_asicrev != BGE_ASICREV_BCM5750) {
1358                 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1359                     BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
1360         }
1361
1362         /* Disable all unused RX return rings */
1363         vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1364         for (i = 0; i < BGE_RX_RINGS_MAX; i++) {
1365                 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0);
1366                 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0);
1367                 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1368                     BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt,
1369                     BGE_RCB_FLAG_RING_DISABLED));
1370                 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1371                 CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO +
1372                     (i * (sizeof(uint64_t))), 0);
1373                 vrcb += sizeof(struct bge_rcb);
1374         }
1375
1376         /* Initialize RX ring indexes */
1377         CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, 0);
1378         CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
1379         CSR_WRITE_4(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
1380
1381         /*
1382          * Set up RX return ring 0
1383          * Note that the NIC address for RX return rings is 0x00000000.
1384          * The return rings live entirely within the host, so the
1385          * nicaddr field in the RCB isn't used.
1386          */
1387         vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1388         BGE_HOSTADDR(taddr, sc->bge_ldata.bge_rx_return_ring_paddr);
1389         RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1390         RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1391         RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0x00000000);
1392         RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1393             BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));
1394
1395         /* Set random backoff seed for TX */
1396         CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
1397             sc->arpcom.ac_enaddr[0] + sc->arpcom.ac_enaddr[1] +
1398             sc->arpcom.ac_enaddr[2] + sc->arpcom.ac_enaddr[3] +
1399             sc->arpcom.ac_enaddr[4] + sc->arpcom.ac_enaddr[5] +
1400             BGE_TX_BACKOFF_SEED_MASK);
1401
1402         /* Set inter-packet gap */
1403         CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);
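             /*
              * 0x2620 follows the field layout used by Broadcom's own
              * drivers (an assumption here, since this file does not
              * define the fields): slot time 0x20 in bits 0-7, IPG 6
              * in bits 8-11, and IPG CRS 2 above that.
              */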
1404
1405         /*
1406          * Specify which ring to use for packets that don't match
1407          * any RX rules.
1408          */
1409         CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
1410
1411         /*
1412          * Configure number of RX lists. One interrupt distribution
1413          * list, sixteen active lists, one bad frames class.
1414          */
1415         CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
1416
1417         /* Initialize RX list placement stats mask. */
1418         CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
1419         CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
1420
1421         /* Disable host coalescing until we get it set up */
1422         CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
1423
1424         /* Poll to make sure it's shut down. */
1425         for (i = 0; i < BGE_TIMEOUT; i++) {
1426                 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
1427                         break;
1428                 DELAY(10);
1429         }
1430
1431         if (i == BGE_TIMEOUT) {
1432                 if_printf(&sc->arpcom.ac_if,
1433                           "host coalescing engine failed to idle\n");
1434                 return(ENXIO);
1435         }
1436
1437         /* Set up host coalescing defaults */
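             /*
              * The controller raises an interrupt when either the tick
              * limit (in usec) or the buffer descriptor count limit is
              * reached, whichever comes first; the *_INT variants below
              * are the limits used while an interrupt is already
              * pending.
              */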
1438         CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
1439         CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
1440         CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
1441         CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
1442         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1443             sc->bge_asicrev != BGE_ASICREV_BCM5750) {
1444                 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
1445                 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
1446         }
1447         CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0);
1448         CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0);
1449
1450         /* Set up address of statistics block */
1451         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1452             sc->bge_asicrev != BGE_ASICREV_BCM5750) {
1453                 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI,
1454                     BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr));
1455                 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
1456                     BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr));
1457
1458                 CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
1459                 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
1460                 CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
1461         }
1462
1463         /* Set up address of status block */
1464         CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI,
1465             BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr));
1466         CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
1467             BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr));
1468         sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx = 0;
1469         sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx = 0;
1470
1471         /* Turn on host coalescing state machine */
1472         CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
1473
1474         /* Turn on RX BD completion state machine and enable attentions */
1475         CSR_WRITE_4(sc, BGE_RBDC_MODE,
1476             BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);
1477
1478         /* Turn on RX list placement state machine */
1479         CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
1480
1481         /* Turn on RX list selector state machine. */
1482         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1483             sc->bge_asicrev != BGE_ASICREV_BCM5750)
1484                 CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
1485
1486         /* Turn on DMA, clear stats */
1487         CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB|
1488             BGE_MACMODE_RXDMA_ENB|BGE_MACMODE_RX_STATS_CLEAR|
1489             BGE_MACMODE_TX_STATS_CLEAR|BGE_MACMODE_RX_STATS_ENB|
1490             BGE_MACMODE_TX_STATS_ENB|BGE_MACMODE_FRMHDR_DMA_ENB|
1491             (sc->bge_tbi ? BGE_PORTMODE_TBI : BGE_PORTMODE_MII));
1492
1493         /* Set misc. local control, enable interrupts on attentions */
1494         CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);
1495
1496 #ifdef notdef
1497         /* Assert GPIO pins for PHY reset */
1498         BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
1499             BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
1500         BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
1501             BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
1502 #endif
1503
1504         /* Turn on DMA completion state machine */
1505         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1506             sc->bge_asicrev != BGE_ASICREV_BCM5750)
1507                 CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
1508
1509         /* Turn on write DMA state machine */
1510         CSR_WRITE_4(sc, BGE_WDMA_MODE,
1511             BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS);
1512         
1513         /* Turn on read DMA state machine */
1514         CSR_WRITE_4(sc, BGE_RDMA_MODE,
1515             BGE_RDMAMODE_ENABLE|BGE_RDMAMODE_ALL_ATTNS);
1516
1517         /* Turn on RX data completion state machine */
1518         CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
1519
1520         /* Turn on RX BD initiator state machine */
1521         CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
1522
1523         /* Turn on RX data and RX BD initiator state machine */
1524         CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
1525
1526         /* Turn on Mbuf cluster free state machine */
1527         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1528             sc->bge_asicrev != BGE_ASICREV_BCM5750)
1529                 CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
1530
1531         /* Turn on send BD completion state machine */
1532         CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
1533
1534         /* Turn on send data completion state machine */
1535         CSR_WRITE_4(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
1536
1537         /* Turn on send data initiator state machine */
1538         CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
1539
1540         /* Turn on send BD initiator state machine */
1541         CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
1542
1543         /* Turn on send BD selector state machine */
1544         CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
1545
1546         CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
1547         CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
1548             BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);
1549
1550         /* ack/clear link change events */
1551         CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
1552             BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
1553             BGE_MACSTAT_LINK_CHANGED);
1554         CSR_WRITE_4(sc, BGE_MI_STS, 0);
1555
1556         /* Enable PHY auto polling (for MII/GMII only) */
1557         if (sc->bge_tbi) {
1558                 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
1559         } else {
1560                 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL | (10 << 16));
1561                 if (sc->bge_asicrev == BGE_ASICREV_BCM5700)
1562                         CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
1563                             BGE_EVTENB_MI_INTERRUPT);
1564         }
1565
1566         /* Enable link state change attentions. */
1567         BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
1568
1569         return(0);
1570 }
1571
1572 /*
1573  * Probe for a Broadcom chip. Check the PCI vendor and device IDs
1574  * against our list and return the device name if we find a match.
1575  * The Broadcom controller has VPD support, so in principle the
1576  * product name could be read from the controller itself, but that
1577  * path is disabled below (#ifdef notdef); instead we report the
1578  * compiled-in name together with the ASIC revision.
1579  */
1580 static int
1581 bge_probe(device_t dev)
1582 {
1583         struct bge_softc *sc;
1584         struct bge_type *t;
1585         char *descbuf;
1586         uint16_t product, vendor;
1587
1588         product = pci_get_device(dev);
1589         vendor = pci_get_vendor(dev);
1590
1591         for (t = bge_devs; t->bge_name != NULL; t++) {
1592                 if (vendor == t->bge_vid && product == t->bge_did)
1593                         break;
1594         }
1595
1596         if (t->bge_name == NULL)
1597                 return(ENXIO);
1598
1599         sc = device_get_softc(dev);
1600 #ifdef notdef
1601         sc->bge_dev = dev;
1602
1603         bge_vpd_read(sc);
1604         device_set_desc(dev, sc->bge_vpd_prodname);
1605 #endif
1606         descbuf = kmalloc(BGE_DEVDESC_MAX, M_TEMP, M_WAITOK);
1607         ksnprintf(descbuf, BGE_DEVDESC_MAX, "%s, ASIC rev. %#04x", t->bge_name,
1608             pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >> 16);
1609         device_set_desc_copy(dev, descbuf);
1610         if (pci_get_subvendor(dev) == PCI_VENDOR_DELL)
1611                 sc->bge_no_3_led = 1;
1612         kfree(descbuf, M_TEMP);
1613         return(0);
1614 }
1615
1616 static int
1617 bge_attach(device_t dev)
1618 {
1619         struct ifnet *ifp;
1620         struct bge_softc *sc;
1621         uint32_t hwcfg = 0;
1622         uint32_t mac_addr = 0;
1623         int error = 0, rid;
1624         uint8_t ether_addr[ETHER_ADDR_LEN];
1625
1626         sc = device_get_softc(dev);
1627         sc->bge_dev = dev;
1628         callout_init(&sc->bge_stat_timer);
1629         lwkt_serialize_init(&sc->bge_jslot_serializer);
1630
1631         /*
1632          * Map control/status registers.
1633          */
1634         pci_enable_busmaster(dev);
1635
1636         rid = BGE_PCI_BAR0;
1637         sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
1638             RF_ACTIVE);
1639
1640         if (sc->bge_res == NULL) {
1641                 device_printf(dev, "couldn't map memory\n");
1642                 error = ENXIO;
1643                 return(error);
1644         }
1645
1646         sc->bge_btag = rman_get_bustag(sc->bge_res);
1647         sc->bge_bhandle = rman_get_bushandle(sc->bge_res);
1648
1649         /* Allocate interrupt */
1650         rid = 0;
1651
1652         sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1653             RF_SHAREABLE | RF_ACTIVE);
1654
1655         if (sc->bge_irq == NULL) {
1656                 device_printf(dev, "couldn't map interrupt\n");
1657                 error = ENXIO;
1658                 goto fail;
1659         }
1660
1661         /* Save ASIC rev. */
1662         sc->bge_chipid =
1663             pci_read_config(dev, BGE_PCI_MISC_CTL, 4) &
1664             BGE_PCIMISCCTL_ASICREV;
1665         sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid);
1666         sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid);
1667
1668         /*
1669          * Treat the 5714 and the 5752 like the 5750 until we have more info
1670          * on these chips.
1671          */
1672         if (sc->bge_asicrev == BGE_ASICREV_BCM5714 ||
1673             sc->bge_asicrev == BGE_ASICREV_BCM5752)
1674                 sc->bge_asicrev = BGE_ASICREV_BCM5750;
1675
1676         /*
1677          * XXX: Broadcom Linux driver.  Not in specs or errata.
1678          * PCI-Express?
1679          */
1680         if (sc->bge_asicrev == BGE_ASICREV_BCM5750) {
1681                 uint32_t v;
1682
1683                 v = pci_read_config(dev, BGE_PCI_MSI_CAPID, 4);
1684                 if (((v >> 8) & 0xff) == BGE_PCIE_MSI_CAPID) {
1685                         v = pci_read_config(dev, BGE_PCIE_MSI_CAPID, 4);
1686                         if ((v & 0xff) == BGE_PCIE_MSI_CAPID_VAL)
1687                                 sc->bge_pcie = 1;
1688                 }
1689         }
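             /*
              * (bge_pcie is consulted by bge_reset() below to decide
              * whether the PCI Express reset workarounds apply.)
              */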
1690
1691         ifp = &sc->arpcom.ac_if;
1692         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1693
1694         /* Try to reset the chip. */
1695         bge_reset(sc);
1696
1697         if (bge_chipinit(sc)) {
1698                 device_printf(dev, "chip initialization failed\n");
1699                 error = ENXIO;
1700                 goto fail;
1701         }
1702
1703         /*
1704          * Get the station address from NIC memory or, failing that, the EEPROM.
1705          */
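             /*
              * The bootcode tags a valid station address in NIC shared
              * memory with the ASCII signature 'H','K' (0x484b) in the
              * upper half of the first word, hence the check below.
              */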
1706         mac_addr = bge_readmem_ind(sc, 0x0c14);
1707         if ((mac_addr >> 16) == 0x484b) {
1708                 ether_addr[0] = (uint8_t)(mac_addr >> 8);
1709                 ether_addr[1] = (uint8_t)mac_addr;
1710                 mac_addr = bge_readmem_ind(sc, 0x0c18);
1711                 ether_addr[2] = (uint8_t)(mac_addr >> 24);
1712                 ether_addr[3] = (uint8_t)(mac_addr >> 16);
1713                 ether_addr[4] = (uint8_t)(mac_addr >> 8);
1714                 ether_addr[5] = (uint8_t)mac_addr;
1715         } else if (bge_read_eeprom(sc, ether_addr,
1716             BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) {
1717                 device_printf(dev, "failed to read station address\n");
1718                 error = ENXIO;
1719                 goto fail;
1720         }
1721
1722         /* 5705/5750 limits RX return ring to 512 entries. */
1723         if (sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
1724             sc->bge_asicrev == BGE_ASICREV_BCM5750)
1725                 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
1726         else
1727                 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
1728
1729         error = bge_dma_alloc(sc);
1730         if (error)
1731                 goto fail;
1732
1733         /* Set default tuneable values. */
1734         sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
1735         sc->bge_rx_coal_ticks = 150;
1736         sc->bge_tx_coal_ticks = 150;
1737         sc->bge_rx_max_coal_bds = 64;
1738         sc->bge_tx_max_coal_bds = 128;
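             /*
              * i.e. an RX interrupt at the latest 150usec after the
              * first unserviced frame, or as soon as 64 frames have
              * accumulated; 1/150usec works out to roughly 6700
              * interrupts/sec from the tick limit alone.
              */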
1739
1740         /* Set up ifnet structure */
1741         ifp->if_softc = sc;
1742         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1743         ifp->if_ioctl = bge_ioctl;
1744         ifp->if_start = bge_start;
1745         ifp->if_watchdog = bge_watchdog;
1746         ifp->if_init = bge_init;
1747         ifp->if_mtu = ETHERMTU;
1748         ifp->if_capabilities = IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
1749         ifq_set_maxlen(&ifp->if_snd, BGE_TX_RING_CNT - 1);
1750         ifq_set_ready(&ifp->if_snd);
1751
1752         /*
1753          * 5700 B0 chips do not support checksumming correctly due
1754          * to hardware bugs.
1755          */
1756         if (sc->bge_chipid != BGE_CHIPID_BCM5700_B0) {
1757                 ifp->if_capabilities |= IFCAP_HWCSUM;
1758                 ifp->if_hwassist = BGE_CSUM_FEATURES;
1759         }
1760         ifp->if_capenable = ifp->if_capabilities;
1761
1762         /*
1763          * Figure out what sort of media we have by checking the
1764          * hardware config word in the first 32k of NIC internal memory,
1765          * or fall back to examining the EEPROM if necessary.
1766          * Note: on some BCM5700 cards, this value appears to be unset.
1767          * If that's the case, we have to rely on identifying the NIC
1768          * by its PCI subsystem ID, as we do below for the SysKonnect
1769          * SK-9D41.
1770          */
1771         if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER)
1772                 hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
1773         else {
1774                 if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
1775                                     sizeof(hwcfg))) {
1776                         device_printf(dev, "failed to read EEPROM\n");
1777                         error = ENXIO;
1778                         goto fail;
1779                 }
1780                 hwcfg = ntohl(hwcfg);
1781         }
1782
1783         if ((hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER)
1784                 sc->bge_tbi = 1;
1785
1786         /* The SysKonnect SK-9D41 is a 1000baseSX card. */
1787         if (pci_get_subdevice(dev) == PCI_PRODUCT_SCHNEIDERKOCH_SK_9D41)
1788                 sc->bge_tbi = 1;
1789
1790         if (sc->bge_tbi) {
1791                 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK,
1792                     bge_ifmedia_upd, bge_ifmedia_sts);
1793                 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
1794                 ifmedia_add(&sc->bge_ifmedia,
1795                     IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL);
1796                 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
1797                 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO);
1798                 sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
1799         } else {
1800                 /*
1801                  * Do transceiver setup.
1802                  */
1803                 if (mii_phy_probe(dev, &sc->bge_miibus,
1804                     bge_ifmedia_upd, bge_ifmedia_sts)) {
1805                         device_printf(dev, "MII without any PHY!\n");
1806                         error = ENXIO;
1807                         goto fail;
1808                 }
1809         }
1810
1811         /*
1812          * When using the BCM5701 in PCI-X mode, data corruption has
1813          * been observed in the first few bytes of some received packets.
1814          * Aligning the packet buffer in memory eliminates the corruption.
1815          * Unfortunately, this misaligns the packet payloads.  On platforms
1816          * which do not support unaligned accesses, we will realign the
1817          * payloads by copying the received packets.
1818          */
1819         switch (sc->bge_chipid) {
1820         case BGE_CHIPID_BCM5701_A0:
1821         case BGE_CHIPID_BCM5701_B0:
1822         case BGE_CHIPID_BCM5701_B2:
1823         case BGE_CHIPID_BCM5701_B5:
1824                 /* If in PCI-X mode, work around the alignment bug. */
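                     /*
                      * (BGE_PCISTATE_PCI_BUSMODE reads as zero when the
                      * bus is running in PCI-X mode, which is why the
                      * test below looks for BUSSPEED alone.)
                      */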
1825                 if ((pci_read_config(dev, BGE_PCI_PCISTATE, 4) &
1826                     (BGE_PCISTATE_PCI_BUSMODE | BGE_PCISTATE_PCI_BUSSPEED)) ==
1827                     BGE_PCISTATE_PCI_BUSSPEED)
1828                         sc->bge_rx_alignment_bug = 1;
1829                 break;
1830         }
1831
1832         /*
1833          * Call MI attach routine.
1834          */
1835         ether_ifattach(ifp, ether_addr, NULL);
1836
1837         error = bus_setup_intr(dev, sc->bge_irq, INTR_NETSAFE,
1838                                bge_intr, sc, &sc->bge_intrhand, 
1839                                ifp->if_serializer);
1840         if (error) {
1841                 ether_ifdetach(ifp);
1842                 device_printf(dev, "couldn't set up irq\n");
1843                 goto fail;
1844         }
1845         return(0);
1846 fail:
1847         bge_detach(dev);
1848         return(error);
1849 }
1850
1851 static int
1852 bge_detach(device_t dev)
1853 {
1854         struct bge_softc *sc = device_get_softc(dev);
1855         struct ifnet *ifp = &sc->arpcom.ac_if;
1856
1857         if (device_is_attached(dev)) {
1858                 lwkt_serialize_enter(ifp->if_serializer);
1859                 bge_stop(sc);
1860                 bge_reset(sc);
1861                 bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand);
1862                 lwkt_serialize_exit(ifp->if_serializer);
1863
1864                 ether_ifdetach(ifp);
1865         }
1866         if (sc->bge_tbi)
1867                 ifmedia_removeall(&sc->bge_ifmedia);
1868         if (sc->bge_miibus)
1869                 device_delete_child(dev, sc->bge_miibus);
1870         bus_generic_detach(dev);
1871
1872         bge_release_resources(sc);
1873         bge_dma_free(sc);
1874
1875         return 0;
1876 }
1877
1878 static void
1879 bge_release_resources(struct bge_softc *sc)
1880 {
1881         device_t dev;
1882
1883         dev = sc->bge_dev;
1884
1885         if (sc->bge_vpd_prodname != NULL)
1886                 kfree(sc->bge_vpd_prodname, M_DEVBUF);
1887
1888         if (sc->bge_vpd_readonly != NULL)
1889                 kfree(sc->bge_vpd_readonly, M_DEVBUF);
1890
1891         if (sc->bge_irq != NULL)
1892                 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->bge_irq);
1893
1894         if (sc->bge_res != NULL)
1895                 bus_release_resource(dev, SYS_RES_MEMORY,
1896                     BGE_PCI_BAR0, sc->bge_res);
1897 }
1898
1899 static void
1900 bge_reset(struct bge_softc *sc)
1901 {
1902         device_t dev;
1903         uint32_t cachesize, command, pcistate, reset;
1904         int i, val = 0;
1905
1906         dev = sc->bge_dev;
1907
1908         /* Save some important PCI state. */
1909         cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
1910         command = pci_read_config(dev, BGE_PCI_CMD, 4);
1911         pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);
1912
1913         pci_write_config(dev, BGE_PCI_MISC_CTL,
1914             BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
1915             BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW, 4);
1916
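             /*
              * In the value below, (65 << 1) programs the MISC_CFG
              * 32-bit timer prescaler for a 66MHz core clock; the same
              * field is written back after the reset completes.
              */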
1917         reset = BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1);
1918
1919         /* XXX: Broadcom Linux driver. */
1920         if (sc->bge_pcie) {
1921                 if (CSR_READ_4(sc, 0x7e2c) == 0x60)     /* PCIE 1.0 */
1922                         CSR_WRITE_4(sc, 0x7e2c, 0x20);
1923                 if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
1924                         /* Prevent PCIE link training during global reset */
1925                         CSR_WRITE_4(sc, BGE_MISC_CFG, (1<<29));
1926                         reset |= (1<<29);
1927                 }
1928         }
1929
1930         /* Issue global reset */
1931         bge_writereg_ind(sc, BGE_MISC_CFG, reset);
1932
1933         DELAY(1000);
1934
1935         /* XXX: Broadcom Linux driver. */
1936         if (sc->bge_pcie) {
1937                 if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
1938                         uint32_t v;
1939
1940                         DELAY(500000); /* wait for link training to complete */
1941                         v = pci_read_config(dev, 0xc4, 4);
1942                         pci_write_config(dev, 0xc4, v | (1<<15), 4);
1943                 }
1944                 /* Set PCIE max payload size and clear error status. */
1945                 pci_write_config(dev, 0xd8, 0xf5000, 4);
1946         }
1947
1948         /* Reset some of the PCI state that got zapped by reset */
1949         pci_write_config(dev, BGE_PCI_MISC_CTL,
1950             BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
1951             BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW, 4);
1952         pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
1953         pci_write_config(dev, BGE_PCI_CMD, command, 4);
1954         bge_writereg_ind(sc, BGE_MISC_CFG, (65 << 1));
1955
1956         /* Enable memory arbiter. */
1957         if (sc->bge_asicrev != BGE_ASICREV_BCM5705)
1958                 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
1959
1960         /*
1961          * Prevent PXE restart: write a magic number to the
1962          * general communications memory at 0xB50.
1963          */
1964         bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
1965         /*
1966          * Poll the value location we just wrote until
1967          * we see the 1's complement of the magic number.
1968          * This indicates that the firmware initialization
1969          * is complete.
1970          */
1971         for (i = 0; i < BGE_TIMEOUT; i++) {
1972                 val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
1973                 if (val == ~BGE_MAGIC_NUMBER)
1974                         break;
1975                 DELAY(10);
1976         }
1977         
1978         if (i == BGE_TIMEOUT) {
1979                 if_printf(&sc->arpcom.ac_if, "firmware handshake timed out\n");
1980                 return;
1981         }
1982
1983         /*
1984          * XXX Wait for the value of the PCISTATE register to
1985          * return to its original pre-reset state. This is a
1986          * fairly good indicator of reset completion. If we don't
1987          * wait for the reset to fully complete, trying to read
1988          * from the device's non-PCI registers may yield garbage
1989          * results.
1990          */
1991         for (i = 0; i < BGE_TIMEOUT; i++) {
1992                 if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate)
1993                         break;
1994                 DELAY(10);
1995         }
1996
1997         /* Fix up byte swapping */
1998         CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS |
1999             BGE_MODECTL_BYTESWAP_DATA);
2000
2001         CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
2002
2003         /*
2004          * The 5704 in TBI mode apparently needs some special
2005          * adjustment to ensure the SERDES drive level is set
2006          * to 1.2V.
2007          */
2008         if (sc->bge_asicrev == BGE_ASICREV_BCM5704 && sc->bge_tbi) {
2009                 uint32_t serdescfg;
2010
2011                 serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG);
2012                 serdescfg = (serdescfg & ~0xFFF) | 0x880;
2013                 CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg);
2014         }
2015
2016         /* XXX: Broadcom Linux driver. */
2017         if (sc->bge_pcie && sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
2018                 uint32_t v;
2019
2020                 v = CSR_READ_4(sc, 0x7c00);
2021                 CSR_WRITE_4(sc, 0x7c00, v | (1<<25));
2022         }
2023
2024         DELAY(10000);
2025 }
2026
2027 /*
2028  * Frame reception handling. This is called if there's a frame
2029  * on the receive return list.
2030  *
2031  * Note: we have to be able to handle two possibilities here:
2032  * 1) the frame is from the jumbo receive ring
2033  * 2) the frame is from the standard receive ring
2034  */
2035
2036 static void
2037 bge_rxeof(struct bge_softc *sc)
2038 {
2039         struct ifnet *ifp;
2040         int stdcnt = 0, jumbocnt = 0;
2041
2042         if (sc->bge_rx_saved_considx ==
2043             sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx)
2044                 return;
2045
2046         ifp = &sc->arpcom.ac_if;
2047
2048         bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
2049                         sc->bge_cdata.bge_rx_return_ring_map,
2050                         BUS_DMASYNC_POSTREAD);
2051         bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
2052                         sc->bge_cdata.bge_rx_std_ring_map,
2053                         BUS_DMASYNC_POSTREAD);
2054         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
2055             sc->bge_asicrev != BGE_ASICREV_BCM5750) {
2056                 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2057                                 sc->bge_cdata.bge_rx_jumbo_ring_map,
2058                                 BUS_DMASYNC_POSTREAD);
2059         }
2060
2061         while (sc->bge_rx_saved_considx !=
2062                sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx) {
2063                 struct bge_rx_bd        *cur_rx;
2064                 uint32_t                rxidx;
2065                 struct mbuf             *m = NULL;
2066                 uint16_t                vlan_tag = 0;
2067                 int                     have_tag = 0;
2068
2069                 cur_rx =
2070             &sc->bge_ldata.bge_rx_return_ring[sc->bge_rx_saved_considx];
2071
2072                 rxidx = cur_rx->bge_idx;
2073                 BGE_INC(sc->bge_rx_saved_considx, sc->bge_return_ring_cnt);
2074
2075                 if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
2076                         have_tag = 1;
2077                         vlan_tag = cur_rx->bge_vlan_tag;
2078                 }
2079
2080                 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
2081                         BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
2082                         m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
2083                         sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL;
2084                         jumbocnt++;
2085                         if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2086                                 ifp->if_ierrors++;
2087                                 bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
2088                                 continue;
2089                         }
2090                         if (bge_newbuf_jumbo(sc,
2091                             sc->bge_jumbo, NULL) == ENOBUFS) {
2092                                 ifp->if_ierrors++;
2093                                 bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
2094                                 continue;
2095                         }
2096                 } else {
2097                         BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
2098                         bus_dmamap_sync(sc->bge_cdata.bge_mtag,
2099                                         sc->bge_cdata.bge_rx_std_dmamap[rxidx],
2100                                         BUS_DMASYNC_POSTREAD);
2101                         bus_dmamap_unload(sc->bge_cdata.bge_mtag,
2102                                 sc->bge_cdata.bge_rx_std_dmamap[rxidx]);
2103                         m = sc->bge_cdata.bge_rx_std_chain[rxidx];
2104                         sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL;
2105                         stdcnt++;
2106                         if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2107                                 ifp->if_ierrors++;
2108                                 bge_newbuf_std(sc, sc->bge_std, m);
2109                                 continue;
2110                         }
2111                         if (bge_newbuf_std(sc, sc->bge_std,
2112                             NULL) == ENOBUFS) {
2113                                 ifp->if_ierrors++;
2114                                 bge_newbuf_std(sc, sc->bge_std, m);
2115                                 continue;
2116                         }
2117                 }
2118
2119                 ifp->if_ipackets++;
2120 #ifndef __i386__
2121                 /*
2122                  * The i386 allows unaligned accesses, but for other
2123                  * platforms we must make sure the payload is aligned.
2124                  */
2125                 if (sc->bge_rx_alignment_bug) {
2126                         bcopy(m->m_data, m->m_data + ETHER_ALIGN,
2127                             cur_rx->bge_len);
2128                         m->m_data += ETHER_ALIGN;
2129                 }
2130 #endif
2131                 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
2132                 m->m_pkthdr.rcvif = ifp;
2133
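                     /*
                      * The chip verifies the IP header checksum and
                      * computes the TCP/UDP checksum (pseudo header
                      * included) for us; a result of 0xffff means the
                      * packet is good.  Passing the raw value up via
                      * csum_data with CSUM_PSEUDO_HDR set lets the
                      * stack reject anything else.
                      */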
2134                 if (ifp->if_capenable & IFCAP_RXCSUM) {
2135                         if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
2136                                 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2137                                 if ((cur_rx->bge_ip_csum ^ 0xffff) == 0)
2138                                         m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2139                         }
2140                         if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM &&
2141                             m->m_pkthdr.len >= BGE_MIN_FRAME) {
2142                                 m->m_pkthdr.csum_data =
2143                                     cur_rx->bge_tcp_udp_csum;
2144                                 m->m_pkthdr.csum_flags |=
2145                                         CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2146                         }
2147                 }
2148
2149                 /*
2150                  * If we received a packet with a vlan tag, pass it
2151                  * to vlan_input() instead of ether_input().
2152                  */
2153                 if (have_tag) {
2154                         VLAN_INPUT_TAG(m, vlan_tag);
2155                         have_tag = vlan_tag = 0;
2156                 } else {
2157                         ifp->if_input(ifp, m);
2158                 }
2159         }
2160
2161         if (stdcnt > 0) {
2162                 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
2163                                 sc->bge_cdata.bge_rx_std_ring_map,
2164                                 BUS_DMASYNC_PREWRITE);
2165         }
2166
2167         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
2168             sc->bge_asicrev != BGE_ASICREV_BCM5750) {
2169                 if (jumbocnt > 0) {
2170                         bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2171                                         sc->bge_cdata.bge_rx_jumbo_ring_map,
2172                                         BUS_DMASYNC_PREWRITE);
2173                 }
2174         }
2175
2176         CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
2177         if (stdcnt)
2178                 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
2179         if (jumbocnt)
2180                 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
2181 }
2182
2183 static void
2184 bge_txeof(struct bge_softc *sc)
2185 {
2186         struct bge_tx_bd *cur_tx = NULL;
2187         struct ifnet *ifp;
2188
2189         if (sc->bge_tx_saved_considx ==
2190             sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx)
2191                 return;
2192
2193         ifp = &sc->arpcom.ac_if;
2194
2195         bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
2196                         sc->bge_cdata.bge_tx_ring_map,
2197                         BUS_DMASYNC_POSTREAD);
2198
2199         /*
2200          * Go through our tx ring and free mbufs for those
2201          * frames that have been sent.
2202          */
2203         while (sc->bge_tx_saved_considx !=
2204                sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx) {
2205                 uint32_t idx = 0;
2206
2207                 idx = sc->bge_tx_saved_considx;
2208                 cur_tx = &sc->bge_ldata.bge_tx_ring[idx];
2209                 if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
2210                         ifp->if_opackets++;
2211                 if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
2212                         bus_dmamap_sync(sc->bge_cdata.bge_mtag,
2213                                         sc->bge_cdata.bge_tx_dmamap[idx],
2214                                         BUS_DMASYNC_POSTWRITE);
2215                         bus_dmamap_unload(sc->bge_cdata.bge_mtag,
2216                             sc->bge_cdata.bge_tx_dmamap[idx]);
2217                         m_freem(sc->bge_cdata.bge_tx_chain[idx]);
2218                         sc->bge_cdata.bge_tx_chain[idx] = NULL;
2219                 }
2220                 sc->bge_txcnt--;
2221                 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
2222                 ifp->if_timer = 0;
2223         }
2224
2225         if (cur_tx != NULL &&
2226             (BGE_TX_RING_CNT - sc->bge_txcnt) >=
2227             (BGE_NSEG_RSVD + BGE_NSEG_SPARE))
2228                 ifp->if_flags &= ~IFF_OACTIVE;
2229
2230         if (!ifq_is_empty(&ifp->if_snd))
2231                 ifp->if_start(ifp);
2232 }
2233
2234 static void
2235 bge_intr(void *xsc)
2236 {
2237         struct bge_softc *sc = xsc;
2238         struct ifnet *ifp = &sc->arpcom.ac_if;
2239         uint32_t status, statusword, mimode;
2240
2241         bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
2242                         sc->bge_cdata.bge_status_map,
2243                         BUS_DMASYNC_POSTREAD);
2244
2245         /* XXX */
2246         statusword = loadandclear(&sc->bge_ldata.bge_status_block->bge_status);
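             /*
              * (loadandclear atomically fetches and zeroes the status
              * word, presumably so an update DMA'd in by the NIC while
              * we are running is not lost.)
              */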
2247
2248 #ifdef notdef
2249         /* Avoid this for now -- checking this register is expensive. */
2250         /* Make sure this is really our interrupt. */
2251         if (!(CSR_READ_4(sc, BGE_MISC_LOCAL_CTL) & BGE_MLC_INTR_STATE))
2252                 return;
2253 #endif
2254         /* Ack interrupt and stop others from occurring. */
2255         CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
2256
2257         /*
2258          * Process link state changes.
2259          * Grrr. The link status word in the status block does
2260          * not work correctly on the BCM5700 rev AX and BX chips,
2261          * according to all available information. Hence, we have
2262          * to enable MII interrupts in order to properly obtain
2263          * async link changes. Unfortunately, this also means that
2264          * we have to read the MAC status register to detect link
2265          * changes, thereby adding an additional register access to
2266          * the interrupt handler.
2267          */
2268
2269         if (sc->bge_asicrev == BGE_ASICREV_BCM5700) {
2270                 status = CSR_READ_4(sc, BGE_MAC_STS);
2271                 if (status & BGE_MACSTAT_MI_INTERRUPT) {
2272                         sc->bge_link = 0;
2273                         callout_stop(&sc->bge_stat_timer);
2274                         bge_tick_serialized(sc);
2275                         /* Clear the interrupt */
2276                         CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
2277                             BGE_EVTENB_MI_INTERRUPT);
2278                         bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR);
2279                         bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR,
2280                             BRGPHY_INTRS);
2281                 }
2282         } else {
2283                 if (statusword & BGE_STATFLAG_LINKSTATE_CHANGED) {
2284                         /*
2285                          * Sometimes PCS encoding errors are detected in
2286                          * TBI mode (on fiber NICs), and for some reason
2287                          * the chip will signal them as link changes.
2288                          * If we get a link change event, but the 'PCS
2289                          * encoding error' bit in the MAC status register
2290                          * is set, don't bother doing a link check.
2291                          * This avoids spurious "gigabit link up" messages
2292                          * that sometimes appear on fiber NICs during
2293                          * periods of heavy traffic. (There should be no
2294                          * effect on copper NICs.)
2295                          *
2296                          * If we do have a copper NIC (bge_tbi == 0) then
2297                          * check that the AUTOPOLL bit is set before
2298                          * processing the event as a real link change.
2299                          * Turning AUTOPOLL on and off in the MII read/write
2300                          * functions will often trigger a link status
2301                          * interrupt for no reason.
2302                          */
2303                         status = CSR_READ_4(sc, BGE_MAC_STS);
2304                         mimode = CSR_READ_4(sc, BGE_MI_MODE);
2305                         if (!(status & (BGE_MACSTAT_PORT_DECODE_ERROR |
2306                                         BGE_MACSTAT_MI_COMPLETE)) &&
2307                             (sc->bge_tbi ||
2308                              (mimode & BGE_MIMODE_AUTOPOLL))) {
2309                                 sc->bge_link = 0;
2310                                 callout_stop(&sc->bge_stat_timer);
2311                                 bge_tick_serialized(sc);
2312                         }
2315                         /* Clear the interrupt */
2316                         CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
2317                             BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
2318                             BGE_MACSTAT_LINK_CHANGED);
2319
2320                         /* Force flush the status block cached by PCI bridge */
2321                         CSR_READ_4(sc, BGE_MBX_IRQ0_LO);
2322                 }
2323         }
2324
2325         if (ifp->if_flags & IFF_RUNNING) {
2326                 /* Check RX return ring producer/consumer */
2327                 bge_rxeof(sc);
2328
2329                 /* Check TX ring producer/consumer */
2330                 bge_txeof(sc);
2331         }
2332
2333         bge_handle_events(sc);
2334
2335         /* Re-enable interrupts. */
2336         CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
2337 }
2338
2339 static void
2340 bge_tick(void *xsc)
2341 {
2342         struct bge_softc *sc = xsc;
2343         struct ifnet *ifp = &sc->arpcom.ac_if;
2344
2345         lwkt_serialize_enter(ifp->if_serializer);
2346         bge_tick_serialized(xsc);
2347         lwkt_serialize_exit(ifp->if_serializer);
2348 }
2349
2350 static void
2351 bge_tick_serialized(void *xsc)
2352 {
2353         struct bge_softc *sc = xsc;
2354         struct ifnet *ifp = &sc->arpcom.ac_if;
2355         struct mii_data *mii = NULL;
2356         struct ifmedia *ifm = NULL;
2357
2358         if (sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
2359             sc->bge_asicrev == BGE_ASICREV_BCM5750)
2360                 bge_stats_update_regs(sc);
2361         else
2362                 bge_stats_update(sc);
2363
2364         callout_reset(&sc->bge_stat_timer, hz, bge_tick, sc);
2365
2366         if (sc->bge_link) {
2367                 return;
2368         }
2369
2370         if (sc->bge_tbi) {
2371                 ifm = &sc->bge_ifmedia;
2372                 if (CSR_READ_4(sc, BGE_MAC_STS) &
2373                     BGE_MACSTAT_TBI_PCS_SYNCHED) {
2374                         sc->bge_link++;
2375                         if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
2376                                 BGE_CLRBIT(sc, BGE_MAC_MODE,
2377                                            BGE_MACMODE_TBI_SEND_CFGS);
2378                         }
2379                         CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
2380                         if_printf(ifp, "gigabit link up\n");
2381                         if (!ifq_is_empty(&ifp->if_snd))
2382                                 ifp->if_start(ifp);
2383                 }
2384                 return;
2385         }
2386
2387         mii = device_get_softc(sc->bge_miibus);
2388         mii_tick(mii);
2389  
2390         if (!sc->bge_link) {
2391                 mii_pollstat(mii);
2392                 if (mii->mii_media_status & IFM_ACTIVE &&
2393                     IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
2394                         sc->bge_link++;
2395                         if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
2396                             IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
2397                                 if_printf(ifp, "gigabit link up\n");
2398                         if (!ifq_is_empty(&ifp->if_snd))
2399                                 ifp->if_start(ifp);
2400                 }
2401         }
2402 }
2403
2404 static void
2405 bge_stats_update_regs(struct bge_softc *sc)
2406 {
2407         struct ifnet *ifp = &sc->arpcom.ac_if;
2408         struct bge_mac_stats_regs stats;
2409         uint32_t *s;
2410         int i;
2411
2412         s = (uint32_t *)&stats;
2413         for (i = 0; i < sizeof(struct bge_mac_stats_regs); i += 4) {
2414                 *s = CSR_READ_4(sc, BGE_RX_STATS + i);
2415                 s++;
2416         }
2417
2418         ifp->if_collisions +=
2419            (stats.dot3StatsSingleCollisionFrames +
2420            stats.dot3StatsMultipleCollisionFrames +
2421            stats.dot3StatsExcessiveCollisions +
2422            stats.dot3StatsLateCollisions) -
2423            ifp->if_collisions;
2424 }
2425
2426 static void
2427 bge_stats_update(struct bge_softc *sc)
2428 {
2429         struct ifnet *ifp = &sc->arpcom.ac_if;
2430         bus_size_t stats;
2431
2432         stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;
2433
2434 #define READ_STAT(sc, stats, stat)      \
2435         CSR_READ_4(sc, (stats) + offsetof(struct bge_stats, stat))
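             /*
              * Each counter in the statistics block is a 64-bit pair;
              * reading just the low word is sufficient for the deltas
              * computed here.
              */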
2436
2437         ifp->if_collisions +=
2438            (READ_STAT(sc, stats,
2439                 txstats.dot3StatsSingleCollisionFrames.bge_addr_lo) +
2440             READ_STAT(sc, stats,
2441                 txstats.dot3StatsMultipleCollisionFrames.bge_addr_lo) +
2442             READ_STAT(sc, stats,
2443                 txstats.dot3StatsExcessiveCollisions.bge_addr_lo) +
2444             READ_STAT(sc, stats,
2445                 txstats.dot3StatsLateCollisions.bge_addr_lo)) -
2446            ifp->if_collisions;
2447
2448 #undef READ_STAT
2449
2450 #ifdef notdef
2451         ifp->if_collisions +=
2452            (sc->bge_rdata->bge_info.bge_stats.dot3StatsSingleCollisionFrames +
2453            sc->bge_rdata->bge_info.bge_stats.dot3StatsMultipleCollisionFrames +
2454            sc->bge_rdata->bge_info.bge_stats.dot3StatsExcessiveCollisions +
2455            sc->bge_rdata->bge_info.bge_stats.dot3StatsLateCollisions) -
2456            ifp->if_collisions;
2457 #endif
2458 }
2459
2460 /*
2461  * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
2462  * pointers to descriptors.
2463  */
2464 static int
2465 bge_encap(struct bge_softc *sc, struct mbuf *m_head, uint32_t *txidx)
2466 {
2467         struct bge_tx_bd *d = NULL;
2468         uint16_t csum_flags = 0;
2469         struct ifvlan *ifv = NULL;
2470         struct bge_dmamap_arg ctx;
2471         bus_dma_segment_t segs[BGE_NSEG_NEW];
2472         bus_dmamap_t map;
2473         int error, maxsegs, idx, i;
2474
2475         if ((m_head->m_flags & (M_PROTO1|M_PKTHDR)) == (M_PROTO1|M_PKTHDR) &&
2476             m_head->m_pkthdr.rcvif != NULL &&
2477             m_head->m_pkthdr.rcvif->if_type == IFT_L2VLAN)
2478                 ifv = m_head->m_pkthdr.rcvif->if_softc;
2479
2480         if (m_head->m_pkthdr.csum_flags) {
2481                 if (m_head->m_pkthdr.csum_flags & CSUM_IP)
2482                         csum_flags |= BGE_TXBDFLAG_IP_CSUM;
2483                 if (m_head->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
2484                         csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
2485                 if (m_head->m_flags & M_LASTFRAG)
2486                         csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
2487                 else if (m_head->m_flags & M_FRAG)
2488                         csum_flags |= BGE_TXBDFLAG_IP_FRAG;
2489         }
2490
2491         idx = *txidx;
2492         map = sc->bge_cdata.bge_tx_dmamap[idx];
2493
2494         maxsegs = (BGE_TX_RING_CNT - sc->bge_txcnt) - BGE_NSEG_RSVD;
2495         KASSERT(maxsegs >= BGE_NSEG_SPARE,
2496                 ("not enough segments %d\n", maxsegs));
2497
2498         if (maxsegs > BGE_NSEG_NEW)
2499                 maxsegs = BGE_NSEG_NEW;
2500
2501         /*
2502          * Pad outbound frame to BGE_MIN_FRAME for an unusual reason.
2503          * The bge hardware will pad out Tx runts to BGE_MIN_FRAME,
2504          * but when such padded frames employ the bge IP/TCP checksum
2505          * offload, the hardware checksum assist gives incorrect results
2506          * (possibly from incorporating its own padding into the UDP/TCP
2507          * checksum; who knows).  If we pad such runts with zeros, the
2508          * onboard checksum comes out correct.  We do this by pretending
2509          * the mbuf chain has too many fragments so the coalescing code
2510          * below can assemble the packet into a single buffer that's
2511          * padded out to the minimum frame size.
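              * Zero padding is safe: zero bytes are the identity of the
              * ones complement sum, so the pad cannot perturb the
              * checksum the hardware computes over the padded frame.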
2512          */
2513         if ((csum_flags & BGE_TXBDFLAG_TCP_UDP_CSUM) &&
2514             m_head->m_pkthdr.len < BGE_MIN_FRAME) {
2515                 error = E2BIG;
2516         } else {
2517                 ctx.bge_segs = segs;
2518                 ctx.bge_maxsegs = maxsegs;
2519                 error = bus_dmamap_load_mbuf(sc->bge_cdata.bge_mtag, map,
2520                                              m_head, bge_dma_map_mbuf, &ctx,
2521                                              BUS_DMA_NOWAIT);
2522         }
2523         if (error == E2BIG || ctx.bge_maxsegs == 0) {
2524                 struct mbuf *m_new;
2525
2526                 m_new = m_defrag(m_head, MB_DONTWAIT);
2527                 if (m_new == NULL) {
2528                         if_printf(&sc->arpcom.ac_if,
2529                                   "could not defrag TX mbuf\n");
2530                         error = ENOBUFS;
2531                         goto back;
2532                 } else {
2533                         m_head = m_new;
2534                 }
2535
2536                 /*
2537                  * Manually pad short frames, and zero the pad space
2538                  * to avoid leaking data.
2539                  */
2540                 if ((csum_flags & BGE_TXBDFLAG_TCP_UDP_CSUM) &&
2541                     m_head->m_pkthdr.len < BGE_MIN_FRAME) {
2542                         int pad_len = BGE_MIN_FRAME - m_head->m_pkthdr.len;
2543
2544                         bzero(mtod(m_head, char *) + m_head->m_pkthdr.len,
2545                               pad_len);
2546                         m_head->m_pkthdr.len += pad_len;
2547                         m_head->m_len = m_head->m_pkthdr.len;
2548                 }
2549
2550                 ctx.bge_segs = segs;
2551                 ctx.bge_maxsegs = maxsegs;
2552                 error = bus_dmamap_load_mbuf(sc->bge_cdata.bge_mtag, map,
2553                                              m_head, bge_dma_map_mbuf, &ctx,
2554                                              BUS_DMA_NOWAIT);
2555                 if (error || ctx.bge_maxsegs == 0) {
2556                         if_printf(&sc->arpcom.ac_if,
2557                                   "could not defrag TX mbuf\n");
2558                         if (error == 0)
2559                                 error = E2BIG;
2560                         goto back;
2561                 }
2562         } else if (error) {
2563                 if_printf(&sc->arpcom.ac_if, "could not map TX mbuf\n");
2564                 goto back;
2565         }
2566
2567         bus_dmamap_sync(sc->bge_cdata.bge_mtag, map, BUS_DMASYNC_PREWRITE);
2568
2569         for (i = 0; ; i++) {
2570                 d = &sc->bge_ldata.bge_tx_ring[idx];
2571
2572                 d->bge_addr.bge_addr_lo = BGE_ADDR_LO(ctx.bge_segs[i].ds_addr);
2573                 d->bge_addr.bge_addr_hi = BGE_ADDR_HI(ctx.bge_segs[i].ds_addr);
2574                 d->bge_len = ctx.bge_segs[i].ds_len;
2575                 d->bge_flags = csum_flags;
2576
2577                 if (i == ctx.bge_maxsegs - 1)
2578                         break;
2579                 BGE_INC(idx, BGE_TX_RING_CNT);
2580         }
2581         /* Mark the last segment as end of packet... */
2582         d->bge_flags |= BGE_TXBDFLAG_END;
2583
2584         /* Set vlan tag to the first segment of the packet. */
2585         d = &sc->bge_ldata.bge_tx_ring[*txidx];
2586         if (ifv != NULL) {
2587                 d->bge_flags |= BGE_TXBDFLAG_VLAN_TAG;
2588                 d->bge_vlan_tag = ifv->ifv_tag;
2589         } else {
2590                 d->bge_vlan_tag = 0;
2591         }
2592
2593         /*
2594          * Ensure that the map for this transmission is placed at
2595          * the array index of the last descriptor in this chain.
2596          */
2597         sc->bge_cdata.bge_tx_dmamap[*txidx] = sc->bge_cdata.bge_tx_dmamap[idx];
2598         sc->bge_cdata.bge_tx_dmamap[idx] = map;
2599         sc->bge_cdata.bge_tx_chain[idx] = m_head;
2600         sc->bge_txcnt += ctx.bge_maxsegs;
2601
2602         BGE_INC(idx, BGE_TX_RING_CNT);
2603         *txidx = idx;
2604 back:
2605         if (error)
2606                 m_freem(m_head);
2607         return error;
2608 }
2609
2610 /*
2611  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
2612  * to the mbuf data regions directly in the transmit descriptors.
2613  */
2614 static void
2615 bge_start(struct ifnet *ifp)
2616 {
2617         struct bge_softc *sc = ifp->if_softc;
2618         struct mbuf *m_head = NULL;
2619         uint32_t prodidx;
2620         int need_trans;
2621
2622         if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING ||
2623             !sc->bge_link)
2624                 return;
2625
2626         prodidx = sc->bge_tx_prodidx;
2627
2628         need_trans = 0;
2629         while(sc->bge_cdata.bge_tx_chain[prodidx] == NULL) {
2630                 m_head = ifq_poll(&ifp->if_snd);
2631                 if (m_head == NULL)
2632                         break;
2633
2634                 /*
2635                  * XXX
2636                  * The code inside the if() block is never reached since we
2637                  * must mark CSUM_IP_FRAGS in our if_hwassist to start getting
2638                  * requests to checksum TCP/UDP in a fragmented packet.
2639                  * 
2640                  * XXX
2641                  * safety overkill.  If this is a fragmented packet chain
2642                  * with delayed TCP/UDP checksums, then only encapsulate
2643                  * it if we have enough descriptors to handle the entire
2644                  * chain at once.
2645                  * (paranoia -- may not actually be needed)
2646                  */
2647                 if (m_head->m_flags & M_FIRSTFRAG &&
2648                     m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
2649                         if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
2650                             m_head->m_pkthdr.csum_data + 16) {
2651                                 ifp->if_flags |= IFF_OACTIVE;
2652                                 break;
2653                         }
2654                 }
2655
2656                 /*
2657                  * Sanity check: avoid coming within BGE_NSEG_RSVD
2658                  * descriptors of the end of the ring.  Also make
2659                  * sure there are BGE_NSEG_SPARE descriptors for
2660                  * jumbo buffers' defragmentation.
2661                  */
2662                 if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
2663                     (BGE_NSEG_RSVD + BGE_NSEG_SPARE)) {
2664                         ifp->if_flags |= IFF_OACTIVE;
2665                         break;
2666                 }
2667
2668                 /*
2669                  * Dequeue the packet before encapsulation, since
2670                  * bge_encap() may free the packet if error happens.
2671                  */
2672                 ifq_dequeue(&ifp->if_snd, m_head);
2673
2674                 /*
2675                  * Pack the data into the transmit ring. If we
2676                  * don't have room, set the OACTIVE flag and wait
2677                  * for the NIC to drain the ring.
2678                  */
2679                 if (bge_encap(sc, m_head, &prodidx)) {
2680                         ifp->if_flags |= IFF_OACTIVE;
2681                         break;
2682                 }
2683                 need_trans = 1;
2684
2685                 BPF_MTAP(ifp, m_head);
2686         }
2687
2688         if (!need_trans)
2689                 return;
2690
2691         /* Transmit */
2692         CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
2693         /* 5700 b2 errata */
2694         if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
2695                 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
2696
2697         sc->bge_tx_prodidx = prodidx;
2698
2699         /*
2700          * Set a timeout in case the chip goes out to lunch.
2701          */
2702         ifp->if_timer = 5;
2703 }
2704
2705 static void
2706 bge_init(void *xsc)
2707 {
2708         struct bge_softc *sc = xsc;
2709         struct ifnet *ifp = &sc->arpcom.ac_if;
2710         uint16_t *m;
2711
2712         ASSERT_SERIALIZED(ifp->if_serializer);
2713
2714         if (ifp->if_flags & IFF_RUNNING)
2715                 return;
2716
2717         /* Cancel pending I/O and flush buffers. */
2718         bge_stop(sc);
2719         bge_reset(sc);
2720         bge_chipinit(sc);
2721
2722         /*
2723          * Init the various state machines, ring
2724          * control blocks and firmware.
2725          */
2726         if (bge_blockinit(sc)) {
2727                 if_printf(ifp, "initialization failure\n");
2728                 return;
2729         }
2730
2731         /* Specify MTU. */
2732         CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
2733             ETHER_HDR_LEN + ETHER_CRC_LEN + EVL_ENCAPLEN);
2734
2735         /* Load our MAC address. */
2736         m = (uint16_t *)&sc->arpcom.ac_enaddr[0];
2737         CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
2738         CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
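        /*
         * The address is written as three big-endian 16-bit words; for
         * example, 00:11:22:33:44:55 yields BGE_MAC_ADDR1_LO = 0x00000011
         * and BGE_MAC_ADDR1_HI = 0x22334455 from the two stores above.
         */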
2739
2740         /* Enable or disable promiscuous mode as needed. */
2741         bge_setpromisc(sc);
2742
2743         /* Program multicast filter. */
2744         bge_setmulti(sc);
2745
2746         /* Init RX ring. */
2747         bge_init_rx_ring_std(sc);
2748
2749         /*
2750          * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
2751                  * memory to ensure that the chip has in fact read the first
2752          * entry of the ring.
2753          */
2754         if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
2755                 uint32_t                v, i;
2756                 for (i = 0; i < 10; i++) {
2757                         DELAY(20);
2758                         v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
2759                         if (v == (MCLBYTES - ETHER_ALIGN))
2760                                 break;
2761                 }
2762                 if (i == 10)
2763                         if_printf(ifp, "5705 A0 chip failed to load RX ring\n");
2764         }
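        /*
         * The value polled above is the length field of the first
         * standard RX descriptor as copied into the chip's local
         * memory; bge_newbuf_std() sets that length to the cluster
         * size minus the 2-byte RX alignment fixup, hence the test
         * against MCLBYTES - ETHER_ALIGN.
         */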
2765
2766         /* Init jumbo RX ring. */
2767         if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
2768                 bge_init_rx_ring_jumbo(sc);
2769
2770         /* Init our RX return ring index */
2771         sc->bge_rx_saved_considx = 0;
2772
2773         /* Init TX ring. */
2774         bge_init_tx_ring(sc);
2775
2776         /* Turn on transmitter */
2777         BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE);
2778
2779         /* Turn on receiver */
2780         BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
2781
2782         /* Tell firmware we're alive. */
2783         BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
2784
2785         /* Enable host interrupts. */
2786         BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
2787         BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
2788         CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
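        /*
         * Unmasking is a three-step sequence: clear any latched INTA
         * state, unmask the PCI interrupt, then write 0 to mailbox 0
         * (bge_stop() masks by writing 1 to the same mailbox).
         */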
2789
2790         bge_ifmedia_upd(ifp);
2791
2792         ifp->if_flags |= IFF_RUNNING;
2793         ifp->if_flags &= ~IFF_OACTIVE;
2794
2795         callout_reset(&sc->bge_stat_timer, hz, bge_tick, sc);
2796 }
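/*
 * bge_init() must be entered with the interface serializer held (note
 * the ASSERT_SERIALIZED above).  A minimal sketch of a correct call
 * from elsewhere in the driver:
 *
 *	lwkt_serialize_enter(ifp->if_serializer);
 *	bge_init(sc);
 *	lwkt_serialize_exit(ifp->if_serializer);
 *
 * bge_ioctl() below, for instance, is already serialized when it
 * calls bge_init().
 */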
2797
2798 /*
2799  * Set media options.
2800  */
2801 static int
2802 bge_ifmedia_upd(struct ifnet *ifp)
2803 {
2804         struct bge_softc *sc = ifp->if_softc;
2805         struct ifmedia *ifm = &sc->bge_ifmedia;
2806         struct mii_data *mii;
2807
2808         /* If this is a 1000baseX NIC, enable the TBI port. */
2809         if (sc->bge_tbi) {
2810                 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2811                         return(EINVAL);
2812                 switch(IFM_SUBTYPE(ifm->ifm_media)) {
2813                 case IFM_AUTO:
2814                         /*
2815                          * The BCM5704 ASIC appears to have a special
2816                          * mechanism for programming the autoneg
2817                          * advertisement registers in TBI mode.
2818                          */
2819                         if (!bge_fake_autoneg &&
2820                             sc->bge_asicrev == BGE_ASICREV_BCM5704) {
2821                                 uint32_t sgdig;
2822
2823                                 CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
2824                                 sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
2825                                 sgdig |= BGE_SGDIGCFG_AUTO |
2826                                          BGE_SGDIGCFG_PAUSE_CAP |
2827                                          BGE_SGDIGCFG_ASYM_PAUSE;
2828                                 CSR_WRITE_4(sc, BGE_SGDIG_CFG,
2829                                             sgdig | BGE_SGDIGCFG_SEND);
2830                                 DELAY(5);
2831                                 CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig);
2832                         }
2833                         break;
2834                 case IFM_1000_SX:
2835                         if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
2836                                 BGE_CLRBIT(sc, BGE_MAC_MODE,
2837                                     BGE_MACMODE_HALF_DUPLEX);
2838                         } else {
2839                                 BGE_SETBIT(sc, BGE_MAC_MODE,
2840                                     BGE_MACMODE_HALF_DUPLEX);
2841                         }
2842                         break;
2843                 default:
2844                         return(EINVAL);
2845                 }
2846                 return(0);
2847         }
2848
2849         mii = device_get_softc(sc->bge_miibus);
2850         sc->bge_link = 0;
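        /* If several PHY instances are attached, reset each one before switching media. */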
2851         if (mii->mii_instance) {
2852                 struct mii_softc *miisc;
2853                 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
2854                         mii_phy_reset(miisc);
2855         }
2856         mii_mediachg(mii);
2857
2858         return(0);
2859 }
2860
2861 /*
2862  * Report current media status.
2863  */
2864 static void
2865 bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
2866 {
2867         struct bge_softc *sc = ifp->if_softc;
2868         struct mii_data *mii;
2869
2870         if (sc->bge_tbi) {
2871                 ifmr->ifm_status = IFM_AVALID;
2872                 ifmr->ifm_active = IFM_ETHER;
2873                 if (CSR_READ_4(sc, BGE_MAC_STS) &
2874                     BGE_MACSTAT_TBI_PCS_SYNCHED)
2875                         ifmr->ifm_status |= IFM_ACTIVE;
2876                 ifmr->ifm_active |= IFM_1000_SX;
2877                 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
2878                         ifmr->ifm_active |= IFM_HDX;
2879                 else
2880                         ifmr->ifm_active |= IFM_FDX;
2881                 return;
2882         }
2883
2884         mii = device_get_softc(sc->bge_miibus);
2885         mii_pollstat(mii);
2886         ifmr->ifm_active = mii->mii_media_active;
2887         ifmr->ifm_status = mii->mii_media_status;
2888 }
2889
2890 static int
2891 bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
2892 {
2893         struct bge_softc *sc = ifp->if_softc;
2894         struct ifreq *ifr = (struct ifreq *) data;
2895         int mask, error = 0;
2896         struct mii_data *mii;
2897
2898         ASSERT_SERIALIZED(ifp->if_serializer);
2899
2900         switch(command) {
2901         case SIOCSIFMTU:
2902                 /* Disallow jumbo frames on 5705/5750. */
2903                 if (((sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
2904                       sc->bge_asicrev == BGE_ASICREV_BCM5750) &&
2905                      ifr->ifr_mtu > ETHERMTU) || ifr->ifr_mtu > BGE_JUMBO_MTU)
2906                         error = EINVAL;
2907                 else {
2908                         ifp->if_mtu = ifr->ifr_mtu;
2909                         ifp->if_flags &= ~IFF_RUNNING;
2910                         bge_init(sc);
2911                 }
2912                 break;
2913         case SIOCSIFFLAGS:
2914                 if (ifp->if_flags & IFF_UP) {
2915                         if (ifp->if_flags & IFF_RUNNING) {
2916                                 int flags = ifp->if_flags ^ sc->bge_if_flags;
2917
2918                                 /*
2919                                  * If only the state of the PROMISC flag
2920                                  * changed, then just use the 'set promisc
2921                                  * mode' command instead of reinitializing
2922                                  * the entire NIC. Doing a full re-init
2923                                  * means reloading the firmware and waiting
2924                                  * for it to start up, which may take a
2925                                  * second or two.  Similarly for ALLMULTI.
2926                                  */
2927                                 if (flags & IFF_PROMISC)
2928                                         bge_setpromisc(sc);
2929                                 if (flags & IFF_ALLMULTI)
2930                                         bge_setmulti(sc);
2931                         } else {
2932                                 bge_init(sc);
2933                         }
2934                 } else {
2935                         if (ifp->if_flags & IFF_RUNNING)
2936                                 bge_stop(sc);
2937                 }
2938                 sc->bge_if_flags = ifp->if_flags;
2939                 error = 0;
2940                 break;
2941         case SIOCADDMULTI:
2942         case SIOCDELMULTI:
2943                 if (ifp->if_flags & IFF_RUNNING) {
2944                         bge_setmulti(sc);
2945                         error = 0;
2946                 }
2947                 break;
2948         case SIOCSIFMEDIA:
2949         case SIOCGIFMEDIA:
2950                 if (sc->bge_tbi) {
2951                         error = ifmedia_ioctl(ifp, ifr,
2952                             &sc->bge_ifmedia, command);
2953                 } else {
2954                         mii = device_get_softc(sc->bge_miibus);
2955                         error = ifmedia_ioctl(ifp, ifr,
2956                             &mii->mii_media, command);
2957                 }
2958                 break;
2959         case SIOCSIFCAP:
2960                 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
2961                 if (mask & IFCAP_HWCSUM) {
2962                         ifp->if_capenable ^= IFCAP_HWCSUM;
2963                         if (IFCAP_HWCSUM & ifp->if_capenable)
2964                                 ifp->if_hwassist = BGE_CSUM_FEATURES;
2965                         else
2966                                 ifp->if_hwassist = 0;
2967                 }
2968                 error = 0;
2969                 break;
2970         default:
2971                 error = ether_ioctl(ifp, command, data);
2972                 break;
2973         }
2974         return(error);
2975 }
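/*
 * A hypothetical userland sketch (not part of the driver) of driving
 * the SIOCSIFCAP case above to toggle hardware checksumming on bge0;
 * error handling omitted:
 *
 *	#include <sys/socket.h>
 *	#include <sys/sockio.h>
 *	#include <sys/ioctl.h>
 *	#include <net/if.h>
 *	#include <string.h>
 *
 *	struct ifreq ifr;
 *	int s = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	strlcpy(ifr.ifr_name, "bge0", sizeof(ifr.ifr_name));
 *	ioctl(s, SIOCGIFCAP, &ifr);
 *	ifr.ifr_reqcap = ifr.ifr_curcap ^ IFCAP_HWCSUM;
 *	ioctl(s, SIOCSIFCAP, &ifr);
 */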
2976
2977 static void
2978 bge_watchdog(struct ifnet *ifp)
2979 {
2980         struct bge_softc *sc = ifp->if_softc;
2981
2982         if_printf(ifp, "watchdog timeout -- resetting\n");
2983
2984         ifp->if_flags &= ~IFF_RUNNING;
2985         bge_init(sc);
2986
2987         ifp->if_oerrors++;
2988
2989         if (!ifq_is_empty(&ifp->if_snd))
2990                 ifp->if_start(ifp);
2991 }
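/*
 * The watchdog fires only if if_timer, armed to 5 seconds in
 * bge_start(), expires before bge_txeof() sees the TX ring drain;
 * recovery is a full stop/init cycle followed by a queue restart.
 */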
2992
2993 /*
2994  * Stop the adapter and free any mbufs allocated to the
2995  * RX and TX lists.
2996  */
2997 static void
2998 bge_stop(struct bge_softc *sc)
2999 {
3000         struct ifnet *ifp = &sc->arpcom.ac_if;
3001         struct ifmedia_entry *ifm;
3002         struct mii_data *mii = NULL;
3003         int mtmp, itmp;
3004
3005         ASSERT_SERIALIZED(ifp->if_serializer);
3006
3007         if (!sc->bge_tbi)
3008                 mii = device_get_softc(sc->bge_miibus);
3009
3010         callout_stop(&sc->bge_stat_timer);
3011
3012         /*
3013          * Disable all of the receiver blocks
3014          */
3015         BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
3016         BGE_CLRBIT(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
3017         BGE_CLRBIT(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
3018         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
3019             sc->bge_asicrev != BGE_ASICREV_BCM5750)
3020                 BGE_CLRBIT(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
3021         BGE_CLRBIT(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
3022         BGE_CLRBIT(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
3023         BGE_CLRBIT(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
3024
3025         /*
3026          * Disable all of the transmit blocks
3027          */
3028         BGE_CLRBIT(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
3029         BGE_CLRBIT(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
3030         BGE_CLRBIT(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
3031         BGE_CLRBIT(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
3032         BGE_CLRBIT(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
3033         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
3034             sc->bge_asicrev != BGE_ASICREV_BCM5750)
3035                 BGE_CLRBIT(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
3036         BGE_CLRBIT(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
3037
3038         /*
3039          * Shut down all of the memory managers and related
3040          * state machines.
3041          */
3042         BGE_CLRBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
3043         BGE_CLRBIT(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
3044         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
3045             sc->bge_asicrev != BGE_ASICREV_BCM5750)
3046                 BGE_CLRBIT(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
3047         CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
3048         CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
3049         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
3050             sc->bge_asicrev != BGE_ASICREV_BCM5750) {
3051                 BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
3052                 BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
3053         }
3054
3055         /* Disable host interrupts. */
3056         BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
3057         CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
3058
3059         /*
3060          * Tell firmware we're shutting down.
3061          */
3062         BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3063
3064         /* Free the RX lists. */
3065         bge_free_rx_ring_std(sc);
3066
3067         /* Free jumbo RX list. */
3068         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
3069             sc->bge_asicrev != BGE_ASICREV_BCM5750)
3070                 bge_free_rx_ring_jumbo(sc);
3071
3072         /* Free TX buffers. */
3073         bge_free_tx_ring(sc);
3074
3075         /*
3076          * Isolate/power down the PHY, but leave the media selection
3077          * unchanged so that things will be put back to normal when
3078          * we bring the interface back up.
3079          */
3080         if (!sc->bge_tbi) {
3081                 itmp = ifp->if_flags;
3082                 ifp->if_flags |= IFF_UP;
3083                 ifm = mii->mii_media.ifm_cur;
3084                 mtmp = ifm->ifm_media;
3085                 ifm->ifm_media = IFM_ETHER|IFM_NONE;
3086                 mii_mediachg(mii);
3087                 ifm->ifm_media = mtmp;
3088                 ifp->if_flags = itmp;
3089         }
3090
3091         sc->bge_link = 0;
3092
3093         sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
3094
3095         ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
3096 }
3097
3098 /*
3099  * Stop all chip I/O so that the kernel's probe routines don't
3100  * get confused by errant DMAs when rebooting.
3101  */
3102 static void
3103 bge_shutdown(device_t dev)
3104 {
3105         struct bge_softc *sc = device_get_softc(dev);
3106         struct ifnet *ifp = &sc->arpcom.ac_if;
3107
3108         lwkt_serialize_enter(ifp->if_serializer);
3109         bge_stop(sc);
3110         bge_reset(sc);
3111         lwkt_serialize_exit(ifp->if_serializer);
3112 }
3113
3114 static int
3115 bge_suspend(device_t dev)
3116 {
3117         struct bge_softc *sc = device_get_softc(dev);
3118         struct ifnet *ifp = &sc->arpcom.ac_if;
3119
3120         lwkt_serialize_enter(ifp->if_serializer);
3121         bge_stop(sc);
3122         lwkt_serialize_exit(ifp->if_serializer);
3123
3124         return 0;
3125 }
3126
3127 static int
3128 bge_resume(device_t dev)
3129 {
3130         struct bge_softc *sc = device_get_softc(dev);
3131         struct ifnet *ifp = &sc->arpcom.ac_if;
3132
3133         lwkt_serialize_enter(ifp->if_serializer);
3134
3135         if (ifp->if_flags & IFF_UP) {
3136                 bge_init(sc);
3137
3138                 if (!ifq_is_empty(&ifp->if_snd))
3139                         ifp->if_start(ifp);
3140         }
3141
3142         lwkt_serialize_exit(ifp->if_serializer);
3143
3144         return 0;
3145 }
3146
3147 static void
3148 bge_setpromisc(struct bge_softc *sc)
3149 {
3150         struct ifnet *ifp = &sc->arpcom.ac_if;
3151
3152         if (ifp->if_flags & IFF_PROMISC)
3153                 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
3154         else
3155                 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
3156 }
3157
3158 static void
3159 bge_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
3160 {
3161         struct bge_dmamap_arg *ctx = arg;
3162
3163         if (error)
3164                 return;
3165
3166         KASSERT(nsegs == 1 && ctx->bge_maxsegs == 1,
3167                 ("only one segment is allowed\n"));
3168
3169         ctx->bge_segs[0] = *segs;
3170 }
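/*
 * Callback contract: bus_dmamap_load() reports its results only
 * through this function.  On error nothing is copied back, so the
 * caller's ctx keeps whatever it was seeded with.
 */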
3171
3172 static void
3173 bge_dma_map_mbuf(void *arg, bus_dma_segment_t *segs, int nsegs,
3174                  bus_size_t mapsz __unused, int error)
3175 {
3176         struct bge_dmamap_arg *ctx = arg;
3177         int i;
3178
3179         if (error)
3180                 return;
3181
3182         if (nsegs > ctx->bge_maxsegs) {
3183                 ctx->bge_maxsegs = 0;
3184                 return;
3185         }
3186
3187         ctx->bge_maxsegs = nsegs;
3188         for (i = 0; i < nsegs; ++i)
3189                 ctx->bge_segs[i] = segs[i];
3190 }
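/*
 * bge_maxsegs is used in both directions: the caller seeds it with
 * the maximum segment count it can accept, and the callback rewrites
 * it with the actual count, or with zero when the mapping would have
 * exceeded that limit.
 */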
3191
3192 static void
3193 bge_dma_free(struct bge_softc *sc)
3194 {
3195         int i;
3196
3197         /* Destroy RX/TX mbuf DMA resources. */
3198         if (sc->bge_cdata.bge_mtag != NULL) {
3199                 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
3200                         if (sc->bge_cdata.bge_rx_std_dmamap[i]) {
3201                                 bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
3202                                     sc->bge_cdata.bge_rx_std_dmamap[i]);
3203                         }
3204                 }
3205
3206                 for (i = 0; i < BGE_TX_RING_CNT; i++) {
3207                         if (sc->bge_cdata.bge_tx_dmamap[i]) {
3208                                 bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
3209                                     sc->bge_cdata.bge_tx_dmamap[i]);
3210                         }
3211                 }
3212                 bus_dma_tag_destroy(sc->bge_cdata.bge_mtag);
3213         }
3214
3215         /* Destroy standard RX ring */
3216         bge_dma_block_free(sc->bge_cdata.bge_rx_std_ring_tag,
3217                            sc->bge_cdata.bge_rx_std_ring_map,
3218                            sc->bge_ldata.bge_rx_std_ring);
3219
3220         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
3221             sc->bge_asicrev != BGE_ASICREV_BCM5750)
3222                 bge_free_jumbo_mem(sc);
3223
3224         /* Destroy RX return ring */
3225         bge_dma_block_free(sc->bge_cdata.bge_rx_return_ring_tag,
3226                            sc->bge_cdata.bge_rx_return_ring_map,
3227                            sc->bge_ldata.bge_rx_return_ring);
3228
3229         /* Destroy TX ring */
3230         bge_dma_block_free(sc->bge_cdata.bge_tx_ring_tag,
3231                            sc->bge_cdata.bge_tx_ring_map,
3232                            sc->bge_ldata.bge_tx_ring);
3233
3234         /* Destroy status block */
3235         bge_dma_block_free(sc->bge_cdata.bge_status_tag,
3236                            sc->bge_cdata.bge_status_map,
3237                            sc->bge_ldata.bge_status_block);
3238
3239         /* Destroy statistics block */
3240         bge_dma_block_free(sc->bge_cdata.bge_stats_tag,
3241                            sc->bge_cdata.bge_stats_map,
3242                            sc->bge_ldata.bge_stats);
3243
3244         /* Destroy the parent tag */
3245         if (sc->bge_cdata.bge_parent_tag != NULL)
3246                 bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag);
3247 }
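/*
 * Teardown order mirrors creation: per-mbuf maps and their tag first,
 * then each ring/block, then the parent tag.  The NULL checks here
 * and in bge_dma_block_free() make this safe to call on a partially
 * constructed softc, e.g. after a failed bge_dma_alloc().
 */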
3248
3249 static int
3250 bge_dma_alloc(struct bge_softc *sc)
3251 {
3252         struct ifnet *ifp = &sc->arpcom.ac_if;
3253         int nseg, i, error;
3254
3255         /*
3256          * Allocate the parent bus DMA tag appropriate for PCI.
3257          */
3258         error = bus_dma_tag_create(NULL, 1, 0,
3259                                    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3260                                    NULL, NULL,
3261                                    MAXBSIZE, BGE_NSEG_NEW,
3262                                    BUS_SPACE_MAXSIZE_32BIT,
3263                                    0, &sc->bge_cdata.bge_parent_tag);
3264         if (error) {
3265                 if_printf(ifp, "could not allocate parent dma tag\n");
3266                 return error;
3267         }
3268
3269         /*
3270          * Create DMA tag for mbufs.
3271          */
3272         nseg = BGE_NSEG_NEW;
3273         error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1, 0,
3274                                    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3275                                    NULL, NULL,
3276                                    MCLBYTES * nseg, nseg, MCLBYTES,
3277                                    BUS_DMA_ALLOCNOW, &sc->bge_cdata.bge_mtag);
3278         if (error) {
3279                 if_printf(ifp, "could not allocate mbuf dma tag\n");
3280                 return error;
3281         }
3282
3283         /*
3284          * Create DMA maps for TX/RX mbufs.
3285          */
3286         for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
3287                 error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0,
3288                                           &sc->bge_cdata.bge_rx_std_dmamap[i]);
3289                 if (error) {
3290                         int j;
3291
3292                         for (j = 0; j < i; ++j) {
3293                                 bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
3294                                         sc->bge_cdata.bge_rx_std_dmamap[j]);
3295                         }
3296                         bus_dma_tag_destroy(sc->bge_cdata.bge_mtag);
3297                         sc->bge_cdata.bge_mtag = NULL;
3298
3299                         if_printf(ifp, "could not create DMA map for RX\n");
3300                         return error;
3301                 }
3302         }
3303
3304         for (i = 0; i < BGE_TX_RING_CNT; i++) {
3305                 error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0,
3306                                           &sc->bge_cdata.bge_tx_dmamap[i]);
3307                 if (error) {
3308                         int j;
3309
3310                         for (j = 0; j < BGE_STD_RX_RING_CNT; ++j) {
3311                                 bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
3312                                         sc->bge_cdata.bge_rx_std_dmamap[j]);
3313                         }
3314                         for (j = 0; j < i; ++j) {
3315                                 bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
3316                                         sc->bge_cdata.bge_tx_dmamap[j]);
3317                         }
3318                         bus_dma_tag_destroy(sc->bge_cdata.bge_mtag);
3319                         sc->bge_cdata.bge_mtag = NULL;
3320
3321                         if_printf(ifp, "could not create DMA map for TX\n");
3322                         return error;
3323                 }
3324         }
3325
3326         /*
3327          * Create DMA resources for the standard RX ring.
3328          */
3329         error = bge_dma_block_alloc(sc, BGE_STD_RX_RING_SZ,
3330                                     &sc->bge_cdata.bge_rx_std_ring_tag,
3331                                     &sc->bge_cdata.bge_rx_std_ring_map,
3332                                     (void **)&sc->bge_ldata.bge_rx_std_ring,
3333                                     &sc->bge_ldata.bge_rx_std_ring_paddr);
3334         if (error) {
3335                 if_printf(ifp, "could not create std RX ring\n");
3336                 return error;
3337         }
3338
3339         /*
3340          * Create jumbo buffer pool.
3341          */
3342         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
3343             sc->bge_asicrev != BGE_ASICREV_BCM5750) {
3344                 error = bge_alloc_jumbo_mem(sc);
3345                 if (error) {
3346                         if_printf(ifp, "could not create jumbo buffer pool\n");
3347                         return error;
3348                 }
3349         }
3350
3351         /*
3352          * Create DMA resources for the RX return ring.
3353          */
3354         error = bge_dma_block_alloc(sc, BGE_RX_RTN_RING_SZ(sc),
3355                                     &sc->bge_cdata.bge_rx_return_ring_tag,
3356                                     &sc->bge_cdata.bge_rx_return_ring_map,
3357                                     (void **)&sc->bge_ldata.bge_rx_return_ring,
3358                                     &sc->bge_ldata.bge_rx_return_ring_paddr);
3359         if (error) {
3360                 if_printf(ifp, "could not create RX ret ring\n");
3361                 return error;
3362         }
3363
3364         /*
3365          * Create DMA resources for the TX ring.
3366          */
3367         error = bge_dma_block_alloc(sc, BGE_TX_RING_SZ,
3368                                     &sc->bge_cdata.bge_tx_ring_tag,
3369                                     &sc->bge_cdata.bge_tx_ring_map,
3370                                     (void **)&sc->bge_ldata.bge_tx_ring,
3371                                     &sc->bge_ldata.bge_tx_ring_paddr);
3372         if (error) {
3373                 if_printf(ifp, "could not create TX ring\n");
3374                 return error;
3375         }
3376
3377         /*
3378          * Create DMA resources for the status block.
3379          */
3380         error = bge_dma_block_alloc(sc, BGE_STATUS_BLK_SZ,
3381                                     &sc->bge_cdata.bge_status_tag,
3382                                     &sc->bge_cdata.bge_status_map,
3383                                     (void **)&sc->bge_ldata.bge_status_block,
3384                                     &sc->bge_ldata.bge_status_block_paddr);
3385         if (error) {
3386                 if_printf(ifp, "could not create status block\n");
3387                 return error;
3388         }
3389
3390         /*
3391          * Create DMA resources for the statistics block.
3392          */
3393         error = bge_dma_block_alloc(sc, BGE_STATS_SZ,
3394                                     &sc->bge_cdata.bge_stats_tag,
3395                                     &sc->bge_cdata.bge_stats_map,
3396                                     (void **)&sc->bge_ldata.bge_stats,
3397                                     &sc->bge_ldata.bge_stats_paddr);
3398         if (error) {
3399                 if_printf(ifp, "could not create stats block\n");
3400                 return error;
3401         }
3402         return 0;
3403 }
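/*
 * On failure, bge_dma_alloc() leaves whatever it had built so far in
 * place; the attach error path is expected to unwind by calling
 * bge_dma_free() above.
 */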
3404
3405 static int
3406 bge_dma_block_alloc(struct bge_softc *sc, bus_size_t size, bus_dma_tag_t *tag,
3407                     bus_dmamap_t *map, void **addr, bus_addr_t *paddr)
3408 {
3409         struct ifnet *ifp = &sc->arpcom.ac_if;
3410         struct bge_dmamap_arg ctx;
3411         bus_dma_segment_t seg;
3412         int error;
3413
3414         /*
3415          * Create DMA tag
3416          */
3417         error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, PAGE_SIZE, 0,
3418                                    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3419                                    NULL, NULL, size, 1, size, 0, tag);
3420         if (error) {
3421                 if_printf(ifp, "could not allocate dma tag\n");
3422                 return error;
3423         }
3424
3425         /*
3426          * Allocate DMA'able memory
3427          */
3428         error = bus_dmamem_alloc(*tag, addr, BUS_DMA_WAITOK | BUS_DMA_ZERO,
3429                                  map);
3430         if (error) {
3431                 if_printf(ifp, "could not allocate dma memory\n");
3432                 bus_dma_tag_destroy(*tag);
3433                 *tag = NULL;
3434                 return error;
3435         }
3436
3437         /*
3438          * Load the DMA'able memory
3439          */
3440         ctx.bge_maxsegs = 1;
3441         ctx.bge_segs = &seg;
3442         error = bus_dmamap_load(*tag, *map, *addr, size, bge_dma_map_addr, &ctx,
3443                                 BUS_DMA_WAITOK);
3444         if (error) {
3445                 if_printf(ifp, "could not load dma memory\n");
3446                 bus_dmamem_free(*tag, *addr, *map);
3447                 bus_dma_tag_destroy(*tag);
3448                 *tag = NULL;
3449                 return error;
3450         }
3451         *paddr = ctx.bge_segs[0].ds_addr;
3452
3453         return 0;
3454 }
3455
3456 static void
3457 bge_dma_block_free(bus_dma_tag_t tag, bus_dmamap_t map, void *addr)
3458 {
3459         if (tag != NULL) {
3460                 bus_dmamap_unload(tag, map);
3461                 bus_dmamem_free(tag, addr, map);
3462                 bus_dma_tag_destroy(tag);
3463         }
3464 }
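/*
 * Usage sketch for the pair above, mirroring the status-block setup
 * in bge_dma_alloc():
 *
 *	bus_dma_tag_t tag;
 *	bus_dmamap_t map;
 *	void *va;
 *	bus_addr_t pa;
 *
 *	if (bge_dma_block_alloc(sc, BGE_STATUS_BLK_SZ,
 *				&tag, &map, &va, &pa) == 0) {
 *		... hand 'pa' to the chip, access the block via 'va' ...
 *		bge_dma_block_free(tag, map, va);
 *	}
 */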