bge: Factor out bge_link_poll()
sys/dev/netif/bge/if_bge.c
/*
 * Copyright (c) 2001 Wind River Systems
 * Copyright (c) 1997, 1998, 1999, 2001
 *      Bill Paul <wpaul@windriver.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/bge/if_bge.c,v 1.3.2.39 2005/07/03 03:41:18 silby Exp $
 */

/*
 * Broadcom BCM570x family gigabit ethernet driver for FreeBSD.
 *
 * Written by Bill Paul <wpaul@windriver.com>
 * Senior Engineer, Wind River Systems
 */

/*
 * The Broadcom BCM5700 is based on technology originally developed by
 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
 * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
 * frames, highly configurable RX filtering, and 16 RX and TX queues
 * (which, along with RX filter rules, can be used for QOS applications).
 * Other features, such as TCP segmentation, may be available as part
 * of value-added firmware updates. Unlike the Tigon I and Tigon II,
 * firmware images can be stored in hardware and need not be compiled
 * into the driver.
 *
 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
 *
 * The BCM5701 is a single-chip solution incorporating both the BCM5700
 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
 * does not support external SSRAM.
 *
 * Broadcom also produces a variation of the BCM5700 under the "Altima"
 * brand name, which is functionally similar but lacks PCI-X support.
 *
 * Without external SSRAM, you can only have at most 4 TX rings,
 * and the use of the mini RX ring is disabled. This seems to imply
 * that these features are simply not available on the BCM5701. As a
 * result, this driver does not implement any support for the mini RX
 * ring.
 */

#include "opt_polling.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/interrupt.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/rman.h>
#include <sys/serialize.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/ifq_var.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>

#include <dev/netif/mii_layer/mii.h>
#include <dev/netif/mii_layer/miivar.h>
#include <dev/netif/mii_layer/brgphyreg.h>

#include <bus/pci/pcidevs.h>
#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>

#include <dev/netif/bge/if_bgereg.h>

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

#define BGE_CSUM_FEATURES       (CSUM_IP | CSUM_TCP)
#define BGE_MIN_FRAME           60
static const struct bge_type bge_devs[] = {
        { PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3C996,
                "3COM 3C996 Gigabit Ethernet" },

        { PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_BCM5700,
                "Alteon BCM5700 Gigabit Ethernet" },
        { PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_BCM5701,
                "Alteon BCM5701 Gigabit Ethernet" },

        { PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1000,
                "Altima AC1000 Gigabit Ethernet" },
        { PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1001,
                "Altima AC1002 Gigabit Ethernet" },
        { PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC9100,
                "Altima AC9100 Gigabit Ethernet" },

        { PCI_VENDOR_APPLE, PCI_PRODUCT_APPLE_BCM5701,
                "Apple BCM5701 Gigabit Ethernet" },

        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5700,
                "Broadcom BCM5700 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5701,
                "Broadcom BCM5701 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702,
                "Broadcom BCM5702 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702X,
                "Broadcom BCM5702X Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702_ALT,
                "Broadcom BCM5702 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703,
                "Broadcom BCM5703 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703X,
                "Broadcom BCM5703X Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703A3,
                "Broadcom BCM5703 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704C,
                "Broadcom BCM5704C Dual Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704S,
                "Broadcom BCM5704S Dual Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704S_ALT,
                "Broadcom BCM5704S Dual Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705,
                "Broadcom BCM5705 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705F,
                "Broadcom BCM5705F Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705K,
                "Broadcom BCM5705K Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705M,
                "Broadcom BCM5705M Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705M_ALT,
                "Broadcom BCM5705M Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5714,
                "Broadcom BCM5714C Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5714S,
                "Broadcom BCM5714S Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5715,
                "Broadcom BCM5715 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5715S,
                "Broadcom BCM5715S Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5720,
                "Broadcom BCM5720 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5721,
                "Broadcom BCM5721 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5722,
                "Broadcom BCM5722 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5723,
                "Broadcom BCM5723 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5750,
                "Broadcom BCM5750 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5750M,
                "Broadcom BCM5750M Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751,
                "Broadcom BCM5751 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751F,
                "Broadcom BCM5751F Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751M,
                "Broadcom BCM5751M Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5752,
                "Broadcom BCM5752 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5752M,
                "Broadcom BCM5752M Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5753,
                "Broadcom BCM5753 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5753F,
                "Broadcom BCM5753F Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5753M,
                "Broadcom BCM5753M Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5754,
                "Broadcom BCM5754 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5754M,
                "Broadcom BCM5754M Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5755,
                "Broadcom BCM5755 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5755M,
                "Broadcom BCM5755M Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5756,
                "Broadcom BCM5756 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5761,
                "Broadcom BCM5761 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5761E,
                "Broadcom BCM5761E Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5761S,
                "Broadcom BCM5761S Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5761SE,
                "Broadcom BCM5761SE Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5764,
                "Broadcom BCM5764 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5780,
                "Broadcom BCM5780 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5780S,
                "Broadcom BCM5780S Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5781,
                "Broadcom BCM5781 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5782,
                "Broadcom BCM5782 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5784,
                "Broadcom BCM5784 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5785F,
                "Broadcom BCM5785F Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5785G,
                "Broadcom BCM5785G Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5786,
                "Broadcom BCM5786 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5787,
                "Broadcom BCM5787 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5787F,
                "Broadcom BCM5787F Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5787M,
                "Broadcom BCM5787M Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5788,
                "Broadcom BCM5788 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5789,
                "Broadcom BCM5789 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5901,
                "Broadcom BCM5901 Fast Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5901A2,
                "Broadcom BCM5901A2 Fast Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5903M,
                "Broadcom BCM5903M Fast Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5906,
                "Broadcom BCM5906 Fast Ethernet"},
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5906M,
                "Broadcom BCM5906M Fast Ethernet"},
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57760,
                "Broadcom BCM57760 Gigabit Ethernet"},
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57780,
                "Broadcom BCM57780 Gigabit Ethernet"},
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57788,
                "Broadcom BCM57788 Gigabit Ethernet"},
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57790,
                "Broadcom BCM57790 Gigabit Ethernet"},
        { PCI_VENDOR_SCHNEIDERKOCH, PCI_PRODUCT_SCHNEIDERKOCH_SK_9DX1,
                "SysKonnect Gigabit Ethernet" },

        { 0, 0, NULL }
};

#define BGE_IS_JUMBO_CAPABLE(sc)        ((sc)->bge_flags & BGE_FLAG_JUMBO)
#define BGE_IS_5700_FAMILY(sc)          ((sc)->bge_flags & BGE_FLAG_5700_FAMILY)
#define BGE_IS_5705_PLUS(sc)            ((sc)->bge_flags & BGE_FLAG_5705_PLUS)
#define BGE_IS_5714_FAMILY(sc)          ((sc)->bge_flags & BGE_FLAG_5714_FAMILY)
#define BGE_IS_575X_PLUS(sc)            ((sc)->bge_flags & BGE_FLAG_575X_PLUS)
#define BGE_IS_5755_PLUS(sc)            ((sc)->bge_flags & BGE_FLAG_5755_PLUS)
#define BGE_IS_5788(sc)                 ((sc)->bge_flags & BGE_FLAG_5788)

typedef int     (*bge_eaddr_fcn_t)(struct bge_softc *, uint8_t[]);

static int      bge_probe(device_t);
static int      bge_attach(device_t);
static int      bge_detach(device_t);
static void     bge_txeof(struct bge_softc *, uint16_t);
static void     bge_rxeof(struct bge_softc *, uint16_t);

static void     bge_tick(void *);
static void     bge_stats_update(struct bge_softc *);
static void     bge_stats_update_regs(struct bge_softc *);
static struct mbuf *
                bge_defrag_shortdma(struct mbuf *);
static int      bge_encap(struct bge_softc *, struct mbuf **, uint32_t *);

#ifdef DEVICE_POLLING
static void     bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count);
#endif
static void     bge_intr(void *);
static void     bge_intr_status_tag(void *);
static void     bge_enable_intr(struct bge_softc *);
static void     bge_disable_intr(struct bge_softc *);
static void     bge_start(struct ifnet *);
static int      bge_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void     bge_init(void *);
static void     bge_stop(struct bge_softc *);
static void     bge_watchdog(struct ifnet *);
static void     bge_shutdown(device_t);
static int      bge_suspend(device_t);
static int      bge_resume(device_t);
static int      bge_ifmedia_upd(struct ifnet *);
static void     bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);

static uint8_t  bge_nvram_getbyte(struct bge_softc *, int, uint8_t *);
static int      bge_read_nvram(struct bge_softc *, caddr_t, int, int);

static uint8_t  bge_eeprom_getbyte(struct bge_softc *, uint32_t, uint8_t *);
static int      bge_read_eeprom(struct bge_softc *, caddr_t, uint32_t, size_t);

static void     bge_setmulti(struct bge_softc *);
static void     bge_setpromisc(struct bge_softc *);

static int      bge_alloc_jumbo_mem(struct bge_softc *);
static void     bge_free_jumbo_mem(struct bge_softc *);
static struct bge_jslot
                *bge_jalloc(struct bge_softc *);
static void     bge_jfree(void *);
static void     bge_jref(void *);
static int      bge_newbuf_std(struct bge_softc *, int, int);
static int      bge_newbuf_jumbo(struct bge_softc *, int, int);
static void     bge_setup_rxdesc_std(struct bge_softc *, int);
static void     bge_setup_rxdesc_jumbo(struct bge_softc *, int);
static int      bge_init_rx_ring_std(struct bge_softc *);
static void     bge_free_rx_ring_std(struct bge_softc *);
static int      bge_init_rx_ring_jumbo(struct bge_softc *);
static void     bge_free_rx_ring_jumbo(struct bge_softc *);
static void     bge_free_tx_ring(struct bge_softc *);
static int      bge_init_tx_ring(struct bge_softc *);

static int      bge_chipinit(struct bge_softc *);
static int      bge_blockinit(struct bge_softc *);
static void     bge_stop_block(struct bge_softc *, bus_size_t, uint32_t);

static uint32_t bge_readmem_ind(struct bge_softc *, uint32_t);
static void     bge_writemem_ind(struct bge_softc *, uint32_t, uint32_t);
#ifdef notdef
static uint32_t bge_readreg_ind(struct bge_softc *, uint32_t);
#endif
static void     bge_writereg_ind(struct bge_softc *, uint32_t, uint32_t);
static void     bge_writemem_direct(struct bge_softc *, uint32_t, uint32_t);
static void     bge_writembx(struct bge_softc *, int, int);

static int      bge_miibus_readreg(device_t, int, int);
static int      bge_miibus_writereg(device_t, int, int, int);
static void     bge_miibus_statchg(device_t);
static void     bge_bcm5700_link_upd(struct bge_softc *, uint32_t);
static void     bge_tbi_link_upd(struct bge_softc *, uint32_t);
static void     bge_copper_link_upd(struct bge_softc *, uint32_t);
static void     bge_autopoll_link_upd(struct bge_softc *, uint32_t);
static void     bge_link_poll(struct bge_softc *);

static void     bge_reset(struct bge_softc *);

static int      bge_dma_alloc(struct bge_softc *);
static void     bge_dma_free(struct bge_softc *);
static int      bge_dma_block_alloc(struct bge_softc *, bus_size_t,
                                    bus_dma_tag_t *, bus_dmamap_t *,
                                    void **, bus_addr_t *);
static void     bge_dma_block_free(bus_dma_tag_t, bus_dmamap_t, void *);

static int      bge_get_eaddr_mem(struct bge_softc *, uint8_t[]);
static int      bge_get_eaddr_nvram(struct bge_softc *, uint8_t[]);
static int      bge_get_eaddr_eeprom(struct bge_softc *, uint8_t[]);
static int      bge_get_eaddr(struct bge_softc *, uint8_t[]);

static void     bge_coal_change(struct bge_softc *);
static int      bge_sysctl_rx_coal_ticks(SYSCTL_HANDLER_ARGS);
static int      bge_sysctl_tx_coal_ticks(SYSCTL_HANDLER_ARGS);
static int      bge_sysctl_rx_coal_bds(SYSCTL_HANDLER_ARGS);
static int      bge_sysctl_tx_coal_bds(SYSCTL_HANDLER_ARGS);
static int      bge_sysctl_rx_coal_ticks_int(SYSCTL_HANDLER_ARGS);
static int      bge_sysctl_tx_coal_ticks_int(SYSCTL_HANDLER_ARGS);
static int      bge_sysctl_rx_coal_bds_int(SYSCTL_HANDLER_ARGS);
static int      bge_sysctl_tx_coal_bds_int(SYSCTL_HANDLER_ARGS);
static int      bge_sysctl_coal_chg(SYSCTL_HANDLER_ARGS, uint32_t *, uint32_t);

/*
 * Set the following tunable to 1 for some IBM blade servers with the DNLK
 * switch module.  Autonegotiation is broken for those configurations.
 */
static int      bge_fake_autoneg = 0;
TUNABLE_INT("hw.bge.fake_autoneg", &bge_fake_autoneg);

#if !defined(KTR_IF_BGE)
#define KTR_IF_BGE      KTR_ALL
#endif
KTR_INFO_MASTER(if_bge);
KTR_INFO(KTR_IF_BGE, if_bge, intr, 0, "intr");
KTR_INFO(KTR_IF_BGE, if_bge, rx_pkt, 1, "rx_pkt");
KTR_INFO(KTR_IF_BGE, if_bge, tx_pkt, 2, "tx_pkt");
#define logif(name)     KTR_LOG(if_bge_ ## name)

static device_method_t bge_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe,         bge_probe),
        DEVMETHOD(device_attach,        bge_attach),
        DEVMETHOD(device_detach,        bge_detach),
        DEVMETHOD(device_shutdown,      bge_shutdown),
        DEVMETHOD(device_suspend,       bge_suspend),
        DEVMETHOD(device_resume,        bge_resume),

        /* bus interface */
        DEVMETHOD(bus_print_child,      bus_generic_print_child),
        DEVMETHOD(bus_driver_added,     bus_generic_driver_added),

        /* MII interface */
        DEVMETHOD(miibus_readreg,       bge_miibus_readreg),
        DEVMETHOD(miibus_writereg,      bge_miibus_writereg),
        DEVMETHOD(miibus_statchg,       bge_miibus_statchg),

        { 0, 0 }
};

static DEFINE_CLASS_0(bge, bge_driver, bge_methods, sizeof(struct bge_softc));
static devclass_t bge_devclass;

DECLARE_DUMMY_MODULE(if_bge);
DRIVER_MODULE(if_bge, pci, bge_driver, bge_devclass, NULL, NULL);
DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, NULL, NULL);

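/*
 * Indirect access to the NIC's internal memory via the PCI memory
 * window.  On the BCM5906, accesses to the region between the
 * statistics block and the send rings are skipped: reads return 0
 * and writes are dropped.
 */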
static uint32_t
bge_readmem_ind(struct bge_softc *sc, uint32_t off)
{
        device_t dev = sc->bge_dev;
        uint32_t val;

        if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
            off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4)
                return 0;

        pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
        val = pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4);
        pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
        return (val);
}

static void
bge_writemem_ind(struct bge_softc *sc, uint32_t off, uint32_t val)
{
        device_t dev = sc->bge_dev;

        if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
            off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4)
                return;

        pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
        pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
        pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
}

#ifdef notdef
static uint32_t
bge_readreg_ind(struct bge_softc *sc, uint32_t off)
{
        device_t dev = sc->bge_dev;

        pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
        return(pci_read_config(dev, BGE_PCI_REG_DATA, 4));
}
#endif

static void
bge_writereg_ind(struct bge_softc *sc, uint32_t off, uint32_t val)
{
        device_t dev = sc->bge_dev;

        pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
        pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);
}

static void
bge_writemem_direct(struct bge_softc *sc, uint32_t off, uint32_t val)
{
        CSR_WRITE_4(sc, off, val);
}

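/*
 * Write a mailbox register.  The BCM5906 keeps its interrupt mailboxes
 * in the low-priority mailbox range.  When bge_mbox_reorder is set, the
 * write is read back so it cannot be reordered on its way to the chip.
 */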
static void
bge_writembx(struct bge_softc *sc, int off, int val)
{
        if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
                off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI;

        CSR_WRITE_4(sc, off, val);
        if (sc->bge_mbox_reorder)
                CSR_READ_4(sc, off);
}

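/*
 * Read a byte from NVRAM (BCM5906 only).  Acquire the software
 * arbitration lock and enable NVRAM access, read the word containing
 * the requested byte, then undo both.  Returns non-zero on timeout.
 */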
static uint8_t
bge_nvram_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
{
        uint32_t access, byte = 0;
        int i;

        /* Lock. */
        CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
        for (i = 0; i < 8000; i++) {
                if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1)
                        break;
                DELAY(20);
        }
        if (i == 8000)
                return (1);

        /* Enable access. */
        access = CSR_READ_4(sc, BGE_NVRAM_ACCESS);
        CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access | BGE_NVRAMACC_ENABLE);

        CSR_WRITE_4(sc, BGE_NVRAM_ADDR, addr & 0xfffffffc);
        CSR_WRITE_4(sc, BGE_NVRAM_CMD, BGE_NVRAM_READCMD);
        for (i = 0; i < BGE_TIMEOUT * 10; i++) {
                DELAY(10);
                if (CSR_READ_4(sc, BGE_NVRAM_CMD) & BGE_NVRAMCMD_DONE) {
                        DELAY(10);
                        break;
                }
        }

        if (i == BGE_TIMEOUT * 10) {
                if_printf(&sc->arpcom.ac_if, "nvram read timed out\n");
                return (1);
        }

        /* Get result. */
        byte = CSR_READ_4(sc, BGE_NVRAM_RDDATA);

        *dest = (bswap32(byte) >> ((addr % 4) * 8)) & 0xFF;

        /* Disable access. */
        CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access);

        /* Unlock. */
        CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_CLR1);
        CSR_READ_4(sc, BGE_NVRAM_SWARB);

        return (0);
}

/*
 * Read a sequence of bytes from NVRAM.
 */
static int
bge_read_nvram(struct bge_softc *sc, caddr_t dest, int off, int cnt)
{
        int err = 0, i;
        uint8_t byte = 0;

        if (sc->bge_asicrev != BGE_ASICREV_BCM5906)
                return (1);

        for (i = 0; i < cnt; i++) {
                err = bge_nvram_getbyte(sc, off + i, &byte);
                if (err)
                        break;
                *(dest + i) = byte;
        }

        return (err ? 1 : 0);
}

/*
 * Read a byte of data stored in the EEPROM at address 'addr.' The
 * BCM570x supports both the traditional bitbang interface and an
 * auto access interface for reading the EEPROM. We use the auto
 * access method.
 */
static uint8_t
bge_eeprom_getbyte(struct bge_softc *sc, uint32_t addr, uint8_t *dest)
{
        int i;
        uint32_t byte = 0;

        /*
         * Enable use of auto EEPROM access so we can avoid
         * having to use the bitbang method.
         */
        BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);

        /* Reset the EEPROM, load the clock period. */
        CSR_WRITE_4(sc, BGE_EE_ADDR,
            BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
        DELAY(20);

        /* Issue the read EEPROM command. */
        CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);

        /* Wait for completion */
        for (i = 0; i < BGE_TIMEOUT * 10; i++) {
                DELAY(10);
                if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
                        break;
        }

        if (i == BGE_TIMEOUT * 10) {
                if_printf(&sc->arpcom.ac_if, "eeprom read timed out\n");
                return(1);
        }

        /* Get result. */
        byte = CSR_READ_4(sc, BGE_EE_DATA);

        *dest = (byte >> ((addr % 4) * 8)) & 0xFF;

        return(0);
}

/*
 * Read a sequence of bytes from the EEPROM.
 */
static int
bge_read_eeprom(struct bge_softc *sc, caddr_t dest, uint32_t off, size_t len)
{
        size_t i;
        int err;
        uint8_t byte;

        for (byte = 0, err = 0, i = 0; i < len; i++) {
                err = bge_eeprom_getbyte(sc, off + i, &byte);
                if (err)
                        break;
                *(dest + i) = byte;
        }

        return(err ? 1 : 0);
}

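/*
 * Read a PHY register through the MII management interface.  Autopolling
 * is temporarily disabled while the host owns the interface; a result
 * with READFAIL set, or a timeout, reads as 0.
 */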
static int
bge_miibus_readreg(device_t dev, int phy, int reg)
{
        struct bge_softc *sc = device_get_softc(dev);
        uint32_t val;
        int i;

        KASSERT(phy == sc->bge_phyno,
            ("invalid phyno %d, should be %d", phy, sc->bge_phyno));

        /* Clear the autopoll bit if set, otherwise may trigger PCI errors. */
        if (sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) {
                CSR_WRITE_4(sc, BGE_MI_MODE,
                    sc->bge_mi_mode & ~BGE_MIMODE_AUTOPOLL);
                DELAY(80);
        }

        CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ | BGE_MICOMM_BUSY |
            BGE_MIPHY(phy) | BGE_MIREG(reg));

        /* Poll for the PHY register access to complete. */
        for (i = 0; i < BGE_TIMEOUT; i++) {
                DELAY(10);
                val = CSR_READ_4(sc, BGE_MI_COMM);
                if ((val & BGE_MICOMM_BUSY) == 0) {
                        DELAY(5);
                        val = CSR_READ_4(sc, BGE_MI_COMM);
                        break;
                }
        }
        if (i == BGE_TIMEOUT) {
                if_printf(&sc->arpcom.ac_if, "PHY read timed out "
                    "(phy %d, reg %d, val 0x%08x)\n", phy, reg, val);
                val = 0;
        }

        /* Restore the autopoll bit if necessary. */
        if (sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) {
                CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode);
                DELAY(80);
        }

        if (val & BGE_MICOMM_READFAIL)
                return 0;

        return (val & 0xFFFF);
}

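/*
 * Write a PHY register through the MII management interface, with the
 * same autopoll handling as bge_miibus_readreg().  On the BCM5906,
 * writes to the 1000CTL and AUXCTL registers are silently skipped.
 */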
static int
bge_miibus_writereg(device_t dev, int phy, int reg, int val)
{
        struct bge_softc *sc = device_get_softc(dev);
        int i;

        KASSERT(phy == sc->bge_phyno,
            ("invalid phyno %d, should be %d", phy, sc->bge_phyno));

        if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
            (reg == BRGPHY_MII_1000CTL || reg == BRGPHY_MII_AUXCTL))
                return 0;

        /* Clear the autopoll bit if set, otherwise may trigger PCI errors. */
        if (sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) {
                CSR_WRITE_4(sc, BGE_MI_MODE,
                    sc->bge_mi_mode & ~BGE_MIMODE_AUTOPOLL);
                DELAY(80);
        }

        CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE | BGE_MICOMM_BUSY |
            BGE_MIPHY(phy) | BGE_MIREG(reg) | val);

        for (i = 0; i < BGE_TIMEOUT; i++) {
                DELAY(10);
                if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) {
                        DELAY(5);
                        CSR_READ_4(sc, BGE_MI_COMM); /* dummy read */
                        break;
                }
        }
        if (i == BGE_TIMEOUT) {
                if_printf(&sc->arpcom.ac_if, "PHY write timed out "
                    "(phy %d, reg %d, val %d)\n", phy, reg, val);
        }

        /* Restore the autopoll bit if necessary. */
        if (sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) {
                CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode);
                DELAY(80);
        }

        return 0;
}

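/*
 * MII statchg method: called by the MII layer when the PHY link state
 * changes.  Track the link state and program the MAC port mode (MII vs.
 * GMII) and duplex setting to match the negotiated media.
 */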
static void
bge_miibus_statchg(device_t dev)
{
        struct bge_softc *sc;
        struct mii_data *mii;

        sc = device_get_softc(dev);
        mii = device_get_softc(sc->bge_miibus);

        if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
            (IFM_ACTIVE | IFM_AVALID)) {
                switch (IFM_SUBTYPE(mii->mii_media_active)) {
                case IFM_10_T:
                case IFM_100_TX:
                        sc->bge_link = 1;
                        break;
                case IFM_1000_T:
                case IFM_1000_SX:
                case IFM_2500_SX:
                        if (sc->bge_asicrev != BGE_ASICREV_BCM5906)
                                sc->bge_link = 1;
                        else
                                sc->bge_link = 0;
                        break;
                default:
                        sc->bge_link = 0;
                        break;
                }
        } else {
                sc->bge_link = 0;
        }
        if (sc->bge_link == 0)
                return;

        BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
        if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
            IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) {
                BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
        } else {
                BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
        }

        if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
                BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
        } else {
                BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
        }
}

/*
 * Memory management for jumbo frames.
 */
static int
bge_alloc_jumbo_mem(struct bge_softc *sc)
{
        struct ifnet *ifp = &sc->arpcom.ac_if;
        struct bge_jslot *entry;
        uint8_t *ptr;
        bus_addr_t paddr;
        int i, error;

        /*
         * Create tag for jumbo mbufs.
         * This is really a bit of a kludge. We allocate a special
         * jumbo buffer pool which (thanks to the way our DMA
         * memory allocation works) will consist of contiguous
         * pages. This means that even though a jumbo buffer might
         * be larger than a page size, we don't really need to
         * map it into more than one DMA segment. However, the
         * default mbuf tag will result in multi-segment mappings,
         * so we have to create a special jumbo mbuf tag that
         * lets us get away with mapping the jumbo buffers as
         * a single segment. I think eventually the driver should
         * be changed so that it uses ordinary mbufs and cluster
         * buffers, i.e. jumbo frames can span multiple DMA
         * descriptors. But that's a project for another day.
         */

        /*
         * Create DMA resources for the jumbo RX ring.
         */
        error = bge_dma_block_alloc(sc, BGE_JUMBO_RX_RING_SZ,
                                    &sc->bge_cdata.bge_rx_jumbo_ring_tag,
                                    &sc->bge_cdata.bge_rx_jumbo_ring_map,
                                    (void *)&sc->bge_ldata.bge_rx_jumbo_ring,
                                    &sc->bge_ldata.bge_rx_jumbo_ring_paddr);
        if (error) {
                if_printf(ifp, "could not create jumbo RX ring\n");
                return error;
        }

        /*
         * Create DMA resources for the jumbo buffer block.
         */
        error = bge_dma_block_alloc(sc, BGE_JMEM,
                                    &sc->bge_cdata.bge_jumbo_tag,
                                    &sc->bge_cdata.bge_jumbo_map,
                                    (void **)&sc->bge_ldata.bge_jumbo_buf,
                                    &paddr);
        if (error) {
                if_printf(ifp, "could not create jumbo buffer\n");
                return error;
        }

        SLIST_INIT(&sc->bge_jfree_listhead);

        /*
         * Now divide it up into 9K pieces and save the addresses
         * in an array. Note that we play an evil trick here by using
         * the first few bytes in the buffer to hold the address
         * of the softc structure for this interface. This is because
         * bge_jfree() needs it, but it is called by the mbuf management
         * code which will not pass it to us explicitly.
         */
        for (i = 0, ptr = sc->bge_ldata.bge_jumbo_buf; i < BGE_JSLOTS; i++) {
                entry = &sc->bge_cdata.bge_jslots[i];
                entry->bge_sc = sc;
                entry->bge_buf = ptr;
                entry->bge_paddr = paddr;
                entry->bge_inuse = 0;
                entry->bge_slot = i;
                SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, entry, jslot_link);

                ptr += BGE_JLEN;
                paddr += BGE_JLEN;
        }
        return 0;
}

static void
bge_free_jumbo_mem(struct bge_softc *sc)
{
        /* Destroy jumbo RX ring. */
        bge_dma_block_free(sc->bge_cdata.bge_rx_jumbo_ring_tag,
                           sc->bge_cdata.bge_rx_jumbo_ring_map,
                           sc->bge_ldata.bge_rx_jumbo_ring);

        /* Destroy jumbo buffer block. */
        bge_dma_block_free(sc->bge_cdata.bge_jumbo_tag,
                           sc->bge_cdata.bge_jumbo_map,
                           sc->bge_ldata.bge_jumbo_buf);
}

/*
 * Allocate a jumbo buffer.
 */
static struct bge_jslot *
bge_jalloc(struct bge_softc *sc)
{
        struct bge_jslot *entry;

        lwkt_serialize_enter(&sc->bge_jslot_serializer);
        entry = SLIST_FIRST(&sc->bge_jfree_listhead);
        if (entry) {
                SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jslot_link);
                entry->bge_inuse = 1;
        } else {
                if_printf(&sc->arpcom.ac_if, "no free jumbo buffers\n");
        }
        lwkt_serialize_exit(&sc->bge_jslot_serializer);
        return(entry);
}

/*
 * Adjust usage count on a jumbo buffer.
 */
static void
bge_jref(void *arg)
{
        struct bge_jslot *entry = (struct bge_jslot *)arg;
        struct bge_softc *sc = entry->bge_sc;

        if (sc == NULL)
                panic("bge_jref: can't find softc pointer!");

        if (&sc->bge_cdata.bge_jslots[entry->bge_slot] != entry) {
                panic("bge_jref: asked to reference buffer "
                    "that we don't manage!");
        } else if (entry->bge_inuse == 0) {
                panic("bge_jref: buffer already free!");
        } else {
                atomic_add_int(&entry->bge_inuse, 1);
        }
}

/*
 * Release a jumbo buffer.
 */
static void
bge_jfree(void *arg)
{
        struct bge_jslot *entry = (struct bge_jslot *)arg;
        struct bge_softc *sc = entry->bge_sc;

        if (sc == NULL)
                panic("bge_jfree: can't find softc pointer!");

        if (&sc->bge_cdata.bge_jslots[entry->bge_slot] != entry) {
                panic("bge_jfree: asked to free buffer that we don't manage!");
        } else if (entry->bge_inuse == 0) {
                panic("bge_jfree: buffer already free!");
        } else {
                /*
                 * Possible MP race to 0, use the serializer.  The atomic insn
                 * is still needed for races against bge_jref().
                 */
                lwkt_serialize_enter(&sc->bge_jslot_serializer);
                atomic_subtract_int(&entry->bge_inuse, 1);
                if (entry->bge_inuse == 0) {
                        SLIST_INSERT_HEAD(&sc->bge_jfree_listhead,
                                          entry, jslot_link);
                }
                lwkt_serialize_exit(&sc->bge_jslot_serializer);
        }
}

/*
 * Initialize a standard receive ring descriptor.
 */
static int
bge_newbuf_std(struct bge_softc *sc, int i, int init)
{
        struct mbuf *m_new = NULL;
        bus_dma_segment_t seg;
        bus_dmamap_t map;
        int error, nsegs;

        m_new = m_getcl(init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
        if (m_new == NULL)
                return ENOBUFS;
        m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;

        if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
                m_adj(m_new, ETHER_ALIGN);

        error = bus_dmamap_load_mbuf_segment(sc->bge_cdata.bge_rx_mtag,
                        sc->bge_cdata.bge_rx_tmpmap, m_new,
                        &seg, 1, &nsegs, BUS_DMA_NOWAIT);
        if (error) {
                m_freem(m_new);
                return error;
        }

        if (!init) {
                bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
                                sc->bge_cdata.bge_rx_std_dmamap[i],
                                BUS_DMASYNC_POSTREAD);
                bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag,
                        sc->bge_cdata.bge_rx_std_dmamap[i]);
        }

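        /*
         * Swap the spare DMA map into this ring slot; the slot's old
         * map becomes the new spare for the next buffer load.
         */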
        map = sc->bge_cdata.bge_rx_tmpmap;
        sc->bge_cdata.bge_rx_tmpmap = sc->bge_cdata.bge_rx_std_dmamap[i];
        sc->bge_cdata.bge_rx_std_dmamap[i] = map;

        sc->bge_cdata.bge_rx_std_chain[i].bge_mbuf = m_new;
        sc->bge_cdata.bge_rx_std_chain[i].bge_paddr = seg.ds_addr;

        bge_setup_rxdesc_std(sc, i);
        return 0;
}

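/*
 * Load the ring descriptor for a standard RX buffer from the saved
 * chain entry.
 */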
static void
bge_setup_rxdesc_std(struct bge_softc *sc, int i)
{
        struct bge_rxchain *rc;
        struct bge_rx_bd *r;

        rc = &sc->bge_cdata.bge_rx_std_chain[i];
        r = &sc->bge_ldata.bge_rx_std_ring[i];

        r->bge_addr.bge_addr_lo = BGE_ADDR_LO(rc->bge_paddr);
        r->bge_addr.bge_addr_hi = BGE_ADDR_HI(rc->bge_paddr);
        r->bge_len = rc->bge_mbuf->m_len;
        r->bge_idx = i;
        r->bge_flags = BGE_RXBDFLAG_END;
}

/*
 * Initialize a jumbo receive ring descriptor. This allocates
 * a jumbo buffer from the pool managed internally by the driver.
 */
static int
bge_newbuf_jumbo(struct bge_softc *sc, int i, int init)
{
        struct mbuf *m_new = NULL;
        struct bge_jslot *buf;
        bus_addr_t paddr;

        /* Allocate the mbuf. */
        MGETHDR(m_new, init ? MB_WAIT : MB_DONTWAIT, MT_DATA);
        if (m_new == NULL)
                return ENOBUFS;

        /* Allocate the jumbo buffer */
        buf = bge_jalloc(sc);
        if (buf == NULL) {
                m_freem(m_new);
                return ENOBUFS;
        }

        /* Attach the buffer to the mbuf. */
        m_new->m_ext.ext_arg = buf;
        m_new->m_ext.ext_buf = buf->bge_buf;
        m_new->m_ext.ext_free = bge_jfree;
        m_new->m_ext.ext_ref = bge_jref;
        m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN;

        m_new->m_flags |= M_EXT;

        m_new->m_data = m_new->m_ext.ext_buf;
        m_new->m_len = m_new->m_pkthdr.len = m_new->m_ext.ext_size;

        paddr = buf->bge_paddr;
        if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0) {
                m_adj(m_new, ETHER_ALIGN);
                paddr += ETHER_ALIGN;
        }

        /* Save necessary information */
        sc->bge_cdata.bge_rx_jumbo_chain[i].bge_mbuf = m_new;
        sc->bge_cdata.bge_rx_jumbo_chain[i].bge_paddr = paddr;

        /* Set up the descriptor. */
        bge_setup_rxdesc_jumbo(sc, i);
        return 0;
}

static void
bge_setup_rxdesc_jumbo(struct bge_softc *sc, int i)
{
        struct bge_rx_bd *r;
        struct bge_rxchain *rc;

        r = &sc->bge_ldata.bge_rx_jumbo_ring[i];
        rc = &sc->bge_cdata.bge_rx_jumbo_chain[i];

        r->bge_addr.bge_addr_lo = BGE_ADDR_LO(rc->bge_paddr);
        r->bge_addr.bge_addr_hi = BGE_ADDR_HI(rc->bge_paddr);
        r->bge_len = rc->bge_mbuf->m_len;
        r->bge_idx = i;
        r->bge_flags = BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING;
}

static int
bge_init_rx_ring_std(struct bge_softc *sc)
{
        int i, error;

        for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
                error = bge_newbuf_std(sc, i, 1);
                if (error)
                        return error;
        }

        sc->bge_std = BGE_STD_RX_RING_CNT - 1;
        bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);

        return(0);
}

static void
bge_free_rx_ring_std(struct bge_softc *sc)
{
        int i;

        for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
                struct bge_rxchain *rc = &sc->bge_cdata.bge_rx_std_chain[i];

                if (rc->bge_mbuf != NULL) {
                        bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag,
                                          sc->bge_cdata.bge_rx_std_dmamap[i]);
                        m_freem(rc->bge_mbuf);
                        rc->bge_mbuf = NULL;
                }
                bzero(&sc->bge_ldata.bge_rx_std_ring[i],
                    sizeof(struct bge_rx_bd));
        }
}

static int
bge_init_rx_ring_jumbo(struct bge_softc *sc)
{
        struct bge_rcb *rcb;
        int i, error;

        for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
                error = bge_newbuf_jumbo(sc, i, 1);
                if (error)
                        return error;
        }

        sc->bge_jumbo = BGE_JUMBO_RX_RING_CNT - 1;

        rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
        rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 0);
        CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);

        bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);

        return(0);
}

static void
bge_free_rx_ring_jumbo(struct bge_softc *sc)
{
        int i;

        for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
                struct bge_rxchain *rc = &sc->bge_cdata.bge_rx_jumbo_chain[i];

                if (rc->bge_mbuf != NULL) {
                        m_freem(rc->bge_mbuf);
                        rc->bge_mbuf = NULL;
                }
                bzero(&sc->bge_ldata.bge_rx_jumbo_ring[i],
                    sizeof(struct bge_rx_bd));
        }
}

static void
bge_free_tx_ring(struct bge_softc *sc)
{
        int i;

        for (i = 0; i < BGE_TX_RING_CNT; i++) {
                if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
                        bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag,
                                          sc->bge_cdata.bge_tx_dmamap[i]);
                        m_freem(sc->bge_cdata.bge_tx_chain[i]);
                        sc->bge_cdata.bge_tx_chain[i] = NULL;
                }
                bzero(&sc->bge_ldata.bge_tx_ring[i],
                    sizeof(struct bge_tx_bd));
        }
}

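/*
 * Reset the TX ring bookkeeping and prime the TX producer mailboxes,
 * honoring the 5700 B2 double-write errata.
 */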
static int
bge_init_tx_ring(struct bge_softc *sc)
{
        sc->bge_txcnt = 0;
        sc->bge_tx_saved_considx = 0;
        sc->bge_tx_prodidx = 0;

        /* Initialize transmit producer index for host-memory send ring. */
        bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);

        /* 5700 b2 errata */
        if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
                bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);

        bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
        /* 5700 b2 errata */
        if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
                bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);

        return(0);
}

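/*
 * Program the multicast hash filter.  In promiscuous or allmulti mode
 * all bits are set; otherwise each multicast address sets one bit,
 * selected by the low 7 bits of its little-endian CRC32, in the four
 * BGE_MAR registers.
 */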
static void
bge_setmulti(struct bge_softc *sc)
{
        struct ifnet *ifp;
        struct ifmultiaddr *ifma;
        uint32_t hashes[4] = { 0, 0, 0, 0 };
        int h, i;

        ifp = &sc->arpcom.ac_if;

        if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
                for (i = 0; i < 4; i++)
                        CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
                return;
        }

        /* First, zot all the existing filters. */
        for (i = 0; i < 4; i++)
                CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);

        /* Now program new ones. */
        TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
                if (ifma->ifma_addr->sa_family != AF_LINK)
                        continue;
                h = ether_crc32_le(
                    LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
                    ETHER_ADDR_LEN) & 0x7f;
                hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
        }

        for (i = 0; i < 4; i++)
                CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
}

/*
 * Do endian, PCI and DMA initialization. Also check the on-board ROM
 * self-test results.
 */
static int
bge_chipinit(struct bge_softc *sc)
{
        int i;
        uint32_t dma_rw_ctl;
        uint16_t val;

        /* Set endian type before we access any non-PCI registers. */
        pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL,
            BGE_INIT | sc->bge_pci_miscctl, 4);

        /* Clear the MAC control register */
        CSR_WRITE_4(sc, BGE_MAC_MODE, 0);

        /*
         * Clear the MAC statistics block in the NIC's
         * internal memory.
         */
        for (i = BGE_STATS_BLOCK;
            i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t))
                BGE_MEMWIN_WRITE(sc, i, 0);

        for (i = BGE_STATUS_BLOCK;
            i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t))
                BGE_MEMWIN_WRITE(sc, i, 0);

        if (sc->bge_chiprev == BGE_CHIPREV_5704_BX) {
                /*
                 * Fix data corruption caused by non-qword write with WB.
                 * Fix master abort in PCI mode.
                 * Fix PCI latency timer.
                 */
                val = pci_read_config(sc->bge_dev, BGE_PCI_MSI_DATA + 2, 2);
                val |= (1 << 10) | (1 << 12) | (1 << 13);
                pci_write_config(sc->bge_dev, BGE_PCI_MSI_DATA + 2, val, 2);
        }

        /* Set up the PCI DMA control register. */
        if (sc->bge_flags & BGE_FLAG_PCIE) {
                /* PCI Express */
                dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
                    (0xf << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
                    (0x2 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
        } else if (sc->bge_flags & BGE_FLAG_PCIX) {
                /* PCI-X bus */
                if (BGE_IS_5714_FAMILY(sc)) {
                        dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD;
                        dma_rw_ctl &= ~BGE_PCIDMARWCTL_ONEDMA_ATONCE; /* XXX */
                        /* XXX magic values, Broadcom-supplied Linux driver */
                        if (sc->bge_asicrev == BGE_ASICREV_BCM5780) {
                                dma_rw_ctl |= (1 << 20) | (1 << 18) |
                                    BGE_PCIDMARWCTL_ONEDMA_ATONCE;
                        } else {
                                dma_rw_ctl |= (1 << 20) | (1 << 18) | (1 << 15);
                        }
                } else if (sc->bge_asicrev == BGE_ASICREV_BCM5703) {
                        /*
                         * In the BCM5703, the DMA read watermark should
                         * be set to less than or equal to the maximum
                         * memory read byte count of the PCI-X command
                         * register.
                         */
                        dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
                            (0x4 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
                            (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
                } else if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
                        /*
                         * The 5704 uses a different encoding of read/write
                         * watermarks.
                         */
                        dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
                            (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
                            (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
                } else {
                        dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
                            (0x3 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
                            (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
                            (0x0F);
                }

                /*
                 * 5703 and 5704 need ONEDMA_AT_ONCE as a workaround
                 * for hardware bugs.
                 */
                if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
                    sc->bge_asicrev == BGE_ASICREV_BCM5704) {
                        uint32_t tmp;

                        tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1f;
                        if (tmp == 0x6 || tmp == 0x7)
                                dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE;
                }
        } else {
                /* Conventional PCI bus */
                dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
                    (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
                    (0x7 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
                    (0x0F);
        }

        if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
            sc->bge_asicrev == BGE_ASICREV_BCM5704 ||
            sc->bge_asicrev == BGE_ASICREV_BCM5705)
                dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
        pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);

        /*
         * Set up general mode register.
         */
        CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS|
            BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS|
            BGE_MODECTL_TX_NO_PHDR_CSUM);

        /*
         * The BCM5701 B5 has a bug causing data corruption when using
         * 64-bit DMA reads, which can be terminated early and then
         * completed later as 32-bit accesses, in combination with
         * certain bridges.
         */
        if (sc->bge_asicrev == BGE_ASICREV_BCM5701 &&
            sc->bge_chipid == BGE_CHIPID_BCM5701_B5)
                BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_FORCE_PCI32);

        /*
         * Disable memory write invalidate.  Apparently it is not supported
         * properly by these devices.
         */
        PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD, PCIM_CMD_MWIEN, 4);

        /* Set the timer prescaler (always 66MHz) */
        CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/);

        if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
                DELAY(40);      /* XXX */

                /* Put PHY into ready state */
                BGE_CLRBIT(sc, BGE_MISC_CFG, BGE_MISCCFG_EPHY_IDDQ);
                CSR_READ_4(sc, BGE_MISC_CFG); /* Flush */
                DELAY(40);
        }

        return(0);
}

static int
bge_blockinit(struct bge_softc *sc)
{
        struct bge_rcb *rcb;
        bus_size_t vrcb;
        bge_hostaddr taddr;
        uint32_t val;
        int i, limit;

        /*
         * Initialize the memory window pointer register so that
         * we can access the first 32K of internal NIC RAM. This will
         * allow us to set up the TX send ring RCBs and the RX return
         * ring RCBs, plus other things which live in NIC memory.
         */
        CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);

        /* Note: the BCM5704 has a smaller mbuf space than other chips. */

        if (!BGE_IS_5705_PLUS(sc)) {
                /* Configure mbuf memory pool */
                CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_BUFFPOOL_1);
                if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
                        CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
                else
                        CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);

                /* Configure DMA resource pool */
                CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
                    BGE_DMA_DESCRIPTORS);
                CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
        }

        /* Configure mbuf pool watermarks */
        if (!BGE_IS_5705_PLUS(sc)) {
                CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
                CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
                CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
        } else if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
                CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
                CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x04);
                CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x10);
        } else {
                CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
                CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
                CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
        }

        /* Configure DMA resource watermarks */
        CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
        CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);

        /* Enable buffer manager */
        CSR_WRITE_4(sc, BGE_BMAN_MODE,
            BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN);

        /* Poll for buffer manager start indication */
        for (i = 0; i < BGE_TIMEOUT; i++) {
                if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
                        break;
                DELAY(10);
        }

        if (i == BGE_TIMEOUT) {
                if_printf(&sc->arpcom.ac_if,
                          "buffer manager failed to start\n");
                return(ENXIO);
        }

        /* Enable flow-through queues */
        CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
        CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);

        /* Wait until queue initialization is complete */
        for (i = 0; i < BGE_TIMEOUT; i++) {
                if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
                        break;
                DELAY(10);
        }

        if (i == BGE_TIMEOUT) {
1453                 if_printf(&sc->arpcom.ac_if,
1454                           "flow-through queue init failed\n");
1455                 return(ENXIO);
1456         }
1457
1458         /*
1459          * Summary of rings supported by the controller:
1460          *
1461          * Standard Receive Producer Ring
1462          * - This ring is used to feed receive buffers for "standard"
1463          *   sized frames (typically 1536 bytes) to the controller.
1464          *
1465          * Jumbo Receive Producer Ring
1466          * - This ring is used to feed receive buffers for jumbo sized
1467          *   frames (i.e. anything bigger than the "standard" frames)
1468          *   to the controller.
1469          *
1470          * Mini Receive Producer Ring
1471          * - This ring is used to feed receive buffers for "mini"
1472          *   sized frames to the controller.
1473          * - This feature required external memory for the controller
1474          *   but was never used in a production system.  It should
1475          *   always be disabled.
1476          *
1477          * Receive Return Ring
1478          * - After the controller has placed an incoming frame into a
1479          *   receive buffer that buffer is moved into a receive return
1480          *   ring.  The driver is then responsible for passing the
1481          *   buffer up to the stack.  Many versions of the controller
1482          *   support multiple RR rings.
1483          *
1484          * Send Ring
1485          * - This ring is used for outgoing frames.  Many versions of
1486          *   the controller support multiple send rings.
1487          */
1488
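        /*
         * For reference, each ring is described to the chip by a ring
         * control block (RCB).  Its assumed in-memory image, matching
         * the fields programmed below, is roughly:
         *
         *      struct bge_rcb {
         *              bge_hostaddr    bge_hostaddr;     (64-bit host ring base)
         *              uint32_t        bge_maxlen_flags; (length/size and flags)
         *              uint32_t        bge_nicaddr;      (ring address in NIC RAM)
         *      };
         */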
1489         /* Initialize the standard receive producer ring control block. */
1490         rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb;
1491         rcb->bge_hostaddr.bge_addr_lo =
1492             BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr);
1493         rcb->bge_hostaddr.bge_addr_hi =
1494             BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr);
1495         if (BGE_IS_5705_PLUS(sc)) {
1496                 /*
1497                  * Bits 31-16: Programmable ring size (512, 256, 128, 64, 32)
1498                  * Bits 15-2 : Reserved (should be 0)
1499                  * Bit 1     : 1 = Ring Disabled, 0 = Ring Enabled
1500                  * Bit 0     : Reserved
1501                  */
1502                 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
1503         } else {
1504                 /*
1505                  * Ring size is always XXX entries
1506                  * Bits 31-16: Maximum RX frame size
1507                  * Bits 15-2 : Reserved (should be 0)
1508                  * Bit 1     : 1 = Ring Disabled, 0 = Ring Enabled
1509                  * Bit 0     : Reserved
1510                  */
1511                 rcb->bge_maxlen_flags =
1512                     BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
1513         }
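        /*
         * BGE_RCB_MAXLEN_FLAGS() is assumed to pack the two halves
         * described above, roughly:
         *
         *      #define BGE_RCB_MAXLEN_FLAGS(maxlen, flags) \
         *              (((maxlen) << 16) | (flags))
         *
         * so BGE_RCB_MAXLEN_FLAGS(512, 0) programs a 512-entry ring
         * with the 'ring disabled' bit clear.
         */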
1514         rcb->bge_nicaddr = BGE_STD_RX_RINGS;
1515         /* Write the standard receive producer ring control block. */
1516         CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
1517         CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
1518         CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1519         CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
1520         /* Reset the standard receive producer ring producer index. */
1521         bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0);
1522
1523         /*
1524          * Initialize the jumbo RX producer ring control
1525          * block.  We set the 'ring disabled' bit in the
1526          * flags field until we're actually ready to start
1527          * using this ring (i.e. once we set the MTU
1528          * high enough to require it).
1529          */
1530         if (BGE_IS_JUMBO_CAPABLE(sc)) {
1531                 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
1532                 /* Get the jumbo receive producer ring RCB parameters. */
1533                 rcb->bge_hostaddr.bge_addr_lo =
1534                     BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1535                 rcb->bge_hostaddr.bge_addr_hi =
1536                     BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1537                 rcb->bge_maxlen_flags =
1538                     BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN,
1539                     BGE_RCB_FLAG_RING_DISABLED);
1540                 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
1541                 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
1542                     rcb->bge_hostaddr.bge_addr_hi);
1543                 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
1544                     rcb->bge_hostaddr.bge_addr_lo);
1545                 /* Program the jumbo receive producer ring RCB parameters. */
1546                 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
1547                     rcb->bge_maxlen_flags);
1548                 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
1549                 /* Reset the jumbo receive producer ring producer index. */
1550                 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
1551         }
1552
1553         /* Disable the mini receive producer ring RCB. */
1554         if (BGE_IS_5700_FAMILY(sc)) {
1555                 rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb;
1556                 rcb->bge_maxlen_flags =
1557                     BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
1558                 CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
1559                     rcb->bge_maxlen_flags);
1560                 /* Reset the mini receive producer ring producer index. */
1561                 bge_writembx(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
1562         }
1563
1564         /* Choose de-pipeline mode for BCM5906 A0, A1 and A2. */
1565         if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
1566             (sc->bge_chipid == BGE_CHIPID_BCM5906_A0 ||
1567              sc->bge_chipid == BGE_CHIPID_BCM5906_A1 ||
1568              sc->bge_chipid == BGE_CHIPID_BCM5906_A2)) {
1569                 CSR_WRITE_4(sc, BGE_ISO_PKT_TX,
1570                     (CSR_READ_4(sc, BGE_ISO_PKT_TX) & ~3) | 2);
1571         }
1572
1573         /*
1574          * The BD ring replenish thresholds control how often the
1575          * hardware fetches new BD's from the producer rings in host
1576          * memory.  Setting the value too low on a busy system can
1577          * starve the hardware and reduce the throughput.
1578          *
1579          * Set the BD ring replenish thresholds. The recommended
1580          * values are 1/8th the number of descriptors allocated to
1581          * each ring.
1582          */
1583         if (BGE_IS_5705_PLUS(sc))
1584                 val = 8;
1585         else
1586                 val = BGE_STD_RX_RING_CNT / 8;
1587         CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, val);
1588         if (BGE_IS_JUMBO_CAPABLE(sc)) {
1589                 CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH,
1590                     BGE_JUMBO_RX_RING_CNT/8);
1591         }
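        /*
         * Worked example: assuming BGE_STD_RX_RING_CNT is 512, the
         * 5700-family threshold above is 512 / 8 = 64 BDs, while 5705+
         * parts use the fixed value 8 regardless of the ring size.
         */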
1592
1593         /*
1594          * Disable all send rings by setting the 'ring disabled' bit
1595          * in the flags field of all the TX send ring control blocks,
1596          * located in NIC memory.
1597          */
1598         if (!BGE_IS_5705_PLUS(sc)) {
1599                 /* 5700 to 5704 had 16 send rings. */
1600                 limit = BGE_TX_RINGS_EXTSSRAM_MAX;
1601         } else {
1602                 limit = 1;
1603         }
1604         vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1605         for (i = 0; i < limit; i++) {
1606                 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1607                     BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
1608                 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1609                 vrcb += sizeof(struct bge_rcb);
1610         }
1611
1612         /* Configure send ring RCB 0 (we use only the first ring) */
1613         vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1614         BGE_HOSTADDR(taddr, sc->bge_ldata.bge_tx_ring_paddr);
1615         RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1616         RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1617         RCB_WRITE_4(sc, vrcb, bge_nicaddr,
1618             BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
1619         RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1620             BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
1621
1622         /*
1623          * Disable all receive return rings by setting the
1624          * 'ring disabled' bit in the flags field of all the receive
1625          * return ring control blocks, located in NIC memory.
1626          */
1627         if (!BGE_IS_5705_PLUS(sc))
1628                 limit = BGE_RX_RINGS_MAX;
1629         else if (sc->bge_asicrev == BGE_ASICREV_BCM5755)
1630                 limit = 4;
1631         else
1632                 limit = 1;
1633         /* Disable all receive return rings. */
1634         vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1635         for (i = 0; i < limit; i++) {
1636                 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0);
1637                 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0);
1638                 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1639                     BGE_RCB_FLAG_RING_DISABLED);
1640                 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1641                 bge_writembx(sc, BGE_MBX_RX_CONS0_LO +
1642                     (i * (sizeof(uint64_t))), 0);
1643                 vrcb += sizeof(struct bge_rcb);
1644         }
1645
1646         /*
1647          * Set up receive return ring 0.  Note that the NIC address
1648          * for RX return rings is 0x0.  The return rings live entirely
1649          * within the host, so the nicaddr field in the RCB isn't used.
1650          */
1651         vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1652         BGE_HOSTADDR(taddr, sc->bge_ldata.bge_rx_return_ring_paddr);
1653         RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1654         RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1655         RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1656         RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1657             BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));
1658
1659         /* Set random backoff seed for TX */
1660         CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
1661             sc->arpcom.ac_enaddr[0] + sc->arpcom.ac_enaddr[1] +
1662             sc->arpcom.ac_enaddr[2] + sc->arpcom.ac_enaddr[3] +
1663             sc->arpcom.ac_enaddr[4] + sc->arpcom.ac_enaddr[5] +
1664             BGE_TX_BACKOFF_SEED_MASK);
1665
1666         /* Set inter-packet gap */
1667         CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);
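        /*
         * The 0x2620 value is assumed to decode as: slot time 0x20
         * (bits 7:0), inter-packet gap 6 (bits 11:8) and IPG CRS 2
         * (bits 13:12), the values Broadcom's own drivers program here.
         */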
1668
1669         /*
1670          * Specify which ring to use for packets that don't match
1671          * any RX rules.
1672          */
1673         CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
1674
1675         /*
1676          * Configure number of RX lists. One interrupt distribution
1677          * list, sixteen active lists, one bad frames class.
1678          */
1679         CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
1680
1681         /* Initialize RX list placement stats mask. */
1682         CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
1683         CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
1684
1685         /* Disable host coalescing until we get it set up */
1686         CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
1687
1688         /* Poll to make sure it's shut down. */
1689         for (i = 0; i < BGE_TIMEOUT; i++) {
1690                 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
1691                         break;
1692                 DELAY(10);
1693         }
1694
1695         if (i == BGE_TIMEOUT) {
1696                 if_printf(&sc->arpcom.ac_if,
1697                           "host coalescing engine failed to idle\n");
1698                 return(ENXIO);
1699         }
1700
1701         /* Set up host coalescing defaults */
1702         CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
1703         CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
1704         CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_coal_bds);
1705         CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_coal_bds);
1706         if (!BGE_IS_5705_PLUS(sc)) {
1707                 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT,
1708                     sc->bge_rx_coal_ticks_int);
1709                 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT,
1710                     sc->bge_tx_coal_ticks_int);
1711         }
1712         /*
1713          * NOTE:
1714          * The datasheet (57XX-PG105-R) says the BCM5705+ does not
1715          * have the following two registers; this is obviously wrong.
1716          */
1717         CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, sc->bge_rx_coal_bds_int);
1718         CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, sc->bge_tx_coal_bds_int);
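        /*
         * The host coalescing engine batches interrupts and status
         * block updates: an update fires once either the tick timer or
         * the max BD count programmed above is reached, whichever comes
         * first, so these knobs trade interrupt rate against latency.
         */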
1719
1720         /* Set up address of statistics block */
1721         if (!BGE_IS_5705_PLUS(sc)) {
1722                 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI,
1723                     BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr));
1724                 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
1725                     BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr));
1726
1727                 CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
1728                 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
1729                 CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
1730         }
1731
1732         /* Set up address of status block */
1733         bzero(sc->bge_ldata.bge_status_block, BGE_STATUS_BLK_SZ);
1734         CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI,
1735             BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr));
1736         CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
1737             BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr));
1738
1739         /*
1740          * Set up the status block partial update size.
1741          *
1742          * Because only a single TX ring, RX producer ring and RX return ring
1743          * are used, ask the device to update only the minimal part of it,
1744          * except for BCM5700 AX/BX, whose status block partial update size
1745          * can't be configured.
1746          */
1747         if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
1748             sc->bge_chipid != BGE_CHIPID_BCM5700_C0) {
1749                 /* XXX Actually reserved on BCM5700 AX/BX */
1750                 val = BGE_STATBLKSZ_FULL;
1751         } else {
1752                 val = BGE_STATBLKSZ_32BYTE;
1753         }
1754 #if 0
1755         if (sc->bge_flags & BGE_FLAG_STATUS_TAG) {
1756                 val |= 0x00000200 | 0x00000400;
1757                 if_printf(&sc->arpcom.ac_if, "enable TMR\n");
1758         }
1759 #endif
1760
1761         /* Turn on host coalescing state machine */
1762         CSR_WRITE_4(sc, BGE_HCC_MODE, val | BGE_HCCMODE_ENABLE);
1763
1764         /* Turn on RX BD completion state machine and enable attentions */
1765         CSR_WRITE_4(sc, BGE_RBDC_MODE,
1766             BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);
1767
1768         /* Turn on RX list placement state machine */
1769         CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
1770
1771         /* Turn on RX list selector state machine. */
1772         if (!BGE_IS_5705_PLUS(sc))
1773                 CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
1774
1775         val = BGE_MACMODE_TXDMA_ENB | BGE_MACMODE_RXDMA_ENB |
1776             BGE_MACMODE_RX_STATS_CLEAR | BGE_MACMODE_TX_STATS_CLEAR |
1777             BGE_MACMODE_RX_STATS_ENB | BGE_MACMODE_TX_STATS_ENB |
1778             BGE_MACMODE_FRMHDR_DMA_ENB;
1779
1780         if (sc->bge_flags & BGE_FLAG_TBI)
1781                 val |= BGE_PORTMODE_TBI;
1782         else if (sc->bge_flags & BGE_FLAG_MII_SERDES)
1783                 val |= BGE_PORTMODE_GMII;
1784         else
1785                 val |= BGE_PORTMODE_MII;
1786
1787         /* Turn on DMA, clear stats */
1788         CSR_WRITE_4(sc, BGE_MAC_MODE, val);
1789
1790         /* Set misc. local control, enable interrupts on attentions */
1791         CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);
1792
1793 #ifdef notdef
1794         /* Assert GPIO pins for PHY reset */
1795         BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
1796             BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
1797         BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
1798             BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
1799 #endif
1800
1801         /* Turn on DMA completion state machine */
1802         if (!BGE_IS_5705_PLUS(sc))
1803                 CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
1804
1805         /* Turn on write DMA state machine */
1806         val = BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS;
1807         if (BGE_IS_5755_PLUS(sc)) {
1808                 /* Enable host coalescing bug fix. */
1809                 val |= BGE_WDMAMODE_STATUS_TAG_FIX;
1810         }
1811         if (sc->bge_asicrev == BGE_ASICREV_BCM5785) {
1812                 /* Request larger DMA burst size to get better performance. */
1813                 val |= BGE_WDMAMODE_BURST_ALL_DATA;
1814         }
1815         CSR_WRITE_4(sc, BGE_WDMA_MODE, val);
1816         DELAY(40);
1817
1818         if (sc->bge_asicrev == BGE_ASICREV_BCM5761 ||
1819             sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
1820             sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
1821             sc->bge_asicrev == BGE_ASICREV_BCM57780) {
1822                 /*
1823                  * Enable fix for read DMA FIFO overruns.
1824                  * The fix is to limit the number of RX BDs
1825          * the hardware would fetch at a time.
1826                  */
1827                 val = CSR_READ_4(sc, BGE_RDMA_RSRVCTRL);
1828                 CSR_WRITE_4(sc, BGE_RDMA_RSRVCTRL,
1829             val | BGE_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
1830         }
1831
1832         /* Turn on read DMA state machine */
1833         val = BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS;
1834         if (sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
1835             sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
1836             sc->bge_asicrev == BGE_ASICREV_BCM57780)
1837                 val |= BGE_RDMAMODE_BD_SBD_CRPT_ATTN |
1838                   BGE_RDMAMODE_MBUF_RBD_CRPT_ATTN |
1839                   BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN;
1840         if (sc->bge_flags & BGE_FLAG_PCIE)
1841                 val |= BGE_RDMAMODE_FIFO_LONG_BURST;
1842         CSR_WRITE_4(sc, BGE_RDMA_MODE, val);
1843         DELAY(40);
1844
1845         /* Turn on RX data completion state machine */
1846         CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
1847
1848         /* Turn on RX BD initiator state machine */
1849         CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
1850
1851         /* Turn on RX data and RX BD initiator state machine */
1852         CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
1853
1854         /* Turn on Mbuf cluster free state machine */
1855         if (!BGE_IS_5705_PLUS(sc))
1856                 CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
1857
1858         /* Turn on send BD completion state machine */
1859         CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
1860
1861         /* Turn on send data completion state machine */
1862         val = BGE_SDCMODE_ENABLE;
1863         if (sc->bge_asicrev == BGE_ASICREV_BCM5761)
1864                 val |= BGE_SDCMODE_CDELAY; 
1865         CSR_WRITE_4(sc, BGE_SDC_MODE, val);
1866
1867         /* Turn on send data initiator state machine */
1868         CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
1869
1870         /* Turn on send BD initiator state machine */
1871         CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
1872
1873         /* Turn on send BD selector state machine */
1874         CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
1875
1876         CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
1877         CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
1878             BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);
1879
1880         /* ack/clear link change events */
1881         CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
1882             BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
1883             BGE_MACSTAT_LINK_CHANGED);
1884         CSR_WRITE_4(sc, BGE_MI_STS, 0);
1885
1886         /*
1887          * Enable attention when the link has changed state for
1888          * devices that use auto polling.
1889          */
1890         if (sc->bge_flags & BGE_FLAG_TBI) {
1891                 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
1892         } else {
1893                 if (sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) {
1894                         CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode);
1895                         DELAY(80);
1896                 }
1897                 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
1898                     sc->bge_chipid != BGE_CHIPID_BCM5700_B2) {
1899                         CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
1900                             BGE_EVTENB_MI_INTERRUPT);
1901                 }
1902         }
1903
1904         /*
1905          * Clear any pending link state attention.
1906          * Otherwise some link state change events may be lost until attention
1907          * is cleared by the bge_intr() -> bge_softc.bge_link_upd() sequence.
1908          * It's not necessary on newer BCM chips - perhaps enabling link
1909          * state change attentions implies clearing pending attention.
1910          */
1911         CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
1912             BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
1913             BGE_MACSTAT_LINK_CHANGED);
1914
1915         /* Enable link state change attentions. */
1916         BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
1917
1918         return(0);
1919 }
1920
1921 /*
1922  * Probe for a Broadcom chip. Check the PCI vendor and device IDs
1923  * against our list and return its name if we find a match. Note
1924  * that since the Broadcom controller contains VPD support, we
1925  * can get the device name string from the controller itself instead
1926  * of the compiled-in string. This is a little slow, but it guarantees
1927  * we'll always announce the right product name.
1928  */
1929 static int
1930 bge_probe(device_t dev)
1931 {
1932         const struct bge_type *t;
1933         uint16_t product, vendor;
1934
1935         product = pci_get_device(dev);
1936         vendor = pci_get_vendor(dev);
1937
1938         for (t = bge_devs; t->bge_name != NULL; t++) {
1939                 if (vendor == t->bge_vid && product == t->bge_did)
1940                         break;
1941         }
1942         if (t->bge_name == NULL)
1943                 return(ENXIO);
1944
1945         device_set_desc(dev, t->bge_name);
1946         return(0);
1947 }
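/*
 * The bge_devs[] table scanned above is assumed to hold entries shaped
 * roughly like:
 *
 *      struct bge_type {
 *              uint16_t        bge_vid;        (PCI vendor ID)
 *              uint16_t        bge_did;        (PCI device ID)
 *              char            *bge_name;      (description string)
 *      };
 *
 * terminated by an entry whose bge_name is NULL.
 */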
1948
1949 static int
1950 bge_attach(device_t dev)
1951 {
1952         struct ifnet *ifp;
1953         struct bge_softc *sc;
1954         uint32_t hwcfg = 0, misccfg;
1955         int error = 0, rid, capmask;
1956         uint8_t ether_addr[ETHER_ADDR_LEN];
1957         uint16_t product, vendor;
1958         driver_intr_t *intr_func;
1959
1960         sc = device_get_softc(dev);
1961         sc->bge_dev = dev;
1962         callout_init(&sc->bge_stat_timer);
1963         lwkt_serialize_init(&sc->bge_jslot_serializer);
1964
1965 #ifndef BURN_BRIDGES
1966         if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
1967                 uint32_t irq, mem;
1968
1969                 irq = pci_read_config(dev, PCIR_INTLINE, 4);
1970                 mem = pci_read_config(dev, BGE_PCI_BAR0, 4);
1971
1972                 device_printf(dev, "chip is in D%d power mode "
1973                     "-- setting to D0\n", pci_get_powerstate(dev));
1974
1975                 pci_set_powerstate(dev, PCI_POWERSTATE_D0);
1976
1977                 pci_write_config(dev, PCIR_INTLINE, irq, 4);
1978                 pci_write_config(dev, BGE_PCI_BAR0, mem, 4);
1979         }
1980 #endif  /* !BURN_BRIDGES */
1981
1982         /*
1983          * Map control/status registers.
1984          */
1985         pci_enable_busmaster(dev);
1986
1987         rid = BGE_PCI_BAR0;
1988         sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
1989             RF_ACTIVE);
1990
1991         if (sc->bge_res == NULL) {
1992                 device_printf(dev, "couldn't map memory\n");
1993                 return ENXIO;
1994         }
1995
1996         sc->bge_btag = rman_get_bustag(sc->bge_res);
1997         sc->bge_bhandle = rman_get_bushandle(sc->bge_res);
1998
1999         /* Save various chip information */
2000         sc->bge_chipid =
2001             pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >>
2002             BGE_PCIMISCCTL_ASICREV_SHIFT;
2003         if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_USE_PRODID_REG)
2004                 sc->bge_chipid = pci_read_config(dev, BGE_PCI_PRODID_ASICREV, 4);
2005         sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid);
2006         sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid);
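        /*
         * BGE_ASICREV() and BGE_CHIPREV() are assumed to extract the
         * high-order fields of the chip ID, roughly ((x) >> 12) for the
         * ASIC revision and ((x) >> 8) for the chip revision.
         */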
2007
2008         /* Save chipset family. */
2009         switch (sc->bge_asicrev) {
2010         case BGE_ASICREV_BCM5755:
2011         case BGE_ASICREV_BCM5761:
2012         case BGE_ASICREV_BCM5784:
2013         case BGE_ASICREV_BCM5785:
2014         case BGE_ASICREV_BCM5787:
2015         case BGE_ASICREV_BCM57780:
2016             sc->bge_flags |= BGE_FLAG_5755_PLUS | BGE_FLAG_575X_PLUS |
2017                 BGE_FLAG_5705_PLUS;
2018             break;
2019
2020         case BGE_ASICREV_BCM5700:
2021         case BGE_ASICREV_BCM5701:
2022         case BGE_ASICREV_BCM5703:
2023         case BGE_ASICREV_BCM5704:
2024                 sc->bge_flags |= BGE_FLAG_5700_FAMILY | BGE_FLAG_JUMBO;
2025                 break;
2026
2027         case BGE_ASICREV_BCM5714_A0:
2028         case BGE_ASICREV_BCM5780:
2029         case BGE_ASICREV_BCM5714:
2030                 sc->bge_flags |= BGE_FLAG_5714_FAMILY;
2031                 /* Fall through */
2032
2033         case BGE_ASICREV_BCM5750:
2034         case BGE_ASICREV_BCM5752:
2035         case BGE_ASICREV_BCM5906:
2036                 sc->bge_flags |= BGE_FLAG_575X_PLUS;
2037                 /* Fall through */
2038
2039         case BGE_ASICREV_BCM5705:
2040                 sc->bge_flags |= BGE_FLAG_5705_PLUS;
2041                 break;
2042         }
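        /*
         * Note the deliberate fall-throughs above: a 5714-family part
         * accumulates BGE_FLAG_5714_FAMILY, BGE_FLAG_575X_PLUS and
         * BGE_FLAG_5705_PLUS, and a BCM5750/5752/5906 picks up both
         * BGE_FLAG_575X_PLUS and BGE_FLAG_5705_PLUS.
         */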
2043
2044         if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
2045                 sc->bge_flags |= BGE_FLAG_NO_EEPROM;
2046
2047         misccfg = CSR_READ_4(sc, BGE_MISC_CFG) & BGE_MISCCFG_BOARD_ID_MASK;
2048         if (sc->bge_asicrev == BGE_ASICREV_BCM5705 &&
2049             (misccfg == BGE_MISCCFG_BOARD_ID_5788 ||
2050              misccfg == BGE_MISCCFG_BOARD_ID_5788M))
2051                 sc->bge_flags |= BGE_FLAG_5788;
2052
2053         /* BCM5755 or higher and BCM5906 have the short DMA bug. */
2054         if (BGE_IS_5755_PLUS(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5906)
2055                 sc->bge_flags |= BGE_FLAG_SHORTDMA;
2056
2057         /*
2058          * Check if this is a PCI-X or PCI Express device.
2059          */
2060         if (BGE_IS_5705_PLUS(sc)) {
2061                 if (pci_is_pcie(dev)) {
2062                         sc->bge_flags |= BGE_FLAG_PCIE;
2063                         sc->bge_pciecap = pci_get_pciecap_ptr(sc->bge_dev);
2064                         pcie_set_max_readrq(dev, PCIEM_DEVCTL_MAX_READRQ_4096);
2065                 }
2066         } else {
2067                 /*
2068                  * Check if the device is in PCI-X Mode.
2069                  * (This bit is not valid on PCI Express controllers.)
2070                  */
2071                 if ((pci_read_config(sc->bge_dev, BGE_PCI_PCISTATE, 4) &
2072                     BGE_PCISTATE_PCI_BUSMODE) == 0) {
2073                         sc->bge_flags |= BGE_FLAG_PCIX;
2074                         sc->bge_pcixcap = pci_get_pcixcap_ptr(sc->bge_dev);
2075                         sc->bge_mbox_reorder = device_getenv_int(sc->bge_dev,
2076                             "mbox_reorder", 0);
2077                 }
2078         }
2079         device_printf(dev, "CHIP ID 0x%08x; "
2080                       "ASIC REV 0x%02x; CHIP REV 0x%02x; %s\n",
2081                       sc->bge_chipid, sc->bge_asicrev, sc->bge_chiprev,
2082                       (sc->bge_flags & BGE_FLAG_PCIX) ? "PCI-X"
2083                       : ((sc->bge_flags & BGE_FLAG_PCIE) ?
2084                         "PCI-E" : "PCI"));
2085
2086         /*
2087          * The 40bit DMA bug applies to the 5714/5715 controllers and is
2088          * not actually a MAC controller bug but an issue with the embedded
2089          * PCIe to PCI-X bridge in the device. Use 40bit DMA workaround.
2090          */
2091         if (BGE_IS_5714_FAMILY(sc) && (sc->bge_flags & BGE_FLAG_PCIX))
2092                 sc->bge_flags |= BGE_FLAG_MAXADDR_40BIT;
2093
2094         /* Identify the chips that use a CPMU. */
2095         if (sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
2096             sc->bge_asicrev == BGE_ASICREV_BCM5761 ||
2097             sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
2098             sc->bge_asicrev == BGE_ASICREV_BCM57780)
2099                 sc->bge_flags |= BGE_FLAG_CPMU;
2100
2101         /*
2102          * When using the BCM5701 in PCI-X mode, data corruption has
2103          * been observed in the first few bytes of some received packets.
2104          * Aligning the packet buffer in memory eliminates the corruption.
2105          * Unfortunately, this misaligns the packet payloads.  On platforms
2106          * which do not support unaligned accesses, we will realign the
2107          * payloads by copying the received packets.
2108          */
2109         if (sc->bge_asicrev == BGE_ASICREV_BCM5701 &&
2110             (sc->bge_flags & BGE_FLAG_PCIX))
2111                 sc->bge_flags |= BGE_FLAG_RX_ALIGNBUG;
2112
2113         if (!BGE_IS_5788(sc) && sc->bge_asicrev != BGE_ASICREV_BCM5700) {
2114                 if (device_getenv_int(dev, "status_tag", 1)) {
2115                         sc->bge_flags |= BGE_FLAG_STATUS_TAG;
2116                         sc->bge_pci_miscctl = BGE_PCIMISCCTL_TAGGED_STATUS;
2117                         if (bootverbose)
2118                                 device_printf(dev, "enable status tag\n");
2119                 }
2120         }
2121
2122         /*
2123          * Set various PHY quirk flags.
2124          */
2125         product = pci_get_device(dev);
2126         vendor = pci_get_vendor(dev);
2127
2128         if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
2129              sc->bge_asicrev == BGE_ASICREV_BCM5701) &&
2130             pci_get_subvendor(dev) == PCI_VENDOR_DELL)
2131                 sc->bge_phy_flags |= BGE_PHY_NO_3LED;
2132
2133         capmask = MII_CAPMASK_DEFAULT;
2134         if ((sc->bge_asicrev == BGE_ASICREV_BCM5703 &&
2135              (misccfg == 0x4000 || misccfg == 0x8000)) ||
2136             (sc->bge_asicrev == BGE_ASICREV_BCM5705 &&
2137              vendor == PCI_VENDOR_BROADCOM &&
2138              (product == PCI_PRODUCT_BROADCOM_BCM5901 ||
2139               product == PCI_PRODUCT_BROADCOM_BCM5901A2 ||
2140               product == PCI_PRODUCT_BROADCOM_BCM5705F)) ||
2141             (vendor == PCI_VENDOR_BROADCOM &&
2142              (product == PCI_PRODUCT_BROADCOM_BCM5751F ||
2143               product == PCI_PRODUCT_BROADCOM_BCM5753F ||
2144               product == PCI_PRODUCT_BROADCOM_BCM5787F)) ||
2145             product == PCI_PRODUCT_BROADCOM_BCM57790 ||
2146             sc->bge_asicrev == BGE_ASICREV_BCM5906) {
2147                 /* 10/100 only */
2148                 capmask &= ~BMSR_EXTSTAT;
2149         }
2150
2151         sc->bge_phy_flags |= BGE_PHY_WIRESPEED;
2152         if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
2153             (sc->bge_asicrev == BGE_ASICREV_BCM5705 &&
2154              (sc->bge_chipid != BGE_CHIPID_BCM5705_A0 &&
2155               sc->bge_chipid != BGE_CHIPID_BCM5705_A1)) ||
2156             sc->bge_asicrev == BGE_ASICREV_BCM5906)
2157                 sc->bge_phy_flags &= ~BGE_PHY_WIRESPEED;
2158
2159         if (sc->bge_chipid == BGE_CHIPID_BCM5701_A0 ||
2160             sc->bge_chipid == BGE_CHIPID_BCM5701_B0)
2161                 sc->bge_phy_flags |= BGE_PHY_CRC_BUG;
2162
2163         if (sc->bge_chiprev == BGE_CHIPREV_5703_AX ||
2164             sc->bge_chiprev == BGE_CHIPREV_5704_AX)
2165                 sc->bge_phy_flags |= BGE_PHY_ADC_BUG;
2166
2167         if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0)
2168                 sc->bge_phy_flags |= BGE_PHY_5704_A0_BUG;
2169
2170         if (BGE_IS_5705_PLUS(sc) &&
2171             sc->bge_asicrev != BGE_ASICREV_BCM5906 &&
2172             /* sc->bge_asicrev != BGE_ASICREV_BCM5717 && */
2173             sc->bge_asicrev != BGE_ASICREV_BCM5785 &&
2174             /* sc->bge_asicrev != BGE_ASICREV_BCM57765 && */
2175             sc->bge_asicrev != BGE_ASICREV_BCM57780) {
2176                 if (sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
2177                     sc->bge_asicrev == BGE_ASICREV_BCM5761 ||
2178                     sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
2179                     sc->bge_asicrev == BGE_ASICREV_BCM5787) {
2180                         if (product != PCI_PRODUCT_BROADCOM_BCM5722 &&
2181                             product != PCI_PRODUCT_BROADCOM_BCM5756)
2182                                 sc->bge_phy_flags |= BGE_PHY_JITTER_BUG;
2183                         if (product == PCI_PRODUCT_BROADCOM_BCM5755M)
2184                                 sc->bge_phy_flags |= BGE_PHY_ADJUST_TRIM;
2185                 } else {
2186                         sc->bge_phy_flags |= BGE_PHY_BER_BUG;
2187                 }
2188         }
2189
2190         /* Allocate interrupt */
2191         rid = 0;
2192         sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2193             RF_SHAREABLE | RF_ACTIVE);
2194         if (sc->bge_irq == NULL) {
2195                 device_printf(dev, "couldn't map interrupt\n");
2196                 error = ENXIO;
2197                 goto fail;
2198         }
2199
2200         /* Initialize if_name early, so if_printf() can be used */
2201         ifp = &sc->arpcom.ac_if;
2202         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2203
2204         /* Try to reset the chip. */
2205         bge_reset(sc);
2206
2207         if (bge_chipinit(sc)) {
2208                 device_printf(dev, "chip initialization failed\n");
2209                 error = ENXIO;
2210                 goto fail;
2211         }
2212
2213         /*
2214          * Get station address
2215          */
2216         error = bge_get_eaddr(sc, ether_addr);
2217         if (error) {
2218                 device_printf(dev, "failed to read station address\n");
2219                 goto fail;
2220         }
2221
2222         /* 5705/5750 limit the RX return ring to 512 entries. */
2223         if (BGE_IS_5705_PLUS(sc))
2224                 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
2225         else
2226                 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
2227
2228         error = bge_dma_alloc(sc);
2229         if (error)
2230                 goto fail;
2231
2232         /* Set default tuneable values. */
2233         sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
2234         sc->bge_rx_coal_ticks = BGE_RX_COAL_TICKS_DEF;
2235         sc->bge_tx_coal_ticks = BGE_TX_COAL_TICKS_DEF;
2236         sc->bge_rx_coal_bds = BGE_RX_COAL_BDS_DEF;
2237         sc->bge_tx_coal_bds = BGE_TX_COAL_BDS_DEF;
2238         if (sc->bge_flags & BGE_FLAG_STATUS_TAG) {
2239                 sc->bge_rx_coal_ticks_int = BGE_RX_COAL_TICKS_DEF;
2240                 sc->bge_tx_coal_ticks_int = BGE_TX_COAL_TICKS_DEF;
2241                 sc->bge_rx_coal_bds_int = BGE_RX_COAL_BDS_DEF;
2242                 sc->bge_tx_coal_bds_int = BGE_TX_COAL_BDS_DEF;
2243         } else {
2244                 sc->bge_rx_coal_ticks_int = BGE_RX_COAL_TICKS_MIN;
2245                 sc->bge_tx_coal_ticks_int = BGE_TX_COAL_TICKS_MIN;
2246                 sc->bge_rx_coal_bds_int = BGE_RX_COAL_BDS_MIN;
2247                 sc->bge_tx_coal_bds_int = BGE_TX_COAL_BDS_MIN;
2248         }
2249
2250         /* Set up ifnet structure */
2251         ifp->if_softc = sc;
2252         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2253         ifp->if_ioctl = bge_ioctl;
2254         ifp->if_start = bge_start;
2255 #ifdef DEVICE_POLLING
2256         ifp->if_poll = bge_poll;
2257 #endif
2258         ifp->if_watchdog = bge_watchdog;
2259         ifp->if_init = bge_init;
2260         ifp->if_mtu = ETHERMTU;
2261         ifp->if_capabilities = IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
2262         ifq_set_maxlen(&ifp->if_snd, BGE_TX_RING_CNT - 1);
2263         ifq_set_ready(&ifp->if_snd);
2264
2265         /*
2266          * 5700 B0 chips do not support checksumming correctly due
2267          * to hardware bugs.
2268          */
2269         if (sc->bge_chipid != BGE_CHIPID_BCM5700_B0) {
2270                 ifp->if_capabilities |= IFCAP_HWCSUM;
2271                 ifp->if_hwassist = BGE_CSUM_FEATURES;
2272         }
2273         ifp->if_capenable = ifp->if_capabilities;
2274
2275         /*
2276          * Figure out what sort of media we have by checking the
2277          * hardware config word in the first 32k of NIC internal memory,
2278          * or fall back to examining the EEPROM if necessary.
2279          * Note: on some BCM5700 cards, this value appears to be unset.
2280          * If that's the case, we have to rely on identifying the NIC
2281          * by its PCI subsystem ID, as we do below for the SysKonnect
2282          * SK-9D41.
2283          */
2284         if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER) {
2285                 hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
2286         } else {
2287                 if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
2288                                     sizeof(hwcfg))) {
2289                         device_printf(dev, "failed to read EEPROM\n");
2290                         error = ENXIO;
2291                         goto fail;
2292                 }
2293                 hwcfg = ntohl(hwcfg);
2294         }
2295
2296         /* The SysKonnect SK-9D41 is a 1000baseSX card. */
2297         if (pci_get_subvendor(dev) == PCI_PRODUCT_SCHNEIDERKOCH_SK_9D41 ||
2298             (hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER) {
2299                 if (BGE_IS_5714_FAMILY(sc))
2300                         sc->bge_flags |= BGE_FLAG_MII_SERDES;
2301                 else
2302                         sc->bge_flags |= BGE_FLAG_TBI;
2303         }
2304
2305         /* Setup MI MODE */
2306         if (sc->bge_flags & BGE_FLAG_CPMU)
2307                 sc->bge_mi_mode = BGE_MIMODE_500KHZ_CONST;
2308         else
2309                 sc->bge_mi_mode = BGE_MIMODE_BASE;
2310         if (BGE_IS_5700_FAMILY(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5705) {
2311                 /* Enable auto polling for BCM570[0-5]. */
2312                 sc->bge_mi_mode |= BGE_MIMODE_AUTOPOLL;
2313         }
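        /*
         * With auto polling the MAC itself periodically reads the PHY
         * status registers over MDIO and raises a link attention on
         * changes, so the driver need not poll the PHY from its timer.
         */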
2314
2315         /* Set up the link status update callback */
2316         if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
2317             sc->bge_chipid != BGE_CHIPID_BCM5700_B2) {
2318                 sc->bge_link_upd = bge_bcm5700_link_upd;
2319                 sc->bge_link_chg = BGE_MACSTAT_MI_INTERRUPT;
2320         } else if (sc->bge_flags & BGE_FLAG_TBI) {
2321                 sc->bge_link_upd = bge_tbi_link_upd;
2322                 sc->bge_link_chg = BGE_MACSTAT_LINK_CHANGED;
2323         } else if (sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) {
2324                 sc->bge_link_upd = bge_autopoll_link_upd;
2325                 sc->bge_link_chg = BGE_MACSTAT_LINK_CHANGED;
2326         } else {
2327                 sc->bge_link_upd = bge_copper_link_upd;
2328                 sc->bge_link_chg = BGE_MACSTAT_LINK_CHANGED;
2329         }
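        /*
         * Summary of the dispatch above: pre-B2 BCM5700s signal link
         * changes via the MI interrupt, TBI (fiber) parts via the MAC
         * status word, auto-polling parts via the polled PHY status,
         * and everything else falls back to plain copper PHY polling.
         */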
2330
2331         /*
2332          * Broadcom's own driver always assumes the internal
2333          * PHY is at GMII address 1.  On some chips, the PHY responds
2334          * to accesses at all addresses, which could cause us to
2335          * bogusly attach the PHY 32 times at probe time.  Always
2336          * restricting the lookup to address 1 is simpler than
2337          * trying to figure out which chip revisions should be
2338          * special-cased.
2339          */
2340         sc->bge_phyno = 1;
2341
2342         if (sc->bge_flags & BGE_FLAG_TBI) {
2343                 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK,
2344                     bge_ifmedia_upd, bge_ifmedia_sts);
2345                 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
2346                 ifmedia_add(&sc->bge_ifmedia,
2347                     IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL);
2348                 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
2349                 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO);
2350                 sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
2351         } else {
2352                 struct mii_probe_args mii_args;
2353
2354                 mii_probe_args_init(&mii_args, bge_ifmedia_upd, bge_ifmedia_sts);
2355                 mii_args.mii_probemask = 1 << sc->bge_phyno;
2356                 mii_args.mii_capmask = capmask;
2357
2358                 error = mii_probe(dev, &sc->bge_miibus, &mii_args);
2359                 if (error) {
2360                         device_printf(dev, "MII without any PHY!\n");
2361                         goto fail;
2362                 }
2363         }
2364
2365         /*
2366          * Create sysctl nodes.
2367          */
2368         sysctl_ctx_init(&sc->bge_sysctl_ctx);
2369         sc->bge_sysctl_tree = SYSCTL_ADD_NODE(&sc->bge_sysctl_ctx,
2370                                               SYSCTL_STATIC_CHILDREN(_hw),
2371                                               OID_AUTO,
2372                                               device_get_nameunit(dev),
2373                                               CTLFLAG_RD, 0, "");
2374         if (sc->bge_sysctl_tree == NULL) {
2375                 device_printf(dev, "can't add sysctl node\n");
2376                 error = ENXIO;
2377                 goto fail;
2378         }
2379
2380         SYSCTL_ADD_PROC(&sc->bge_sysctl_ctx,
2381                         SYSCTL_CHILDREN(sc->bge_sysctl_tree),
2382                         OID_AUTO, "rx_coal_ticks",
2383                         CTLTYPE_INT | CTLFLAG_RW,
2384                         sc, 0, bge_sysctl_rx_coal_ticks, "I",
2385                         "Receive coalescing ticks (usec).");
2386         SYSCTL_ADD_PROC(&sc->bge_sysctl_ctx,
2387                         SYSCTL_CHILDREN(sc->bge_sysctl_tree),
2388                         OID_AUTO, "tx_coal_ticks",
2389                         CTLTYPE_INT | CTLFLAG_RW,
2390                         sc, 0, bge_sysctl_tx_coal_ticks, "I",
2391                         "Transmit coalescing ticks (usec).");
2392         SYSCTL_ADD_PROC(&sc->bge_sysctl_ctx,
2393                         SYSCTL_CHILDREN(sc->bge_sysctl_tree),
2394                         OID_AUTO, "rx_coal_bds",
2395                         CTLTYPE_INT | CTLFLAG_RW,
2396                         sc, 0, bge_sysctl_rx_coal_bds, "I",
2397                         "Receive max coalesced BD count.");
2398         SYSCTL_ADD_PROC(&sc->bge_sysctl_ctx,
2399                         SYSCTL_CHILDREN(sc->bge_sysctl_tree),
2400                         OID_AUTO, "tx_coal_bds",
2401                         CTLTYPE_INT | CTLFLAG_RW,
2402                         sc, 0, bge_sysctl_tx_coal_bds, "I",
2403                         "Transmit max coalesced BD count.");
2404         if (sc->bge_flags & BGE_FLAG_PCIE) {
2405                 /*
2406                  * A common design characteristic for many Broadcom
2407                  * client controllers is that they only support a
2408                  * single outstanding DMA read operation on the PCIe
2409                  * bus. This means that it will take twice as long to
2410                  * fetch a TX frame that is split into header and
2411                  * payload buffers as it does to fetch a single,
2412                  * contiguous TX frame (2 reads vs. 1 read). For these
2413                  * controllers, coalescing buffers to reduce the number
2414          * of memory reads is an effective way to get maximum
2415          * performance (about 940Mbps).  Without collapsing TX
2416          * buffers the maximum TCP bulk transfer performance
2417          * is about 850Mbps.  However, forcing mbuf coalescing
2418          * consumes a lot of CPU cycles, so leave it off by
2419                  * default.
2420                  */
2421                 SYSCTL_ADD_INT(&sc->bge_sysctl_ctx,
2422                                SYSCTL_CHILDREN(sc->bge_sysctl_tree),
2423                                OID_AUTO, "force_defrag", CTLFLAG_RW,
2424                                &sc->bge_force_defrag, 0,
2425                                "Force defragment on TX path");
2426         }
2427         if (sc->bge_flags & BGE_FLAG_STATUS_TAG) {
2428                 if (!BGE_IS_5705_PLUS(sc)) {
2429                         SYSCTL_ADD_PROC(&sc->bge_sysctl_ctx,
2430                             SYSCTL_CHILDREN(sc->bge_sysctl_tree), OID_AUTO,
2431                             "rx_coal_ticks_int", CTLTYPE_INT | CTLFLAG_RW,
2432                             sc, 0, bge_sysctl_rx_coal_ticks_int, "I",
2433                             "Receive coalescing ticks "
2434                             "during interrupt (usec).");
2435                         SYSCTL_ADD_PROC(&sc->bge_sysctl_ctx,
2436                             SYSCTL_CHILDREN(sc->bge_sysctl_tree), OID_AUTO,
2437                             "tx_coal_ticks_int", CTLTYPE_INT | CTLFLAG_RW,
2438                             sc, 0, bge_sysctl_tx_coal_ticks_int, "I",
2439                             "Transmit coalescing ticks "
2440                             "during interrupt (usec).");
2441                 }
2442                 SYSCTL_ADD_PROC(&sc->bge_sysctl_ctx,
2443                     SYSCTL_CHILDREN(sc->bge_sysctl_tree), OID_AUTO,
2444                     "rx_coal_bds_int", CTLTYPE_INT | CTLFLAG_RW,
2445                     sc, 0, bge_sysctl_rx_coal_bds_int, "I",
2446                     "Receive max coalesced BD count during interrupt.");
2447                 SYSCTL_ADD_PROC(&sc->bge_sysctl_ctx,
2448                     SYSCTL_CHILDREN(sc->bge_sysctl_tree), OID_AUTO,
2449                     "tx_coal_bds_int", CTLTYPE_INT | CTLFLAG_RW,
2450                     sc, 0, bge_sysctl_tx_coal_bds_int, "I",
2451                     "Transmit max coalesced BD count during interrupt.");
2452         }
2453
2454         /*
2455          * Call MI attach routine.
2456          */
2457         ether_ifattach(ifp, ether_addr, NULL);
2458
2459         if (sc->bge_flags & BGE_FLAG_STATUS_TAG)
2460                 intr_func = bge_intr_status_tag;
2461         else
2462                 intr_func = bge_intr;
2463
2464         error = bus_setup_intr(dev, sc->bge_irq, INTR_MPSAFE, intr_func, sc,
2465             &sc->bge_intrhand, ifp->if_serializer);
2466         if (error) {
2467                 ether_ifdetach(ifp);
2468                 device_printf(dev, "couldn't set up irq\n");
2469                 goto fail;
2470         }
2471
2472         ifp->if_cpuid = rman_get_cpuid(sc->bge_irq);
2473         KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus);
2474
2475         return(0);
2476 fail:
2477         bge_detach(dev);
2478         return(error);
2479 }
2480
2481 static int
2482 bge_detach(device_t dev)
2483 {
2484         struct bge_softc *sc = device_get_softc(dev);
2485
2486         if (device_is_attached(dev)) {
2487                 struct ifnet *ifp = &sc->arpcom.ac_if;
2488
2489                 lwkt_serialize_enter(ifp->if_serializer);
2490                 bge_stop(sc);
2491                 bge_reset(sc);
2492                 bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand);
2493                 lwkt_serialize_exit(ifp->if_serializer);
2494
2495                 ether_ifdetach(ifp);
2496         }
2497
2498         if (sc->bge_flags & BGE_FLAG_TBI)
2499                 ifmedia_removeall(&sc->bge_ifmedia);
2500         if (sc->bge_miibus)
2501                 device_delete_child(dev, sc->bge_miibus);
2502         bus_generic_detach(dev);
2503
2504         if (sc->bge_irq != NULL)
2505                 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->bge_irq);
2506
2507         if (sc->bge_res != NULL)
2508                 bus_release_resource(dev, SYS_RES_MEMORY,
2509                     BGE_PCI_BAR0, sc->bge_res);
2510
2511         if (sc->bge_sysctl_tree != NULL)
2512                 sysctl_ctx_free(&sc->bge_sysctl_ctx);
2513
2514         bge_dma_free(sc);
2515
2516         return 0;
2517 }
2518
2519 static void
2520 bge_reset(struct bge_softc *sc)
2521 {
2522         device_t dev;
2523         uint32_t cachesize, command, pcistate, reset;
2524         void (*write_op)(struct bge_softc *, uint32_t, uint32_t);
2525         int i, val = 0;
2526
2527         dev = sc->bge_dev;
2528
2529         if (BGE_IS_575X_PLUS(sc) && !BGE_IS_5714_FAMILY(sc) &&
2530             sc->bge_asicrev != BGE_ASICREV_BCM5906) {
2531                 if (sc->bge_flags & BGE_FLAG_PCIE)
2532                         write_op = bge_writemem_direct;
2533                 else
2534                         write_op = bge_writemem_ind;
2535         } else {
2536                 write_op = bge_writereg_ind;
2537         }
2538
2539         /* Save some important PCI state. */
2540         cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
2541         command = pci_read_config(dev, BGE_PCI_CMD, 4);
2542         pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);
2543
2544         pci_write_config(dev, BGE_PCI_MISC_CTL,
2545             BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2546             BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW|
2547             sc->bge_pci_miscctl, 4);
2548
2549         /* Disable fastboot on controllers that support it. */
2550         if (sc->bge_asicrev == BGE_ASICREV_BCM5752 ||
2551             BGE_IS_5755_PLUS(sc)) {
2552                 if (bootverbose)
2553                         if_printf(&sc->arpcom.ac_if, "Disabling fastboot\n");
2554                 CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0x0);
2555         }
2556
2557         /*
2558          * Write the magic number to SRAM at offset 0xB50.
2559          * When firmware finishes its initialization it will
2560          * write ~BGE_MAGIC_NUMBER to the same location.
2561          */
2562         bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
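        /*
         * After the global reset below, this location is polled until
         * the bootcode replaces the value with ~BGE_MAGIC_NUMBER (see
         * the loop at the end of this function), completing the
         * firmware initialization handshake.
         */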
2563
2564         reset = BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1);
2565
2566         /* XXX: Broadcom Linux driver. */
2567         if (sc->bge_flags & BGE_FLAG_PCIE) {
2568                 if (CSR_READ_4(sc, 0x7e2c) == 0x60)     /* PCIE 1.0 */
2569                         CSR_WRITE_4(sc, 0x7e2c, 0x20);
2570                 if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
2571                         /* Prevent PCIE link training during global reset */
2572                         CSR_WRITE_4(sc, BGE_MISC_CFG, (1<<29));
2573                         reset |= (1<<29);
2574                 }
2575         }
2576
2577         /* 
2578          * Set GPHY Power Down Override to leave GPHY
2579          * powered up in D0 uninitialized.
2580          */
2581         if (BGE_IS_5705_PLUS(sc) && (sc->bge_flags & BGE_FLAG_CPMU) == 0)
2582                 reset |= BGE_MISCCFG_GPHY_PD_OVERRIDE;
2583
2584         /* Issue global reset */
2585         write_op(sc, BGE_MISC_CFG, reset);
2586
2587         if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
2588                 uint32_t status, ctrl;
2589
2590                 status = CSR_READ_4(sc, BGE_VCPU_STATUS);
2591                 CSR_WRITE_4(sc, BGE_VCPU_STATUS,
2592                     status | BGE_VCPU_STATUS_DRV_RESET);
2593                 ctrl = CSR_READ_4(sc, BGE_VCPU_EXT_CTRL);
2594                 CSR_WRITE_4(sc, BGE_VCPU_EXT_CTRL,
2595                     ctrl & ~BGE_VCPU_EXT_CTRL_HALT_CPU);
2596         }
2597
2598         DELAY(1000);
2599
2600         /* XXX: Broadcom Linux driver. */
2601         if (sc->bge_flags & BGE_FLAG_PCIE) {
2602                 uint16_t devctl;
2603
2604                 if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
2605                         uint32_t v;
2606
2607                         DELAY(500000); /* wait for link training to complete */
2608                         v = pci_read_config(dev, 0xc4, 4);
2609                         pci_write_config(dev, 0xc4, v | (1<<15), 4);
2610                 }
2611
2612                 /* Clear the No Snoop enable bit and disable relaxed ordering. */
2613                 devctl = pci_read_config(dev,
2614                     sc->bge_pciecap + PCIER_DEVCTRL, 2);
2615                 devctl &= ~(PCIEM_DEVCTL_RELAX_ORDER | PCIEM_DEVCTL_NOSNOOP);
2616                 pci_write_config(dev, sc->bge_pciecap + PCIER_DEVCTRL,
2617                     devctl, 2);
2618
2619                 /* Clear error status. */
2620                 pci_write_config(dev, sc->bge_pciecap + PCIER_DEVSTS,
2621                     PCIEM_DEVSTS_CORR_ERR |
2622                     PCIEM_DEVSTS_NFATAL_ERR |
2623                     PCIEM_DEVSTS_FATAL_ERR |
2624                     PCIEM_DEVSTS_UNSUPP_REQ, 2);
2625         }
2626
2627         /* Reset some of the PCI state that got zapped by reset */
2628         pci_write_config(dev, BGE_PCI_MISC_CTL,
2629             BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2630             BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW|
2631             sc->bge_pci_miscctl, 4);
2632         pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
2633         pci_write_config(dev, BGE_PCI_CMD, command, 4);
2634         write_op(sc, BGE_MISC_CFG, (65 << 1));
2635
2636         /*
2637          * Disable PCI-X relaxed ordering so the status block update
2638          * arrives before the packet buffer DMA; otherwise the driver
2639          * may read a stale status block.
2640          */
2641         if (sc->bge_flags & BGE_FLAG_PCIX) {
2642                 uint16_t devctl;
2643
2644                 devctl = pci_read_config(dev,
2645                     sc->bge_pcixcap + PCIXR_COMMAND, 2);
2646                 devctl &= ~PCIXM_COMMAND_ERO;
2647                 if (sc->bge_asicrev == BGE_ASICREV_BCM5703) {
2648                         devctl &= ~PCIXM_COMMAND_MAX_READ;
2649                         devctl |= PCIXM_COMMAND_MAX_READ_2048;
2650                 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
2651                         devctl &= ~(PCIXM_COMMAND_MAX_SPLITS |
2652                             PCIXM_COMMAND_MAX_READ);
2653                         devctl |= PCIXM_COMMAND_MAX_READ_2048;
2654                 }
2655                 pci_write_config(dev, sc->bge_pcixcap + PCIXR_COMMAND,
2656                     devctl, 2);
2657         }
2658
2659         /* Enable memory arbiter. */
2660         if (BGE_IS_5714_FAMILY(sc)) {
2661                 uint32_t val;
2662
2663                 val = CSR_READ_4(sc, BGE_MARB_MODE);
2664                 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | val);
2665         } else {
2666                 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
2667         }
2668
2669         if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
2670                 for (i = 0; i < BGE_TIMEOUT; i++) {
2671                         val = CSR_READ_4(sc, BGE_VCPU_STATUS);
2672                         if (val & BGE_VCPU_STATUS_INIT_DONE)
2673                                 break;
2674                         DELAY(100);
2675                 }
2676                 if (i == BGE_TIMEOUT) {
2677                         if_printf(&sc->arpcom.ac_if, "reset timed out\n");
2678                         return;
2679                 }
2680         } else {
2681                 /*
2682                  * Poll until we see the 1's complement of the magic number.
2683                  * This indicates that the firmware initialization
2684                  * is complete.
2685                  */
2686                 for (i = 0; i < BGE_FIRMWARE_TIMEOUT; i++) {
2687                         val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
2688                         if (val == ~BGE_MAGIC_NUMBER)
2689                                 break;
2690                         DELAY(10);
2691                 }
2692                 if (i == BGE_FIRMWARE_TIMEOUT) {
2693                         if_printf(&sc->arpcom.ac_if, "firmware handshake "
2694                                   "timed out, found 0x%08x\n", val);
2695                         return;
2696                 }
2697         }
2698
2699         /*
2700          * XXX Wait for the value of the PCISTATE register to
2701          * return to its original pre-reset state. This is a
2702          * fairly good indicator of reset completion. If we don't
2703          * wait for the reset to fully complete, trying to read
2704          * from the device's non-PCI registers may yield garbage
2705          * results.
2706          */
2707         for (i = 0; i < BGE_TIMEOUT; i++) {
2708                 if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate)
2709                         break;
2710                 DELAY(10);
2711         }
2712
2713         /* Fix up byte swapping */
2714         CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS |
2715             BGE_MODECTL_BYTESWAP_DATA);
2716
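             /* Clear MAC mode; it is reprogrammed when the chip is brought up. */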
2717         CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
2718
2719         /*
2720          * The 5704 in TBI mode apparently needs some special
2721          * adjustment to ensure the SERDES drive level is set
2722          * to 1.2V.
2723          */
2724         if (sc->bge_asicrev == BGE_ASICREV_BCM5704 &&
2725             (sc->bge_flags & BGE_FLAG_TBI)) {
2726                 uint32_t serdescfg;
2727
2728                 serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG);
2729                 serdescfg = (serdescfg & ~0xFFF) | 0x880;
2730                 CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg);
2731         }
2732
2733         /* XXX: Broadcom Linux driver. */
2734         if ((sc->bge_flags & BGE_FLAG_PCIE) &&
2735             sc->bge_chipid != BGE_CHIPID_BCM5750_A0 &&
2736             sc->bge_asicrev != BGE_ASICREV_BCM5785) {
2737                 uint32_t v;
2738
2739                 /* Enable Data FIFO protection. */
2740                 v = CSR_READ_4(sc, 0x7c00);
2741                 CSR_WRITE_4(sc, 0x7c00, v | (1<<25));
2742         }
2743
2744         DELAY(10000);
2745 }
2746
2747 /*
2748  * Frame reception handling. This is called if there's a frame
2749  * on the receive return list.
2750  *
2751  * Note: we have to be able to handle two possibilities here:
2752  * 1) the frame is from the jumbo receive ring
2753  * 2) the frame is from the standard receive ring
2754  */
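/*
 * The status block's rx_prod value is the hardware's producer index and
 * bge_rx_saved_considx is our consumer index; descriptors are drained
 * until the two meet, then the chip's mailboxes are updated below.
 */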
2755
2756 static void
2757 bge_rxeof(struct bge_softc *sc, uint16_t rx_prod)
2758 {
2759         struct ifnet *ifp;
2760         int stdcnt = 0, jumbocnt = 0;
2761
2762         ifp = &sc->arpcom.ac_if;
2763
2764         while (sc->bge_rx_saved_considx != rx_prod) {
2765                 struct bge_rx_bd        *cur_rx;
2766                 uint32_t                rxidx;
2767                 struct mbuf             *m = NULL;
2768                 uint16_t                vlan_tag = 0;
2769                 int                     have_tag = 0;
2770
2771                 cur_rx =
2772                     &sc->bge_ldata.bge_rx_return_ring[sc->bge_rx_saved_considx];
2773
2774                 rxidx = cur_rx->bge_idx;
2775                 BGE_INC(sc->bge_rx_saved_considx, sc->bge_return_ring_cnt);
2776                 logif(rx_pkt);
2777
2778                 if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
2779                         have_tag = 1;
2780                         vlan_tag = cur_rx->bge_vlan_tag;
2781                 }
2782
2783                 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
2784                         BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
2785                         jumbocnt++;
2786
2787                         if (rxidx != sc->bge_jumbo) {
2788                                 ifp->if_ierrors++;
2789                                 if_printf(ifp, "sw jumbo index(%d) "
2790                                     "and hw jumbo index(%d) mismatch, drop!\n",
2791                                     sc->bge_jumbo, rxidx);
2792                                 bge_setup_rxdesc_jumbo(sc, rxidx);
2793                                 continue;
2794                         }
2795
2796                         m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx].bge_mbuf;
2797                         if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2798                                 ifp->if_ierrors++;
2799                                 bge_setup_rxdesc_jumbo(sc, sc->bge_jumbo);
2800                                 continue;
2801                         }
2802                         if (bge_newbuf_jumbo(sc, sc->bge_jumbo, 0)) {
2803                                 ifp->if_ierrors++;
2804                                 bge_setup_rxdesc_jumbo(sc, sc->bge_jumbo);
2805                                 continue;
2806                         }
2807                 } else {
2808                         BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
2809                         stdcnt++;
2810
2811                         if (rxidx != sc->bge_std) {
2812                                 ifp->if_ierrors++;
2813                                 if_printf(ifp, "sw std index(%d) "
2814                                     "and hw std index(%d) mismatch, drop!\n",
2815                                     sc->bge_std, rxidx);
2816                                 bge_setup_rxdesc_std(sc, rxidx);
2817                                 continue;
2818                         }
2819
2820                         m = sc->bge_cdata.bge_rx_std_chain[rxidx].bge_mbuf;
2821                         if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2822                                 ifp->if_ierrors++;
2823                                 bge_setup_rxdesc_std(sc, sc->bge_std);
2824                                 continue;
2825                         }
2826                         if (bge_newbuf_std(sc, sc->bge_std, 0)) {
2827                                 ifp->if_ierrors++;
2828                                 bge_setup_rxdesc_std(sc, sc->bge_std);
2829                                 continue;
2830                         }
2831                 }
2832
2833                 ifp->if_ipackets++;
2834 #if !defined(__i386__) && !defined(__x86_64__)
2835                 /*
2836                  * The x86 allows unaligned accesses, but for other
2837                  * platforms we must make sure the payload is aligned.
2838                  */
2839                 if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) {
2840                         bcopy(m->m_data, m->m_data + ETHER_ALIGN,
2841                             cur_rx->bge_len);
2842                         m->m_data += ETHER_ALIGN;
2843                 }
2844 #endif
2845                 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
2846                 m->m_pkthdr.rcvif = ifp;
2847
2848                 if (ifp->if_capenable & IFCAP_RXCSUM) {
2849                         if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
2850                                 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2851                                 if ((cur_rx->bge_ip_csum ^ 0xffff) == 0)
2852                                         m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2853                         }
2854                         if ((cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) &&
2855                             m->m_pkthdr.len >= BGE_MIN_FRAME) {
2856                                 m->m_pkthdr.csum_data =
2857                                         cur_rx->bge_tcp_udp_csum;
2858                                 m->m_pkthdr.csum_flags |=
2859                                         CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2860                         }
2861                 }
2862
2863                 /*
2864                  * If we received a packet with a vlan tag, pass it
2865                  * to vlan_input() instead of ether_input().
2866                  */
2867                 if (have_tag) {
2868                         m->m_flags |= M_VLANTAG;
2869                         m->m_pkthdr.ether_vlantag = vlan_tag;
2870                         have_tag = vlan_tag = 0;
2871                 }
2872                 ifp->if_input(ifp, m);
2873         }
2874
2875         bge_writembx(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
2876         if (stdcnt)
2877                 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
2878         if (jumbocnt)
2879                 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
2880 }
2881
2882 static void
2883 bge_txeof(struct bge_softc *sc, uint16_t tx_cons)
2884 {
2885         struct bge_tx_bd *cur_tx = NULL;
2886         struct ifnet *ifp;
2887
2888         ifp = &sc->arpcom.ac_if;
2889
2890         /*
2891          * Go through our tx ring and free mbufs for those
2892          * frames that have been sent.
2893          */
2894         while (sc->bge_tx_saved_considx != tx_cons) {
2895                 uint32_t idx = 0;
2896
2897                 idx = sc->bge_tx_saved_considx;
2898                 cur_tx = &sc->bge_ldata.bge_tx_ring[idx];
2899                 if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
2900                         ifp->if_opackets++;
2901                 if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
2902                         bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag,
2903                             sc->bge_cdata.bge_tx_dmamap[idx]);
2904                         m_freem(sc->bge_cdata.bge_tx_chain[idx]);
2905                         sc->bge_cdata.bge_tx_chain[idx] = NULL;
2906                 }
2907                 sc->bge_txcnt--;
2908                 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
2909                 logif(tx_pkt);
2910         }
2911
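             /*
              * Clear OACTIVE once enough descriptors have been reclaimed
              * to encapsulate another full packet (reserved plus spare
              * segments).
              */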
2912         if (cur_tx != NULL &&
2913             (BGE_TX_RING_CNT - sc->bge_txcnt) >=
2914             (BGE_NSEG_RSVD + BGE_NSEG_SPARE))
2915                 ifp->if_flags &= ~IFF_OACTIVE;
2916
2917         if (sc->bge_txcnt == 0)
2918                 ifp->if_timer = 0;
2919
2920         if (!ifq_is_empty(&ifp->if_snd))
2921                 if_devstart(ifp);
2922 }
2923
2924 #ifdef DEVICE_POLLING
2925
2926 static void
2927 bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
2928 {
2929         struct bge_softc *sc = ifp->if_softc;
2930         struct bge_status_block *sblk = sc->bge_ldata.bge_status_block;
2931         uint16_t rx_prod, tx_cons;
2932
2933         switch(cmd) {
2934         case POLL_REGISTER:
2935                 bge_disable_intr(sc);
2936                 break;
2937         case POLL_DEREGISTER:
2938                 bge_enable_intr(sc);
2939                 break;
2940         case POLL_AND_CHECK_STATUS:
2941                 /*
2942                  * Process link state changes.
2943                  */
2944                 bge_link_poll(sc);
2945                 /* Fall through */
2946         case POLL_ONLY:
2947                 if (sc->bge_flags & BGE_FLAG_STATUS_TAG) {
2948                         sc->bge_status_tag = sblk->bge_status_tag;
2949                         /*
2950                          * Use a load fence to ensure that status_tag
2951                          * is saved before rx_prod and tx_cons.
2952                          */
2953                         cpu_lfence();
2954                 }
2955                 rx_prod = sblk->bge_idx[0].bge_rx_prod_idx;
2956                 tx_cons = sblk->bge_idx[0].bge_tx_cons_idx;
2957                 if (ifp->if_flags & IFF_RUNNING) {
2959                         if (sc->bge_rx_saved_considx != rx_prod)
2960                                 bge_rxeof(sc, rx_prod);
2961
2963                         if (sc->bge_tx_saved_considx != tx_cons)
2964                                 bge_txeof(sc, tx_cons);
2965                 }
2966                 break;
2967         }
2968 }
2969
2970 #endif
2971
2972 static void
2973 bge_intr(void *xsc)
2974 {
2975         struct bge_softc *sc = xsc;
2976         struct ifnet *ifp = &sc->arpcom.ac_if;
2977
2978         logif(intr);
2979
2980         /*
2981          * Ack the interrupt by writing something to BGE_MBX_IRQ0_LO.  Don't
2982          * disable interrupts by writing nonzero like we used to, since with
2983          * our current organization this just gives complications and
2984          * pessimizations for re-enabling interrupts.  We used to have races
2985          * instead of the necessary complications.  Disabling interrupts
2986          * would just reduce the chance of a status update while we are
2987          * running (by switching to the interrupt-mode coalescence
2988          * parameters), but this chance is already very low so it is more
2989          * efficient to get another interrupt than prevent it.
2990          *
2991          * We do the ack first to ensure another interrupt if there is a
2992          * status update after the ack.  We don't check for the status
2993          * changing later because it is more efficient to get another
2994          * interrupt than prevent it, not quite as above (not checking is
2995          * a smaller optimization than not toggling the interrupt enable,
2996          * since checking doesn't involve PCI accesses and toggling requires
2997          * the status check).  So toggling would probably be a pessimization
2998          * even with MSI.  It would only be needed for using a task queue.
2999          */
3000         bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
3001
3002         /*
3003          * Process link state changes.
3004          */
3005         bge_link_poll(sc);
3006
3007         if (ifp->if_flags & IFF_RUNNING) {
3008                 struct bge_status_block *sblk = sc->bge_ldata.bge_status_block;
3009                 uint16_t rx_prod, tx_cons;
3010
3011                 rx_prod = sblk->bge_idx[0].bge_rx_prod_idx;
3012                 if (sc->bge_rx_saved_considx != rx_prod)
3013                         bge_rxeof(sc, rx_prod);
3014
3015                 tx_cons = sblk->bge_idx[0].bge_tx_cons_idx;
3016                 if (sc->bge_tx_saved_considx != tx_cons)
3017                         bge_txeof(sc, tx_cons);
3018         }
3019
3020         if (sc->bge_coal_chg)
3021                 bge_coal_change(sc);
3022 }
3023
3024 static void
3025 bge_intr_status_tag(void *xsc)
3026 {
3027         struct bge_softc *sc = xsc;
3028         struct ifnet *ifp = &sc->arpcom.ac_if;
3029         struct bge_status_block *sblk = sc->bge_ldata.bge_status_block;
3030         uint16_t rx_prod, tx_cons;
3031         uint32_t status;
3032
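             /*
              * If the status tag has not advanced and the chip reports
              * the interrupt line as inactive, the interrupt most likely
              * belongs to another device sharing the line.
              */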
3033         if (sc->bge_status_tag == sblk->bge_status_tag) {
3034                 uint32_t val;
3035
3036                 val = pci_read_config(sc->bge_dev, BGE_PCI_PCISTATE, 4);
3037                 if (val & BGE_PCISTAT_INTR_NOTACT)
3038                         return;
3039         }
3040
3041         /*
3042          * NOTE:
3043          * Interrupts must be disabled when tagged status is used,
3044          * else the interrupt line will always be asserted on
3045          * certain chips (at least on BCM5750 AX/BX).
3046          */
3047         bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
3048
3049         sc->bge_status_tag = sblk->bge_status_tag;
3050         /*
3051          * Use a load fence to ensure that status_tag is saved 
3052          * before rx_prod and tx_cons.
3053          */
3054         cpu_lfence();
3055
3056         rx_prod = sblk->bge_idx[0].bge_rx_prod_idx;
3057         tx_cons = sblk->bge_idx[0].bge_tx_cons_idx;
3058         status = sblk->bge_status;
3059
3060         if ((status & BGE_STATFLAG_LINKSTATE_CHANGED) || sc->bge_link_evt)
3061                 bge_link_poll(sc);
3062
3063         if (ifp->if_flags & IFF_RUNNING) {
3064                 if (sc->bge_rx_saved_considx != rx_prod)
3065                         bge_rxeof(sc, rx_prod);
3066
3067                 if (sc->bge_tx_saved_considx != tx_cons)
3068                         bge_txeof(sc, tx_cons);
3069         }
3070
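             /*
              * Re-enable interrupts by acking with the latest status tag;
              * the tag is written to the upper byte of the IRQ0 mailbox.
              */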
3071         bge_writembx(sc, BGE_MBX_IRQ0_LO, sc->bge_status_tag << 24);
3072
3073         if (sc->bge_coal_chg)
3074                 bge_coal_change(sc);
3075 }
3076
3077 static void
3078 bge_tick(void *xsc)
3079 {
3080         struct bge_softc *sc = xsc;
3081         struct ifnet *ifp = &sc->arpcom.ac_if;
3082
3083         lwkt_serialize_enter(ifp->if_serializer);
3084
3085         if (BGE_IS_5705_PLUS(sc))
3086                 bge_stats_update_regs(sc);
3087         else
3088                 bge_stats_update(sc);
3089
3090         if (sc->bge_flags & BGE_FLAG_TBI) {
3091                 /*
3092                  * Since auto-polling can't be used in TBI mode, we have to
3093                  * poll link status manually.  Register a pending link event
3094                  * here and trigger an interrupt.
3095                  */
3096                 sc->bge_link_evt++;
3097                 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
3098                     BGE_IS_5788(sc))
3099                         BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
3100                 else
3101                         BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
3102         } else if (!sc->bge_link) {
3103                 mii_tick(device_get_softc(sc->bge_miibus));
3104         }
3105
3106         callout_reset(&sc->bge_stat_timer, hz, bge_tick, sc);
3107
3108         lwkt_serialize_exit(ifp->if_serializer);
3109 }
3110
3111 static void
3112 bge_stats_update_regs(struct bge_softc *sc)
3113 {
3114         struct ifnet *ifp = &sc->arpcom.ac_if;
3115         struct bge_mac_stats_regs stats;
3116         uint32_t *s;
3117         int i;
3118
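             /*
              * Snapshot the MAC statistics registers word by word into a
              * local struct so the collision arithmetic below operates
              * on a consistent copy.
              */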
3119         s = (uint32_t *)&stats;
3120         for (i = 0; i < sizeof(struct bge_mac_stats_regs); i += 4) {
3121                 *s = CSR_READ_4(sc, BGE_RX_STATS + i);
3122                 s++;
3123         }
3124
3125         ifp->if_collisions +=
3126            (stats.dot3StatsSingleCollisionFrames +
3127            stats.dot3StatsMultipleCollisionFrames +
3128            stats.dot3StatsExcessiveCollisions +
3129            stats.dot3StatsLateCollisions) -
3130            ifp->if_collisions;
3131 }
3132
3133 static void
3134 bge_stats_update(struct bge_softc *sc)
3135 {
3136         struct ifnet *ifp = &sc->arpcom.ac_if;
3137         bus_size_t stats;
3138
3139         stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;
3140
3141 #define READ_STAT(sc, stats, stat)      \
3142         CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))
3143
3144         ifp->if_collisions +=
3145            (READ_STAT(sc, stats,
3146                 txstats.dot3StatsSingleCollisionFrames.bge_addr_lo) +
3147             READ_STAT(sc, stats,
3148                 txstats.dot3StatsMultipleCollisionFrames.bge_addr_lo) +
3149             READ_STAT(sc, stats,
3150                 txstats.dot3StatsExcessiveCollisions.bge_addr_lo) +
3151             READ_STAT(sc, stats,
3152                 txstats.dot3StatsLateCollisions.bge_addr_lo)) -
3153            ifp->if_collisions;
3154
3155 #undef READ_STAT
3156
3157 #ifdef notdef
3158         ifp->if_collisions +=
3159            (sc->bge_rdata->bge_info.bge_stats.dot3StatsSingleCollisionFrames +
3160            sc->bge_rdata->bge_info.bge_stats.dot3StatsMultipleCollisionFrames +
3161            sc->bge_rdata->bge_info.bge_stats.dot3StatsExcessiveCollisions +
3162            sc->bge_rdata->bge_info.bge_stats.dot3StatsLateCollisions) -
3163            ifp->if_collisions;
3164 #endif
3165 }
3166
3167 /*
3168  * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
3169  * pointers to descriptors.
3170  */
3171 static int
3172 bge_encap(struct bge_softc *sc, struct mbuf **m_head0, uint32_t *txidx)
3173 {
3174         struct bge_tx_bd *d = NULL;
3175         uint16_t csum_flags = 0;
3176         bus_dma_segment_t segs[BGE_NSEG_NEW];
3177         bus_dmamap_t map;
3178         int error, maxsegs, nsegs, idx, i;
3179         struct mbuf *m_head = *m_head0, *m_new;
3180
3181         if (m_head->m_pkthdr.csum_flags) {
3182                 if (m_head->m_pkthdr.csum_flags & CSUM_IP)
3183                         csum_flags |= BGE_TXBDFLAG_IP_CSUM;
3184                 if (m_head->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
3185                         csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
3186                 if (m_head->m_flags & M_LASTFRAG)
3187                         csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
3188                 else if (m_head->m_flags & M_FRAG)
3189                         csum_flags |= BGE_TXBDFLAG_IP_FRAG;
3190         }
3191
3192         idx = *txidx;
3193         map = sc->bge_cdata.bge_tx_dmamap[idx];
3194
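             /*
              * Keep BGE_NSEG_RSVD descriptors in reserve so the ring can
              * never fill completely, then cap the count at BGE_NSEG_NEW,
              * the most segments one mapping may use.
              */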
3195         maxsegs = (BGE_TX_RING_CNT - sc->bge_txcnt) - BGE_NSEG_RSVD;
3196         KASSERT(maxsegs >= BGE_NSEG_SPARE,
3197                 ("not enough segments %d", maxsegs));
3198
3199         if (maxsegs > BGE_NSEG_NEW)
3200                 maxsegs = BGE_NSEG_NEW;
3201
3202         /*
3203          * Pad outbound frame to BGE_MIN_FRAME for an unusual reason.
3204          * The bge hardware will pad out Tx runts to BGE_MIN_FRAME,
3205          * but when such padded frames employ the bge IP/TCP checksum
3206          * offload, the hardware checksum assist gives incorrect results
3207          * (possibly from incorporating its own padding into the UDP/TCP
3208          * checksum; who knows).  If we pad such runts with zeros, the
3209          * onboard checksum comes out correct.
3210          */
3211         if ((csum_flags & BGE_TXBDFLAG_TCP_UDP_CSUM) &&
3212             m_head->m_pkthdr.len < BGE_MIN_FRAME) {
3213                 error = m_devpad(m_head, BGE_MIN_FRAME);
3214                 if (error)
3215                         goto back;
3216         }
3217
3218         if ((sc->bge_flags & BGE_FLAG_SHORTDMA) && m_head->m_next != NULL) {
3219                 m_new = bge_defrag_shortdma(m_head);
3220                 if (m_new == NULL) {
3221                         error = ENOBUFS;
3222                         goto back;
3223                 }
3224                 *m_head0 = m_head = m_new;
3225         }
3226         if (sc->bge_force_defrag && (sc->bge_flags & BGE_FLAG_PCIE) &&
3227             m_head->m_next != NULL) {
3228                 /*
3229                  * Forcefully defragment the mbuf chain to work around a
3230                  * hardware limitation that only supports a single
3231                  * outstanding DMA read operation.  If it fails, keep
3232                  * moving on with the original mbuf chain.
3233                  */
3234                 m_new = m_defrag(m_head, MB_DONTWAIT);
3235                 if (m_new != NULL)
3236                         *m_head0 = m_head = m_new;
3237         }
3238
3239         error = bus_dmamap_load_mbuf_defrag(sc->bge_cdata.bge_tx_mtag, map,
3240                         m_head0, segs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
3241         if (error)
3242                 goto back;
3243
3244         m_head = *m_head0;
3245         bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag, map, BUS_DMASYNC_PREWRITE);
3246
3247         for (i = 0; ; i++) {
3248                 d = &sc->bge_ldata.bge_tx_ring[idx];
3249
3250                 d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr);
3251                 d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr);
3252                 d->bge_len = segs[i].ds_len;
3253                 d->bge_flags = csum_flags;
3254
3255                 if (i == nsegs - 1)
3256                         break;
3257                 BGE_INC(idx, BGE_TX_RING_CNT);
3258         }
3259         /* Mark the last segment as end of packet... */
3260         d->bge_flags |= BGE_TXBDFLAG_END;
3261
3262         /* Set vlan tag to the first segment of the packet. */
3263         d = &sc->bge_ldata.bge_tx_ring[*txidx];
3264         if (m_head->m_flags & M_VLANTAG) {
3265                 d->bge_flags |= BGE_TXBDFLAG_VLAN_TAG;
3266                 d->bge_vlan_tag = m_head->m_pkthdr.ether_vlantag;
3267         } else {
3268                 d->bge_vlan_tag = 0;
3269         }
3270
3271         /*
3272          * Ensure that the map for this transmission is placed at
3273          * the array index of the last descriptor in this chain.
3274          */
3275         sc->bge_cdata.bge_tx_dmamap[*txidx] = sc->bge_cdata.bge_tx_dmamap[idx];
3276         sc->bge_cdata.bge_tx_dmamap[idx] = map;
3277         sc->bge_cdata.bge_tx_chain[idx] = m_head;
3278         sc->bge_txcnt += nsegs;
3279
3280         BGE_INC(idx, BGE_TX_RING_CNT);
3281         *txidx = idx;
3282 back:
3283         if (error) {
3284                 m_freem(*m_head0);
3285                 *m_head0 = NULL;
3286         }
3287         return error;
3288 }
3289
3290 /*
3291  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
3292  * to the mbuf data regions directly in the transmit descriptors.
3293  */
3294 static void
3295 bge_start(struct ifnet *ifp)
3296 {
3297         struct bge_softc *sc = ifp->if_softc;
3298         struct mbuf *m_head = NULL;
3299         uint32_t prodidx;
3300         int need_trans;
3301
3302         if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
3303                 return;
3304
3305         prodidx = sc->bge_tx_prodidx;
3306
3307         need_trans = 0;
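             /*
              * bge_tx_chain[] is only set at the last descriptor of each
              * packet, so a non-NULL entry at prodidx means the ring has
              * wrapped onto a transmission that is still in flight.
              */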
3308         while (sc->bge_cdata.bge_tx_chain[prodidx] == NULL) {
3309                 m_head = ifq_dequeue(&ifp->if_snd, NULL);
3310                 if (m_head == NULL)
3311                         break;
3312
3313                 /*
3314                  * XXX
3315                  * The code inside the if() block is never reached since we
3316                  * must mark CSUM_IP_FRAGS in our if_hwassist to start getting
3317                  * requests to checksum TCP/UDP in a fragmented packet.
3318                  * 
3319                  * XXX
3320                  * safety overkill.  If this is a fragmented packet chain
3321                  * with delayed TCP/UDP checksums, then only encapsulate
3322                  * it if we have enough descriptors to handle the entire
3323                  * chain at once.
3324                  * (paranoia -- may not actually be needed)
3325                  */
3326                 if ((m_head->m_flags & M_FIRSTFRAG) &&
3327                     (m_head->m_pkthdr.csum_flags & CSUM_DELAY_DATA)) {
3328                         if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
3329                             m_head->m_pkthdr.csum_data + BGE_NSEG_RSVD) {
3330                                 ifp->if_flags |= IFF_OACTIVE;
3331                                 ifq_prepend(&ifp->if_snd, m_head);
3332                                 break;
3333                         }
3334                 }
3335
3336                 /*
3337                  * Sanity check: avoid coming within BGE_NSEG_RSVD
3338                  * descriptors of the end of the ring.  Also make
3339                  * sure there are BGE_NSEG_SPARE descriptors for
3340                  * jumbo buffers' defragmentation.
3341                  */
3342                 if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
3343                     (BGE_NSEG_RSVD + BGE_NSEG_SPARE)) {
3344                         ifp->if_flags |= IFF_OACTIVE;
3345                         ifq_prepend(&ifp->if_snd, m_head);
3346                         break;
3347                 }
3348
3349                 /*
3350                  * Pack the data into the transmit ring. If we
3351                  * don't have room, set the OACTIVE flag and wait
3352                  * for the NIC to drain the ring.
3353                  */
3354                 if (bge_encap(sc, &m_head, &prodidx)) {
3355                         ifp->if_flags |= IFF_OACTIVE;
3356                         ifp->if_oerrors++;
3357                         break;
3358                 }
3359                 need_trans = 1;
3360
3361                 ETHER_BPF_MTAP(ifp, m_head);
3362         }
3363
3364         if (!need_trans)
3365                 return;
3366
3367         /* Transmit */
3368         bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
3369         /* 5700 b2 errata */
3370         if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
3371                 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
3372
3373         sc->bge_tx_prodidx = prodidx;
3374
3375         /*
3376          * Set a timeout in case the chip goes out to lunch.
3377          */
3378         ifp->if_timer = 5;
3379 }
3380
3381 static void
3382 bge_init(void *xsc)
3383 {
3384         struct bge_softc *sc = xsc;
3385         struct ifnet *ifp = &sc->arpcom.ac_if;
3386         uint16_t *m;
3387         uint32_t mode;
3388
3389         ASSERT_SERIALIZED(ifp->if_serializer);
3390
3391         /* Cancel pending I/O and flush buffers. */
3392         bge_stop(sc);
3393         bge_reset(sc);
3394         bge_chipinit(sc);
3395
3396         /*
3397          * Init the various state machines, ring
3398          * control blocks and firmware.
3399          */
3400         if (bge_blockinit(sc)) {
3401                 if_printf(ifp, "initialization failure\n");
3402                 bge_stop(sc);
3403                 return;
3404         }
3405
3406         /* Specify MTU. */
3407         CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
3408             ETHER_HDR_LEN + ETHER_CRC_LEN + EVL_ENCAPLEN);
3409
3410         /* Load our MAC address. */
3411         m = (uint16_t *)&sc->arpcom.ac_enaddr[0];
3412         CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
3413         CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
3414
3415         /* Enable or disable promiscuous mode as needed. */
3416         bge_setpromisc(sc);
3417
3418         /* Program multicast filter. */
3419         bge_setmulti(sc);
3420
3421         /* Init RX ring. */
3422         if (bge_init_rx_ring_std(sc)) {
3423                 if_printf(ifp, "RX ring initialization failed\n");
3424                 bge_stop(sc);
3425                 return;
3426         }
3427
3428         /*
3429          * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
3430          * memory to ensure that the chip has in fact read the first
3431          * entry of the ring.
3432          */
3433         if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
3434                 uint32_t                v, i;
3435                 for (i = 0; i < 10; i++) {
3436                         DELAY(20);
3437                         v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
3438                         if (v == (MCLBYTES - ETHER_ALIGN))
3439                                 break;
3440                 }
3441                 if (i == 10)
3442                         if_printf(ifp, "5705 A0 chip failed to load RX ring\n");
3443         }
3444
3445         /* Init jumbo RX ring. */
3446         if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN)) {
3447                 if (bge_init_rx_ring_jumbo(sc)) {
3448                         if_printf(ifp, "Jumbo RX ring initialization failed\n");
3449                         bge_stop(sc);
3450                         return;
3451                 }
3452         }
3453
3454         /* Init our RX return ring index */
3455         sc->bge_rx_saved_considx = 0;
3456
3457         /* Init TX ring. */
3458         bge_init_tx_ring(sc);
3459
3460         /* Enable TX MAC state machine lockup fix. */
3461         mode = CSR_READ_4(sc, BGE_TX_MODE);
3462         if (BGE_IS_5755_PLUS(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5906)
3463                 mode |= BGE_TXMODE_MBUF_LOCKUP_FIX;
3464         /* Turn on transmitter */
3465         CSR_WRITE_4(sc, BGE_TX_MODE, mode | BGE_TXMODE_ENABLE);
3466
3467         /* Turn on receiver */
3468         BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
3469
3470         /*
3471          * Set the number of good frames to receive after RX MBUF
3472          * Low Watermark has been reached.  After the RX MAC receives
3473          * this number of frames, it will drop subsequent incoming
3474          * frames until the MBUF High Watermark is reached.
3475          */
3476         CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 2);
3477
3478         /* Tell firmware we're alive. */
3479         BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3480
3481         /* Enable host interrupts if polling(4) is not enabled. */
3482         PCI_SETBIT(sc->bge_dev, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA, 4);
3483 #ifdef DEVICE_POLLING
3484         if (ifp->if_flags & IFF_POLLING)
3485                 bge_disable_intr(sc);
3486         else
3487 #endif
3488         bge_enable_intr(sc);
3489
3490         bge_ifmedia_upd(ifp);
3491
3492         ifp->if_flags |= IFF_RUNNING;
3493         ifp->if_flags &= ~IFF_OACTIVE;
3494
3495         callout_reset(&sc->bge_stat_timer, hz, bge_tick, sc);
3496 }
3497
3498 /*
3499  * Set media options.
3500  */
3501 static int
3502 bge_ifmedia_upd(struct ifnet *ifp)
3503 {
3504         struct bge_softc *sc = ifp->if_softc;
3505
3506         /* If this is a 1000baseX NIC, enable the TBI port. */
3507         if (sc->bge_flags & BGE_FLAG_TBI) {
3508                 struct ifmedia *ifm = &sc->bge_ifmedia;
3509
3510                 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
3511                         return(EINVAL);
3512
3513                 switch(IFM_SUBTYPE(ifm->ifm_media)) {
3514                 case IFM_AUTO:
3515                         /*
3516                          * The BCM5704 ASIC appears to have a special
3517                          * mechanism for programming the autoneg
3518                          * advertisement registers in TBI mode.
3519                          */
3520                         if (!bge_fake_autoneg &&
3521                             sc->bge_asicrev == BGE_ASICREV_BCM5704) {
3522                                 uint32_t sgdig;
3523
3524                                 CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
3525                                 sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
3526                                 sgdig |= BGE_SGDIGCFG_AUTO |
3527                                          BGE_SGDIGCFG_PAUSE_CAP |
3528                                          BGE_SGDIGCFG_ASYM_PAUSE;
3529                                 CSR_WRITE_4(sc, BGE_SGDIG_CFG,
3530                                             sgdig | BGE_SGDIGCFG_SEND);
3531                                 DELAY(5);
3532                                 CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig);
3533                         }
3534                         break;
3535                 case IFM_1000_SX:
3536                         if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
3537                                 BGE_CLRBIT(sc, BGE_MAC_MODE,
3538                                     BGE_MACMODE_HALF_DUPLEX);
3539                         } else {
3540                                 BGE_SETBIT(sc, BGE_MAC_MODE,
3541                                     BGE_MACMODE_HALF_DUPLEX);
3542                         }
3543                         break;
3544                 default:
3545                         return(EINVAL);
3546                 }
3547         } else {
3548                 struct mii_data *mii = device_get_softc(sc->bge_miibus);
3549
3550                 sc->bge_link_evt++;
3551                 sc->bge_link = 0;
3552                 if (mii->mii_instance) {
3553                         struct mii_softc *miisc;
3554
3555                         LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
3556                                 mii_phy_reset(miisc);
3557                 }
3558                 mii_mediachg(mii);
3559
3560                 /*
3561                  * Force an interrupt so that we will call bge_link_upd
3562                  * if needed and clear any pending link state attention.
3563          * Without this we would get no further interrupts for link
3564          * state changes, so the link would never come UP and we
3565          * could not send in bge_start.  The only other way to get
3566          * things working was to receive a packet and take an RX
3567          * interrupt.
3568          *
3569          * bge_tick should help for fiber cards, so we might not
3570          * need this here when BGE_FLAG_TBI is set, but since we
3571          * poll fiber anyway it should do no harm.
3572                  */
3573                 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
3574                     BGE_IS_5788(sc))
3575                         BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
3576                 else
3577                         BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
3578         }
3579         return(0);
3580 }
3581
3582 /*
3583  * Report current media status.
3584  */
3585 static void
3586 bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
3587 {
3588         struct bge_softc *sc = ifp->if_softc;
3589
3590         if (sc->bge_flags & BGE_FLAG_TBI) {
3591                 ifmr->ifm_status = IFM_AVALID;
3592                 ifmr->ifm_active = IFM_ETHER;
3593                 if (CSR_READ_4(sc, BGE_MAC_STS) &
3594                     BGE_MACSTAT_TBI_PCS_SYNCHED) {
3595                         ifmr->ifm_status |= IFM_ACTIVE;
3596                 } else {
3597                         ifmr->ifm_active |= IFM_NONE;
3598                         return;
3599                 }
3600
3601                 ifmr->ifm_active |= IFM_1000_SX;
3602                 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
3603                         ifmr->ifm_active |= IFM_HDX;    
3604                 else
3605                         ifmr->ifm_active |= IFM_FDX;
3606         } else {
3607                 struct mii_data *mii = device_get_softc(sc->bge_miibus);
3608
3609                 mii_pollstat(mii);
3610                 ifmr->ifm_active = mii->mii_media_active;
3611                 ifmr->ifm_status = mii->mii_media_status;
3612         }
3613 }
3614
3615 static int
3616 bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
3617 {
3618         struct bge_softc *sc = ifp->if_softc;
3619         struct ifreq *ifr = (struct ifreq *)data;
3620         int mask, error = 0;
3621
3622         ASSERT_SERIALIZED(ifp->if_serializer);
3623
3624         switch (command) {
3625         case SIOCSIFMTU:
3626                 if ((!BGE_IS_JUMBO_CAPABLE(sc) && ifr->ifr_mtu > ETHERMTU) ||
3627                     (BGE_IS_JUMBO_CAPABLE(sc) &&
3628                      ifr->ifr_mtu > BGE_JUMBO_MTU)) {
3629                         error = EINVAL;
3630                 } else if (ifp->if_mtu != ifr->ifr_mtu) {
3631                         ifp->if_mtu = ifr->ifr_mtu;
3632                         if (ifp->if_flags & IFF_RUNNING)
3633                                 bge_init(sc);
3634                 }
3635                 break;
3636         case SIOCSIFFLAGS:
3637                 if (ifp->if_flags & IFF_UP) {
3638                         if (ifp->if_flags & IFF_RUNNING) {
3639                                 mask = ifp->if_flags ^ sc->bge_if_flags;
3640
3641                                 /*
3642                                  * If only the state of the PROMISC flag
3643                                  * changed, then just use the 'set promisc
3644                                  * mode' command instead of reinitializing
3645                                  * the entire NIC. Doing a full re-init
3646                                  * means reloading the firmware and waiting
3647                                  * for it to start up, which may take a
3648                                  * second or two.  Similarly for ALLMULTI.
3649                                  */
3650                                 if (mask & IFF_PROMISC)
3651                                         bge_setpromisc(sc);
3652                                 if (mask & IFF_ALLMULTI)
3653                                         bge_setmulti(sc);
3654                         } else {
3655                                 bge_init(sc);
3656                         }
3657                 } else if (ifp->if_flags & IFF_RUNNING) {
3658                         bge_stop(sc);
3659                 }
3660                 sc->bge_if_flags = ifp->if_flags;
3661                 break;
3662         case SIOCADDMULTI:
3663         case SIOCDELMULTI:
3664                 if (ifp->if_flags & IFF_RUNNING)
3665                         bge_setmulti(sc);
3666                 break;
3667         case SIOCSIFMEDIA:
3668         case SIOCGIFMEDIA:
3669                 if (sc->bge_flags & BGE_FLAG_TBI) {
3670                         error = ifmedia_ioctl(ifp, ifr,
3671                             &sc->bge_ifmedia, command);
3672                 } else {
3673                         struct mii_data *mii;
3674
3675                         mii = device_get_softc(sc->bge_miibus);
3676                         error = ifmedia_ioctl(ifp, ifr,
3677                                               &mii->mii_media, command);
3678                 }
3679                 break;
3680         case SIOCSIFCAP:
3681                 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
3682                 if (mask & IFCAP_HWCSUM) {
3683                         ifp->if_capenable ^= (mask & IFCAP_HWCSUM);
3684                         if (IFCAP_HWCSUM & ifp->if_capenable)
3685                                 ifp->if_hwassist = BGE_CSUM_FEATURES;
3686                         else
3687                                 ifp->if_hwassist = 0;
3688                 }
3689                 break;
3690         default:
3691                 error = ether_ioctl(ifp, command, data);
3692                 break;
3693         }
3694         return error;
3695 }
3696
3697 static void
3698 bge_watchdog(struct ifnet *ifp)
3699 {
3700         struct bge_softc *sc = ifp->if_softc;
3701
3702         if_printf(ifp, "watchdog timeout -- resetting\n");
3703
3704         bge_init(sc);
3705
3706         ifp->if_oerrors++;
3707
3708         if (!ifq_is_empty(&ifp->if_snd))
3709                 if_devstart(ifp);
3710 }
3711
3712 /*
3713  * Stop the adapter and free any mbufs allocated to the
3714  * RX and TX lists.
3715  */
3716 static void
3717 bge_stop(struct bge_softc *sc)
3718 {
3719         struct ifnet *ifp = &sc->arpcom.ac_if;
3720
3721         ASSERT_SERIALIZED(ifp->if_serializer);
3722
3723         callout_stop(&sc->bge_stat_timer);
3724
3725         /*
3726          * Disable all of the receiver blocks
3727          */
3728         bge_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
3729         bge_stop_block(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
3730         bge_stop_block(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
3731         if (BGE_IS_5700_FAMILY(sc))
3732                 bge_stop_block(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
3733         bge_stop_block(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
3734         bge_stop_block(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
3735         bge_stop_block(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
3736
3737         /*
3738          * Disable all of the transmit blocks
3739          */
3740         bge_stop_block(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
3741         bge_stop_block(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
3742         bge_stop_block(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
3743         bge_stop_block(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
3744         bge_stop_block(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
3745         if (BGE_IS_5700_FAMILY(sc))
3746                 bge_stop_block(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
3747         bge_stop_block(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
3748
3749         /*
3750          * Shut down all of the memory managers and related
3751          * state machines.
3752          */
3753         bge_stop_block(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
3754         bge_stop_block(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
3755         if (BGE_IS_5700_FAMILY(sc))
3756                 bge_stop_block(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
3757         CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
3758         CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
3759         if (!BGE_IS_5705_PLUS(sc)) {
3760                 BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
3761                 BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
3762         }
3763
3764         /* Disable host interrupts. */
3765         bge_disable_intr(sc);
3766
3767         /*
3768          * Tell firmware we're shutting down.
3769          */
3770         BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3771
3772         /* Free the RX lists. */
3773         bge_free_rx_ring_std(sc);
3774
3775         /* Free jumbo RX list. */
3776         if (BGE_IS_JUMBO_CAPABLE(sc))
3777                 bge_free_rx_ring_jumbo(sc);
3778
3779         /* Free TX buffers. */
3780         bge_free_tx_ring(sc);
3781
3782         sc->bge_status_tag = 0;
3783         sc->bge_link = 0;
3784         sc->bge_coal_chg = 0;
3785
3786         sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
3787
3788         ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
3789         ifp->if_timer = 0;
3790 }
3791
3792 /*
3793  * Stop all chip I/O so that the kernel's probe routines don't
3794  * get confused by errant DMAs when rebooting.
3795  */
3796 static void
3797 bge_shutdown(device_t dev)
3798 {
3799         struct bge_softc *sc = device_get_softc(dev);
3800         struct ifnet *ifp = &sc->arpcom.ac_if;
3801
3802         lwkt_serialize_enter(ifp->if_serializer);
3803         bge_stop(sc);
3804         bge_reset(sc);
3805         lwkt_serialize_exit(ifp->if_serializer);
3806 }
3807
3808 static int
3809 bge_suspend(device_t dev)
3810 {
3811         struct bge_softc *sc = device_get_softc(dev);
3812         struct ifnet *ifp = &sc->arpcom.ac_if;
3813
3814         lwkt_serialize_enter(ifp->if_serializer);
3815         bge_stop(sc);
3816         lwkt_serialize_exit(ifp->if_serializer);
3817
3818         return 0;
3819 }
3820
3821 static int
3822 bge_resume(device_t dev)
3823 {
3824         struct bge_softc *sc = device_get_softc(dev);
3825         struct ifnet *ifp = &sc->arpcom.ac_if;
3826
3827         lwkt_serialize_enter(ifp->if_serializer);
3828
3829         if (ifp->if_flags & IFF_UP) {
3830                 bge_init(sc);
3831
3832                 if (!ifq_is_empty(&ifp->if_snd))
3833                         if_devstart(ifp);
3834         }
3835
3836         lwkt_serialize_exit(ifp->if_serializer);
3837
3838         return 0;
3839 }
3840
3841 static void
3842 bge_setpromisc(struct bge_softc *sc)
3843 {
3844         struct ifnet *ifp = &sc->arpcom.ac_if;
3845
3846         if (ifp->if_flags & IFF_PROMISC)
3847                 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
3848         else
3849                 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
3850 }
3851
3852 static void
3853 bge_dma_free(struct bge_softc *sc)
3854 {
3855         int i;
3856
3857         /* Destroy RX mbuf DMA resources. */
3858         if (sc->bge_cdata.bge_rx_mtag != NULL) {
3859                 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
3860                         bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag,
3861                             sc->bge_cdata.bge_rx_std_dmamap[i]);
3862                 }
3863                 bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag,
3864                                    sc->bge_cdata.bge_rx_tmpmap);
3865                 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_mtag);
3866         }
3867
3868         /* Destroy TX mbuf DMA resources. */
3869         if (sc->bge_cdata.bge_tx_mtag != NULL) {
3870                 for (i = 0; i < BGE_TX_RING_CNT; i++) {
3871                         bus_dmamap_destroy(sc->bge_cdata.bge_tx_mtag,
3872                             sc->bge_cdata.bge_tx_dmamap[i]);
3873                 }
3874                 bus_dma_tag_destroy(sc->bge_cdata.bge_tx_mtag);
3875         }
3876
3877         /* Destroy standard RX ring */
3878         bge_dma_block_free(sc->bge_cdata.bge_rx_std_ring_tag,
3879                            sc->bge_cdata.bge_rx_std_ring_map,
3880                            sc->bge_ldata.bge_rx_std_ring);
3881
3882         if (BGE_IS_JUMBO_CAPABLE(sc))
3883                 bge_free_jumbo_mem(sc);
3884
3885         /* Destroy RX return ring */
3886         bge_dma_block_free(sc->bge_cdata.bge_rx_return_ring_tag,
3887                            sc->bge_cdata.bge_rx_return_ring_map,
3888                            sc->bge_ldata.bge_rx_return_ring);
3889
3890         /* Destroy TX ring */
3891         bge_dma_block_free(sc->bge_cdata.bge_tx_ring_tag,
3892                            sc->bge_cdata.bge_tx_ring_map,
3893                            sc->bge_ldata.bge_tx_ring);
3894
3895         /* Destroy status block */
3896         bge_dma_block_free(sc->bge_cdata.bge_status_tag,
3897                            sc->bge_cdata.bge_status_map,
3898                            sc->bge_ldata.bge_status_block);
3899
3900         /* Destroy statistics block */
3901         bge_dma_block_free(sc->bge_cdata.bge_stats_tag,
3902                            sc->bge_cdata.bge_stats_map,
3903                            sc->bge_ldata.bge_stats);
3904
3905         /* Destroy the parent tag */
3906         if (sc->bge_cdata.bge_parent_tag != NULL)
3907                 bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag);
3908 }
3909
3910 static int
3911 bge_dma_alloc(struct bge_softc *sc)
3912 {
3913         struct ifnet *ifp = &sc->arpcom.ac_if;
3914         int i, error;
3915         bus_addr_t lowaddr;
3916
3917         lowaddr = BUS_SPACE_MAXADDR;
3918         if (sc->bge_flags & BGE_FLAG_MAXADDR_40BIT)
3919                 lowaddr = BGE_DMA_MAXADDR_40BIT;
3920
3921         /*
3922          * Allocate the parent bus DMA tag appropriate for PCI.
3923          *
3924          * All of the NetXtreme/NetLink controllers have a 4GB
3925          * boundary DMA bug: whenever a DMA transfer crosses a
3926          * multiple of 4GB (4GB, 8GB, 12GB, etc.), i.e. makes the
3927          * transition from 0xX_FFFF_FFFF to 0x(X+1)_0000_0000, an
3928          * internal DMA state machine will lock up and cause the
3929          * device to hang.
3930          */
3931         error = bus_dma_tag_create(NULL, 1, BGE_DMA_BOUNDARY_4G,
3932                                    lowaddr, BUS_SPACE_MAXADDR,
3933                                    NULL, NULL,
3934                                    BUS_SPACE_MAXSIZE_32BIT, 0,
3935                                    BUS_SPACE_MAXSIZE_32BIT,
3936                                    0, &sc->bge_cdata.bge_parent_tag);
3937         if (error) {
3938                 if_printf(ifp, "could not allocate parent dma tag\n");
3939                 return error;
3940         }
3941
3942         /*
3943          * Create DMA tag and maps for RX mbufs.
3944          */
3945         error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1, 0,
3946                                    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3947                                    NULL, NULL, MCLBYTES, 1, MCLBYTES,
3948                                    BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK,
3949                                    &sc->bge_cdata.bge_rx_mtag);
3950         if (error) {
3951                 if_printf(ifp, "could not allocate RX mbuf dma tag\n");
3952                 return error;
3953         }
3954
3955         error = bus_dmamap_create(sc->bge_cdata.bge_rx_mtag,
3956                                   BUS_DMA_WAITOK, &sc->bge_cdata.bge_rx_tmpmap);
3957         if (error) {
3958                 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_mtag);
3959                 sc->bge_cdata.bge_rx_mtag = NULL;
3960                 return error;
3961         }
3962
3963         for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
3964                 error = bus_dmamap_create(sc->bge_cdata.bge_rx_mtag,
3965                                           BUS_DMA_WAITOK,
3966                                           &sc->bge_cdata.bge_rx_std_dmamap[i]);
3967                 if (error) {
3968                         int j;
3969
3970                         for (j = 0; j < i; ++j) {
3971                                 bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag,
3972                                         sc->bge_cdata.bge_rx_std_dmamap[j]);
3973                         }
3974                         bus_dma_tag_destroy(sc->bge_cdata.bge_rx_mtag);
3975                         sc->bge_cdata.bge_rx_mtag = NULL;
3976
3977                         if_printf(ifp, "could not create DMA map for RX\n");
3978                         return error;
3979                 }
3980         }
3981
3982         /*
3983          * Create DMA tag and maps for TX mbufs.
3984          */
3985         error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1, 0,
3986                                    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3987                                    NULL, NULL,
3988                                    BGE_JUMBO_FRAMELEN, BGE_NSEG_NEW, MCLBYTES,
3989                                    BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK |
3990                                    BUS_DMA_ONEBPAGE,
3991                                    &sc->bge_cdata.bge_tx_mtag);
3992         if (error) {
3993                 if_printf(ifp, "could not allocate TX mbuf DMA tag\n");
3994                 return error;
3995         }
3996
3997         for (i = 0; i < BGE_TX_RING_CNT; i++) {
3998                 error = bus_dmamap_create(sc->bge_cdata.bge_tx_mtag,
3999                                           BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
4000                                           &sc->bge_cdata.bge_tx_dmamap[i]);
4001                 if (error) {
4002                         int j;
4003
4004                         for (j = 0; j < i; ++j) {
4005                                 bus_dmamap_destroy(sc->bge_cdata.bge_tx_mtag,
4006                                         sc->bge_cdata.bge_tx_dmamap[j]);
4007                         }
4008                         bus_dma_tag_destroy(sc->bge_cdata.bge_tx_mtag);
4009                         sc->bge_cdata.bge_tx_mtag = NULL;
4010
4011                         if_printf(ifp, "could not create DMA map for TX\n");
4012                         return error;
4013                 }
4014         }
4015
4016         /*
4017          * Create DMA resources for the standard RX ring.
4018          */
4019         error = bge_dma_block_alloc(sc, BGE_STD_RX_RING_SZ,
4020                                     &sc->bge_cdata.bge_rx_std_ring_tag,
4021                                     &sc->bge_cdata.bge_rx_std_ring_map,
4022                                     (void *)&sc->bge_ldata.bge_rx_std_ring,
4023                                     &sc->bge_ldata.bge_rx_std_ring_paddr);
4024         if (error) {
4025                 if_printf(ifp, "could not create std RX ring\n");
4026                 return error;
4027         }
4028
4029         /*
4030          * Create jumbo buffer pool.
4031          */
4032         if (BGE_IS_JUMBO_CAPABLE(sc)) {
4033                 error = bge_alloc_jumbo_mem(sc);
4034                 if (error) {
4035                         if_printf(ifp, "could not create jumbo buffer pool\n");
4036                         return error;
4037                 }
4038         }
4039
4040         /*
4041          * Create DMA resources for the RX return ring.
4042          */
4043         error = bge_dma_block_alloc(sc, BGE_RX_RTN_RING_SZ(sc),
4044                                     &sc->bge_cdata.bge_rx_return_ring_tag,
4045                                     &sc->bge_cdata.bge_rx_return_ring_map,
4046                                     (void *)&sc->bge_ldata.bge_rx_return_ring,
4047                                     &sc->bge_ldata.bge_rx_return_ring_paddr);
4048         if (error) {
4049                 if_printf(ifp, "could not create RX ret ring\n");
4050                 return error;
4051         }
4052
4053         /*
4054          * Create DMA resources for the TX ring.
4055          */
4056         error = bge_dma_block_alloc(sc, BGE_TX_RING_SZ,
4057                                     &sc->bge_cdata.bge_tx_ring_tag,
4058                                     &sc->bge_cdata.bge_tx_ring_map,
4059                                     (void *)&sc->bge_ldata.bge_tx_ring,
4060                                     &sc->bge_ldata.bge_tx_ring_paddr);
4061         if (error) {
4062                 if_printf(ifp, "could not create TX ring\n");
4063                 return error;
4064         }
4065
4066         /*
4067          * Create DMA resources for the status block.
4068          */
4069         error = bge_dma_block_alloc(sc, BGE_STATUS_BLK_SZ,
4070                                     &sc->bge_cdata.bge_status_tag,
4071                                     &sc->bge_cdata.bge_status_map,
4072                                     (void *)&sc->bge_ldata.bge_status_block,
4073                                     &sc->bge_ldata.bge_status_block_paddr);
4074         if (error) {
4075                 if_printf(ifp, "could not create status block\n");
4076                 return error;
4077         }
4078
4079         /*
4080          * Create DMA resources for the statistics block.
4081          */
4082         error = bge_dma_block_alloc(sc, BGE_STATS_SZ,
4083                                     &sc->bge_cdata.bge_stats_tag,
4084                                     &sc->bge_cdata.bge_stats_map,
4085                                     (void *)&sc->bge_ldata.bge_stats,
4086                                     &sc->bge_ldata.bge_stats_paddr);
4087         if (error) {
4088                 if_printf(ifp, "could not create stats block\n");
4089                 return error;
4090         }
4091         return 0;
4092 }
4093
4094 static int
4095 bge_dma_block_alloc(struct bge_softc *sc, bus_size_t size, bus_dma_tag_t *tag,
4096                     bus_dmamap_t *map, void **addr, bus_addr_t *paddr)
4097 {
4098         bus_dmamem_t dmem;
4099         int error;
4100
4101         error = bus_dmamem_coherent(sc->bge_cdata.bge_parent_tag, PAGE_SIZE, 0,
4102                                     BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
4103                                     size, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
4104         if (error)
4105                 return error;
4106
4107         *tag = dmem.dmem_tag;
4108         *map = dmem.dmem_map;
4109         *addr = dmem.dmem_addr;
4110         *paddr = dmem.dmem_busaddr;
4111
4112         return 0;
4113 }
4114
4115 static void
4116 bge_dma_block_free(bus_dma_tag_t tag, bus_dmamap_t map, void *addr)
4117 {
4118         if (tag != NULL) {
4119                 bus_dmamap_unload(tag, map);
4120                 bus_dmamem_free(tag, addr, map);
4121                 bus_dma_tag_destroy(tag);
4122         }
4123 }
4124
4125 /*
4126  * Grrr. The link status word in the status block does
4127  * not work correctly on the BCM5700 rev AX and BX chips,
4128  * according to all available information. Hence, we have
4129  * to enable MII interrupts in order to properly obtain
4130  * async link changes. Unfortunately, this also means that
4131  * we have to read the MAC status register to detect link
4132  * changes, thereby adding an additional register access to
4133  * the interrupt handler.
4134  *
4135  * XXX: perhaps the link state detection procedure used for
4136  * BGE_CHIPID_BCM5700_B2 can be used for other BCM5700 revisions.
4137  */
4138 static void
4139 bge_bcm5700_link_upd(struct bge_softc *sc, uint32_t status __unused)
4140 {
4141         struct ifnet *ifp = &sc->arpcom.ac_if;
4142         struct mii_data *mii = device_get_softc(sc->bge_miibus);
4143
4144         mii_pollstat(mii);
4145
4146         if (!sc->bge_link &&
4147             (mii->mii_media_status & IFM_ACTIVE) &&
4148             IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
4149                 sc->bge_link++;
4150                 if (bootverbose)
4151                         if_printf(ifp, "link UP\n");
4152         } else if (sc->bge_link &&
4153             (!(mii->mii_media_status & IFM_ACTIVE) ||
4154             IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
4155                 sc->bge_link = 0;
4156                 if (bootverbose)
4157                         if_printf(ifp, "link DOWN\n");
4158         }
4159
4160         /* Clear the interrupt. */
4161         CSR_WRITE_4(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_MI_INTERRUPT);
4162         bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR);
4163         bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR, BRGPHY_INTRS);
4164 }
4165
4166 static void
4167 bge_tbi_link_upd(struct bge_softc *sc, uint32_t status)
4168 {
4169         struct ifnet *ifp = &sc->arpcom.ac_if;
4170
4171 #define PCS_ENCODE_ERR  (BGE_MACSTAT_PORT_DECODE_ERROR|BGE_MACSTAT_MI_COMPLETE)
4172
4173         /*
4174          * Sometimes PCS encoding errors are detected in
4175          * TBI mode (on fiber NICs), and for some reason
4176          * the chip will signal them as link changes.
4177          * If we get a link change event, but the 'PCS
4178          * encoding error' bit in the MAC status register
4179          * is set, don't bother doing a link check.
4180          * This avoids spurious "gigabit link up" messages
4181          * that sometimes appear on fiber NICs during
4182          * periods of heavy traffic.
4183          */
4184         if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
4185                 if (!sc->bge_link) {
4186                         sc->bge_link++;
4187                         if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
4188                                 BGE_CLRBIT(sc, BGE_MAC_MODE,
4189                                     BGE_MACMODE_TBI_SEND_CFGS);
4190                         }
4191                         CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
4192
4193                         if (bootverbose)
4194                                 if_printf(ifp, "link UP\n");
4195
4196                         ifp->if_link_state = LINK_STATE_UP;
4197                         if_link_state_change(ifp);
4198                 }
4199         } else if ((status & PCS_ENCODE_ERR) != PCS_ENCODE_ERR) {
4200                 if (sc->bge_link) {
4201                         sc->bge_link = 0;
4202
4203                         if (bootverbose)
4204                                 if_printf(ifp, "link DOWN\n");
4205
4206                         ifp->if_link_state = LINK_STATE_DOWN;
4207                         if_link_state_change(ifp);
4208                 }
4209         }
4210
4211 #undef PCS_ENCODE_ERR
4212
4213         /* Clear the attention. */
4214         CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
4215             BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
4216             BGE_MACSTAT_LINK_CHANGED);
4217 }
4218
4219 static void
4220 bge_copper_link_upd(struct bge_softc *sc, uint32_t status __unused)
4221 {
4222         struct ifnet *ifp = &sc->arpcom.ac_if;
4223         struct mii_data *mii = device_get_softc(sc->bge_miibus);
4224
4225         mii_pollstat(mii);
4226         bge_miibus_statchg(sc->bge_dev);
4227
4228         if (bootverbose) {
4229                 if (sc->bge_link)
4230                         if_printf(ifp, "link UP\n");
4231                 else
4232                         if_printf(ifp, "link DOWN\n");
4233         }
4234
4235         /* Clear the attention. */
4236         CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
4237             BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
4238             BGE_MACSTAT_LINK_CHANGED);
4239 }
4240
4241 static void
4242 bge_autopoll_link_upd(struct bge_softc *sc, uint32_t status __unused)
4243 {
4244         struct ifnet *ifp = &sc->arpcom.ac_if;
4245         struct mii_data *mii = device_get_softc(sc->bge_miibus);
4246
4247         mii_pollstat(mii);
4248
4249         if (!sc->bge_link &&
4250             (mii->mii_media_status & IFM_ACTIVE) &&
4251             IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
4252                 sc->bge_link++;
4253                 if (bootverbose)
4254                         if_printf(ifp, "link UP\n");
4255         } else if (sc->bge_link &&
4256             (!(mii->mii_media_status & IFM_ACTIVE) ||
4257             IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
4258                 sc->bge_link = 0;
4259                 if (bootverbose)
4260                         if_printf(ifp, "link DOWN\n");
4261         }
4262
4263         /* Clear the attention. */
4264         CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
4265             BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
4266             BGE_MACSTAT_LINK_CHANGED);
4267 }
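
/*
 * Dispatch sketch (an assumption, condensed for exposition; the field
 * and flag names bge_mi_mode and BGE_MIMODE_AUTOPOLL are taken on
 * faith from the register definitions): at attach time sc->bge_link_upd
 * is pointed at one of the four handlers above depending on the media
 * type and MI mode, roughly as follows.
 */
#if 0
        if (sc->bge_flags & BGE_FLAG_TBI)
                sc->bge_link_upd = bge_tbi_link_upd;            /* fiber */
        else if (sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL)
                sc->bge_link_upd = bge_autopoll_link_upd;
        else if (sc->bge_asicrev == BGE_ASICREV_BCM5700)
                sc->bge_link_upd = bge_bcm5700_link_upd;        /* rev AX/BX quirk */
        else
                sc->bge_link_upd = bge_copper_link_upd;
#endif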
4268
4269 static int
4270 bge_sysctl_rx_coal_ticks(SYSCTL_HANDLER_ARGS)
4271 {
4272         struct bge_softc *sc = arg1;
4273
4274         return bge_sysctl_coal_chg(oidp, arg1, arg2, req,
4275                                    &sc->bge_rx_coal_ticks,
4276                                    BGE_RX_COAL_TICKS_CHG);
4277 }
4278
4279 static int
4280 bge_sysctl_tx_coal_ticks(SYSCTL_HANDLER_ARGS)
4281 {
4282         struct bge_softc *sc = arg1;
4283
4284         return bge_sysctl_coal_chg(oidp, arg1, arg2, req,
4285                                    &sc->bge_tx_coal_ticks,
4286                                    BGE_TX_COAL_TICKS_CHG);
4287 }
4288
4289 static int
4290 bge_sysctl_rx_coal_bds(SYSCTL_HANDLER_ARGS)
4291 {
4292         struct bge_softc *sc = arg1;
4293
4294         return bge_sysctl_coal_chg(oidp, arg1, arg2, req,
4295                                    &sc->bge_rx_coal_bds,
4296                                    BGE_RX_COAL_BDS_CHG);
4297 }
4298
4299 static int
4300 bge_sysctl_tx_coal_bds(SYSCTL_HANDLER_ARGS)
4301 {
4302         struct bge_softc *sc = arg1;
4303
4304         return bge_sysctl_coal_chg(oidp, arg1, arg2, req,
4305                                    &sc->bge_tx_coal_bds,
4306                                    BGE_TX_COAL_BDS_CHG);
4307 }
4308
4309 static int
4310 bge_sysctl_rx_coal_ticks_int(SYSCTL_HANDLER_ARGS)
4311 {
4312         struct bge_softc *sc = arg1;
4313
4314         return bge_sysctl_coal_chg(oidp, arg1, arg2, req,
4315                                    &sc->bge_rx_coal_ticks_int,
4316                                    BGE_RX_COAL_TICKS_INT_CHG);
4317 }
4318
4319 static int
4320 bge_sysctl_tx_coal_ticks_int(SYSCTL_HANDLER_ARGS)
4321 {
4322         struct bge_softc *sc = arg1;
4323
4324         return bge_sysctl_coal_chg(oidp, arg1, arg2, req,
4325                                    &sc->bge_tx_coal_ticks_int,
4326                                    BGE_TX_COAL_TICKS_INT_CHG);
4327 }
4328
4329 static int
4330 bge_sysctl_rx_coal_bds_int(SYSCTL_HANDLER_ARGS)
4331 {
4332         struct bge_softc *sc = arg1;
4333
4334         return bge_sysctl_coal_chg(oidp, arg1, arg2, req,
4335                                    &sc->bge_rx_coal_bds_int,
4336                                    BGE_RX_COAL_BDS_INT_CHG);
4337 }
4338
4339 static int
4340 bge_sysctl_tx_coal_bds_int(SYSCTL_HANDLER_ARGS)
4341 {
4342         struct bge_softc *sc = arg1;
4343
4344         return bge_sysctl_coal_chg(oidp, arg1, arg2, req,
4345                                    &sc->bge_tx_coal_bds_int,
4346                                    BGE_TX_COAL_BDS_INT_CHG);
4347 }
4348
4349 static int
4350 bge_sysctl_coal_chg(SYSCTL_HANDLER_ARGS, uint32_t *coal,
4351                     uint32_t coal_chg_mask)
4352 {
4353         struct bge_softc *sc = arg1;
4354         struct ifnet *ifp = &sc->arpcom.ac_if;
4355         int error = 0, v;
4356
4357         lwkt_serialize_enter(ifp->if_serializer);
4358
4359         v = *coal;
4360         error = sysctl_handle_int(oidp, &v, 0, req);
4361         if (!error && req->newptr != NULL) {
4362                 if (v < 0) {
4363                         error = EINVAL;
4364                 } else {
4365                         *coal = v;
4366                         sc->bge_coal_chg |= coal_chg_mask;
4367                 }
4368         }
4369
4370         lwkt_serialize_exit(ifp->if_serializer);
4371         return error;
4372 }
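
/*
 * Registration sketch (illustrative; the sysctl context/tree field
 * names bge_sysctl_ctx and bge_sysctl_tree are assumptions about the
 * softc): each thin wrapper above is hooked up during attach along
 * these lines.
 */
#if 0
        SYSCTL_ADD_PROC(&sc->bge_sysctl_ctx,
            SYSCTL_CHILDREN(sc->bge_sysctl_tree), OID_AUTO,
            "rx_coal_ticks", CTLTYPE_INT | CTLFLAG_RW,
            sc, 0, bge_sysctl_rx_coal_ticks, "I",
            "Receive coalescing ticks (usec).");
#endif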
4373
4374 static void
4375 bge_coal_change(struct bge_softc *sc)
4376 {
4377         struct ifnet *ifp = &sc->arpcom.ac_if;
4378         uint32_t val;
4379
4380         ASSERT_SERIALIZED(ifp->if_serializer);
4381
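        /*
         * Each register update below is followed by a short delay and
         * a read-back; the read-back appears intended to flush the
         * posted write before proceeding (the value read is unused).
         */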
4382         if (sc->bge_coal_chg & BGE_RX_COAL_TICKS_CHG) {
4383                 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS,
4384                             sc->bge_rx_coal_ticks);
4385                 DELAY(10);
4386                 val = CSR_READ_4(sc, BGE_HCC_RX_COAL_TICKS);
4387
4388                 if (bootverbose) {
4389                         if_printf(ifp, "rx_coal_ticks -> %u\n",
4390                                   sc->bge_rx_coal_ticks);
4391                 }
4392         }
4393
4394         if (sc->bge_coal_chg & BGE_TX_COAL_TICKS_CHG) {
4395                 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS,
4396                             sc->bge_tx_coal_ticks);
4397                 DELAY(10);
4398                 val = CSR_READ_4(sc, BGE_HCC_TX_COAL_TICKS);
4399
4400                 if (bootverbose) {
4401                         if_printf(ifp, "tx_coal_ticks -> %u\n",
4402                                   sc->bge_tx_coal_ticks);
4403                 }
4404         }
4405
4406         if (sc->bge_coal_chg & BGE_RX_COAL_BDS_CHG) {
4407                 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS,
4408                             sc->bge_rx_coal_bds);
4409                 DELAY(10);
4410                 val = CSR_READ_4(sc, BGE_HCC_RX_MAX_COAL_BDS);
4411
4412                 if (bootverbose) {
4413                         if_printf(ifp, "rx_coal_bds -> %u\n",
4414                                   sc->bge_rx_coal_bds);
4415                 }
4416         }
4417
4418         if (sc->bge_coal_chg & BGE_TX_COAL_BDS_CHG) {
4419                 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS,
4420                             sc->bge_tx_coal_bds);
4421                 DELAY(10);
4422                 val = CSR_READ_4(sc, BGE_HCC_TX_MAX_COAL_BDS);
4423
4424                 if (bootverbose) {
4425                         if_printf(ifp, "tx_coal_bds -> %u\n",
4426                                   sc->bge_tx_coal_bds);
4427                 }
4428         }
4429
4430         if (sc->bge_coal_chg & BGE_RX_COAL_TICKS_INT_CHG) {
4431                 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT,
4432                     sc->bge_rx_coal_ticks_int);
4433                 DELAY(10);
4434                 val = CSR_READ_4(sc, BGE_HCC_RX_COAL_TICKS_INT);
4435
4436                 if (bootverbose) {
4437                         if_printf(ifp, "rx_coal_ticks_int -> %u\n",
4438                             sc->bge_rx_coal_ticks_int);
4439                 }
4440         }
4441
4442         if (sc->bge_coal_chg & BGE_TX_COAL_TICKS_INT_CHG) {
4443                 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT,
4444                     sc->bge_tx_coal_ticks_int);
4445                 DELAY(10);
4446                 val = CSR_READ_4(sc, BGE_HCC_TX_COAL_TICKS_INT);
4447
4448                 if (bootverbose) {
4449                         if_printf(ifp, "tx_coal_ticks_int -> %u\n",
4450                             sc->bge_tx_coal_ticks_int);
4451                 }
4452         }
4453
4454         if (sc->bge_coal_chg & BGE_RX_COAL_BDS_INT_CHG) {
4455                 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT,
4456                     sc->bge_rx_coal_bds_int);
4457                 DELAY(10);
4458                 val = CSR_READ_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT);
4459
4460                 if (bootverbose) {
4461                         if_printf(ifp, "rx_coal_bds_int -> %u\n",
4462                             sc->bge_rx_coal_bds_int);
4463                 }
4464         }
4465
4466         if (sc->bge_coal_chg & BGE_TX_COAL_BDS_INT_CHG) {
4467                 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT,
4468                     sc->bge_tx_coal_bds_int);
4469                 DELAY(10);
4470                 val = CSR_READ_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT);
4471
4472                 if (bootverbose) {
4473                         if_printf(ifp, "tx_coal_bds_int -> %u\n",
4474                             sc->bge_tx_coal_bds_int);
4475                 }
4476         }
4477
4478         sc->bge_coal_chg = 0;
4479 }
4480
4481 static void
4482 bge_enable_intr(struct bge_softc *sc)
4483 {
4484         struct ifnet *ifp = &sc->arpcom.ac_if;
4485
4486         lwkt_serialize_handler_enable(ifp->if_serializer);
4487
4488         /*
4489          * Enable interrupts.
4490          */
4491         bge_writembx(sc, BGE_MBX_IRQ0_LO, sc->bge_status_tag << 24);
4492
4493         /*
4494          * Unmask the interrupt when we stop polling.
4495          */
4496         PCI_CLRBIT(sc->bge_dev, BGE_PCI_MISC_CTL,
4497             BGE_PCIMISCCTL_MASK_PCI_INTR, 4);
4498
4499         /*
4500          * Trigger another interrupt, since the above write to
4501          * interrupt mailbox 0 may have acknowledged a pending
4502          * interrupt.
4503          */
4504         BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
4505 }
4506
4507 static void
4508 bge_disable_intr(struct bge_softc *sc)
4509 {
4510         struct ifnet *ifp = &sc->arpcom.ac_if;
4511
4512         /*
4513          * Mask the interrupt when we start polling.
4514          */
4515         PCI_SETBIT(sc->bge_dev, BGE_PCI_MISC_CTL,
4516             BGE_PCIMISCCTL_MASK_PCI_INTR, 4);
4517
4518         /*
4519          * Acknowledge a possibly asserted interrupt.
4520          */
4521         bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
4522
4523         lwkt_serialize_handler_disable(ifp->if_serializer);
4524 }
4525
4526 static int
4527 bge_get_eaddr_mem(struct bge_softc *sc, uint8_t ether_addr[])
4528 {
4529         uint32_t mac_addr;
4530         int ret = 1;
4531
4532         mac_addr = bge_readmem_ind(sc, 0x0c14);
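        /* 0x484b is ASCII "HK", apparently used as a validity signature. */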
4533         if ((mac_addr >> 16) == 0x484b) {
4534                 ether_addr[0] = (uint8_t)(mac_addr >> 8);
4535                 ether_addr[1] = (uint8_t)mac_addr;
4536                 mac_addr = bge_readmem_ind(sc, 0x0c18);
4537                 ether_addr[2] = (uint8_t)(mac_addr >> 24);
4538                 ether_addr[3] = (uint8_t)(mac_addr >> 16);
4539                 ether_addr[4] = (uint8_t)(mac_addr >> 8);
4540                 ether_addr[5] = (uint8_t)mac_addr;
4541                 ret = 0;
4542         }
4543         return ret;
4544 }
4545
4546 static int
4547 bge_get_eaddr_nvram(struct bge_softc *sc, uint8_t ether_addr[])
4548 {
4549         int mac_offset = BGE_EE_MAC_OFFSET;
4550
4551         if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
4552                 mac_offset = BGE_EE_MAC_OFFSET_5906;
4553
4554         return bge_read_nvram(sc, ether_addr, mac_offset + 2, ETHER_ADDR_LEN);
4555 }
4556
4557 static int
4558 bge_get_eaddr_eeprom(struct bge_softc *sc, uint8_t ether_addr[])
4559 {
4560         if (sc->bge_flags & BGE_FLAG_NO_EEPROM)
4561                 return 1;
4562
4563         return bge_read_eeprom(sc, ether_addr, BGE_EE_MAC_OFFSET + 2,
4564                                ETHER_ADDR_LEN);
4565 }
4566
4567 static int
4568 bge_get_eaddr(struct bge_softc *sc, uint8_t eaddr[])
4569 {
4570         static const bge_eaddr_fcn_t bge_eaddr_funcs[] = {
4571                 /* NOTE: Order is critical */
4572                 bge_get_eaddr_mem,
4573                 bge_get_eaddr_nvram,
4574                 bge_get_eaddr_eeprom,
4575                 NULL
4576         };
4577         const bge_eaddr_fcn_t *func;
4578
4579         for (func = bge_eaddr_funcs; *func != NULL; ++func) {
4580                 if ((*func)(sc, eaddr) == 0)
4581                         break;
4582         }
4583         return (*func == NULL ? ENXIO : 0);
4584 }
4585
4586 /*
4587  * NOTE: 'm' is not freed upon failure
4588  */
4589 struct mbuf *
4590 bge_defrag_shortdma(struct mbuf *m)
4591 {
4592         struct mbuf *n;
4593         int found;
4594
4595         /*
4596          * If the device receives two back-to-back send BDs with less
4597          * than or equal to 8 total bytes then the device may hang.  The
4598          * two back-to-back send BDs must be in the same frame for this
4599          * failure to occur.  Scan the mbuf chain to see whether two
4600          * back-to-back send BDs are present.  If so, allocate a new
4601          * mbuf and copy the frame to work around the silicon bug.
4602          */
4603         for (n = m, found = 0; n != NULL; n = n->m_next) {
4604                 if (n->m_len < 8) {
4605                         found++;
4606                         if (found > 1)
4607                                 break;
4608                         continue;
4609                 }
4610                 found = 0;
4611         }
4612
4613         if (found > 1)
4614                 n = m_defrag(m, MB_DONTWAIT);
4615         else
4616                 n = m;
4617         return n;
4618 }
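
/*
 * Usage sketch (hypothetical caller; the BGE_FLAG_SHORTDMA flag name
 * is an assumption): a TX path would run the chain through
 * bge_defrag_shortdma() before DMA mapping and drop the packet if
 * defragmentation fails, freeing 'm' itself per the NOTE above.
 */
#if 0
        if (sc->bge_flags & BGE_FLAG_SHORTDMA) {
                struct mbuf *n;

                n = bge_defrag_shortdma(m);
                if (n == NULL) {
                        m_freem(m);     /* not freed by the callee */
                        return ENOBUFS;
                }
                m = n;
        }
#endif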
4619
4620 static void
4621 bge_stop_block(struct bge_softc *sc, bus_size_t reg, uint32_t bit)
4622 {
4623         int i;
4624
4625         BGE_CLRBIT(sc, reg, bit);
4626         for (i = 0; i < BGE_TIMEOUT; i++) {
4627                 if ((CSR_READ_4(sc, reg) & bit) == 0)
4628                         return;
4629                 DELAY(100);
4630         }
4631 }
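
/*
 * Usage sketch (the register/bit pair is illustrative): a stop path
 * disables a block and waits for its enable bit to clear, e.g.
 *
 *      bge_stop_block(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE);
 *
 * Note that the loop gives up silently after BGE_TIMEOUT polls.
 */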
4632
4633 static void
4634 bge_link_poll(struct bge_softc *sc)
4635 {
4636         uint32_t status;
4637
4638         status = CSR_READ_4(sc, BGE_MAC_STS);
4639         if ((status & sc->bge_link_chg) || sc->bge_link_evt) {
4640                 sc->bge_link_evt = 0;
4641                 sc->bge_link_upd(sc, status);
4642         }
4643 }
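
/*
 * Caller sketch (an assumption about call sites): bge_link_poll() is
 * cheap enough to run both from the interrupt handler and from the
 * periodic tick, so a lost or masked link-change interrupt is still
 * noticed via the bge_link_evt flag.
 */
#if 0
        /* e.g. at the end of bge_intr() and bge_tick(): */
        bge_link_poll(sc);
#endif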