bge: NetXtreme/NetLink controllers require DMA memory that does not cross a 4GB boundary
sys/dev/netif/bge/if_bge.c (dragonfly.git)
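
The subject line refers to a DMA constraint on these controllers: descriptor rings and other blocks shared with the NIC must not straddle a 4GB address boundary. As a rough, hypothetical sketch (not the committed change; names such as bge_example_dma_tag and the bge_parent_tag field are assumptions for illustration), the constraint can be expressed through the "boundary" argument of bus_dma_tag_create(9), which guarantees that no segment returned for the tag crosses a multiple of that value:

        /*
         * Illustrative only: create a tag whose allocations never cross
         * a 4GB line.  The real commit may arrange this differently.
         */
        static int
        bge_example_dma_tag(struct bge_softc *sc, bus_size_t size,
            bus_dma_tag_t *tag)
        {
                return bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
                    PAGE_SIZE,          /* alignment */
                    0x100000000ULL,     /* boundary: never cross 4G */
                    BUS_SPACE_MAXADDR,  /* lowaddr */
                    BUS_SPACE_MAXADDR,  /* highaddr */
                    NULL, NULL,         /* filter, filterarg */
                    size, 1, size,      /* maxsize, nsegments, maxsegsz */
                    0, tag);            /* flags, result tag */
        }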
/*
 * Copyright (c) 2001 Wind River Systems
 * Copyright (c) 1997, 1998, 1999, 2001
 *      Bill Paul <wpaul@windriver.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/bge/if_bge.c,v 1.3.2.39 2005/07/03 03:41:18 silby Exp $
 */

/*
 * Broadcom BCM570x family gigabit ethernet driver for FreeBSD.
 *
 * Written by Bill Paul <wpaul@windriver.com>
 * Senior Engineer, Wind River Systems
 */

/*
 * The Broadcom BCM5700 is based on technology originally developed by
 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
 * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
 * frames, highly configurable RX filtering, and 16 RX and TX queues
 * (which, along with RX filter rules, can be used for QOS applications).
 * Other features, such as TCP segmentation, may be available as part
 * of value-added firmware updates. Unlike the Tigon I and Tigon II,
 * firmware images can be stored in hardware and need not be compiled
 * into the driver.
 *
 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
 *
 * The BCM5701 is a single-chip solution incorporating both the BCM5700
 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
 * does not support external SSRAM.
 *
 * Broadcom also produces a variation of the BCM5700 under the "Altima"
 * brand name, which is functionally similar but lacks PCI-X support.
 *
 * Without external SSRAM, you can only have at most 4 TX rings,
 * and the use of the mini RX ring is disabled. This seems to imply
 * that these features are simply not available on the BCM5701. As a
 * result, this driver does not implement any support for the mini RX
 * ring.
 */

#include "opt_polling.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/interrupt.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/rman.h>
#include <sys/serialize.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/ifq_var.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>

#include <dev/netif/mii_layer/mii.h>
#include <dev/netif/mii_layer/miivar.h>
#include <dev/netif/mii_layer/brgphyreg.h>

#include <bus/pci/pcidevs.h>
#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>

#include <dev/netif/bge/if_bgereg.h>

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

#define BGE_CSUM_FEATURES       (CSUM_IP | CSUM_TCP)
#define BGE_MIN_FRAME           60
static const struct bge_type bge_devs[] = {
        { PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3C996,
                "3COM 3C996 Gigabit Ethernet" },

        { PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_BCM5700,
                "Alteon BCM5700 Gigabit Ethernet" },
        { PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_BCM5701,
                "Alteon BCM5701 Gigabit Ethernet" },

        { PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1000,
                "Altima AC1000 Gigabit Ethernet" },
        { PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1001,
                "Altima AC1002 Gigabit Ethernet" },
        { PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC9100,
                "Altima AC9100 Gigabit Ethernet" },

        { PCI_VENDOR_APPLE, PCI_PRODUCT_APPLE_BCM5701,
                "Apple BCM5701 Gigabit Ethernet" },

        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5700,
                "Broadcom BCM5700 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5701,
                "Broadcom BCM5701 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702,
                "Broadcom BCM5702 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702X,
                "Broadcom BCM5702X Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702_ALT,
                "Broadcom BCM5702 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703,
                "Broadcom BCM5703 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703X,
                "Broadcom BCM5703X Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703A3,
                "Broadcom BCM5703 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704C,
                "Broadcom BCM5704C Dual Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704S,
                "Broadcom BCM5704S Dual Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704S_ALT,
                "Broadcom BCM5704S Dual Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705,
                "Broadcom BCM5705 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705F,
                "Broadcom BCM5705F Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705K,
                "Broadcom BCM5705K Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705M,
                "Broadcom BCM5705M Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705M_ALT,
                "Broadcom BCM5705M Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5714,
                "Broadcom BCM5714C Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5714S,
                "Broadcom BCM5714S Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5715,
                "Broadcom BCM5715 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5715S,
                "Broadcom BCM5715S Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5720,
                "Broadcom BCM5720 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5721,
                "Broadcom BCM5721 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5722,
                "Broadcom BCM5722 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5723,
                "Broadcom BCM5723 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5750,
                "Broadcom BCM5750 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5750M,
                "Broadcom BCM5750M Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751,
                "Broadcom BCM5751 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751F,
                "Broadcom BCM5751F Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751M,
                "Broadcom BCM5751M Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5752,
                "Broadcom BCM5752 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5752M,
                "Broadcom BCM5752M Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5753,
                "Broadcom BCM5753 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5753F,
                "Broadcom BCM5753F Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5753M,
                "Broadcom BCM5753M Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5754,
                "Broadcom BCM5754 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5754M,
                "Broadcom BCM5754M Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5755,
                "Broadcom BCM5755 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5755M,
                "Broadcom BCM5755M Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5756,
                "Broadcom BCM5756 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5761,
                "Broadcom BCM5761 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5761E,
                "Broadcom BCM5761E Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5761S,
                "Broadcom BCM5761S Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5761SE,
                "Broadcom BCM5761SE Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5764,
                "Broadcom BCM5764 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5780,
                "Broadcom BCM5780 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5780S,
                "Broadcom BCM5780S Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5781,
                "Broadcom BCM5781 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5782,
                "Broadcom BCM5782 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5784,
                "Broadcom BCM5784 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5785F,
                "Broadcom BCM5785F Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5785G,
                "Broadcom BCM5785G Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5786,
                "Broadcom BCM5786 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5787,
                "Broadcom BCM5787 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5787F,
                "Broadcom BCM5787F Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5787M,
                "Broadcom BCM5787M Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5788,
                "Broadcom BCM5788 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5789,
                "Broadcom BCM5789 Gigabit Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5901,
                "Broadcom BCM5901 Fast Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5901A2,
                "Broadcom BCM5901A2 Fast Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5903M,
                "Broadcom BCM5903M Fast Ethernet" },
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5906,
                "Broadcom BCM5906 Fast Ethernet"},
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5906M,
                "Broadcom BCM5906M Fast Ethernet"},
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57760,
                "Broadcom BCM57760 Gigabit Ethernet"},
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57780,
                "Broadcom BCM57780 Gigabit Ethernet"},
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57788,
                "Broadcom BCM57788 Gigabit Ethernet"},
        { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57790,
                "Broadcom BCM57790 Gigabit Ethernet"},
        { PCI_VENDOR_SCHNEIDERKOCH, PCI_PRODUCT_SCHNEIDERKOCH_SK_9DX1,
                "SysKonnect Gigabit Ethernet" },

        { 0, 0, NULL }
};

#define BGE_IS_JUMBO_CAPABLE(sc)        ((sc)->bge_flags & BGE_FLAG_JUMBO)
#define BGE_IS_5700_FAMILY(sc)          ((sc)->bge_flags & BGE_FLAG_5700_FAMILY)
#define BGE_IS_5705_PLUS(sc)            ((sc)->bge_flags & BGE_FLAG_5705_PLUS)
#define BGE_IS_5714_FAMILY(sc)          ((sc)->bge_flags & BGE_FLAG_5714_FAMILY)
#define BGE_IS_575X_PLUS(sc)            ((sc)->bge_flags & BGE_FLAG_575X_PLUS)
#define BGE_IS_5755_PLUS(sc)            ((sc)->bge_flags & BGE_FLAG_5755_PLUS)

typedef int     (*bge_eaddr_fcn_t)(struct bge_softc *, uint8_t[]);

static int      bge_probe(device_t);
static int      bge_attach(device_t);
static int      bge_detach(device_t);
static void     bge_txeof(struct bge_softc *);
static void     bge_rxeof(struct bge_softc *);

static void     bge_tick(void *);
static void     bge_stats_update(struct bge_softc *);
static void     bge_stats_update_regs(struct bge_softc *);
static int      bge_encap(struct bge_softc *, struct mbuf **, uint32_t *);

#ifdef DEVICE_POLLING
static void     bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count);
#endif
static void     bge_intr(void *);
static void     bge_enable_intr(struct bge_softc *);
static void     bge_disable_intr(struct bge_softc *);
static void     bge_start(struct ifnet *);
static int      bge_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void     bge_init(void *);
static void     bge_stop(struct bge_softc *);
static void     bge_watchdog(struct ifnet *);
static void     bge_shutdown(device_t);
static int      bge_suspend(device_t);
static int      bge_resume(device_t);
static int      bge_ifmedia_upd(struct ifnet *);
static void     bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);

static uint8_t  bge_nvram_getbyte(struct bge_softc *, int, uint8_t *);
static int      bge_read_nvram(struct bge_softc *, caddr_t, int, int);

static uint8_t  bge_eeprom_getbyte(struct bge_softc *, uint32_t, uint8_t *);
static int      bge_read_eeprom(struct bge_softc *, caddr_t, uint32_t, size_t);

static void     bge_setmulti(struct bge_softc *);
static void     bge_setpromisc(struct bge_softc *);

static int      bge_alloc_jumbo_mem(struct bge_softc *);
static void     bge_free_jumbo_mem(struct bge_softc *);
static struct bge_jslot
                *bge_jalloc(struct bge_softc *);
static void     bge_jfree(void *);
static void     bge_jref(void *);
static int      bge_newbuf_std(struct bge_softc *, int, int);
static int      bge_newbuf_jumbo(struct bge_softc *, int, int);
static void     bge_setup_rxdesc_std(struct bge_softc *, int);
static void     bge_setup_rxdesc_jumbo(struct bge_softc *, int);
static int      bge_init_rx_ring_std(struct bge_softc *);
static void     bge_free_rx_ring_std(struct bge_softc *);
static int      bge_init_rx_ring_jumbo(struct bge_softc *);
static void     bge_free_rx_ring_jumbo(struct bge_softc *);
static void     bge_free_tx_ring(struct bge_softc *);
static int      bge_init_tx_ring(struct bge_softc *);

static int      bge_chipinit(struct bge_softc *);
static int      bge_blockinit(struct bge_softc *);

static uint32_t bge_readmem_ind(struct bge_softc *, uint32_t);
static void     bge_writemem_ind(struct bge_softc *, uint32_t, uint32_t);
#ifdef notdef
static uint32_t bge_readreg_ind(struct bge_softc *, uint32_t);
#endif
static void     bge_writereg_ind(struct bge_softc *, uint32_t, uint32_t);
static void     bge_writemem_direct(struct bge_softc *, uint32_t, uint32_t);
static void     bge_writembx(struct bge_softc *, int, int);

static int      bge_miibus_readreg(device_t, int, int);
static int      bge_miibus_writereg(device_t, int, int, int);
static void     bge_miibus_statchg(device_t);
static void     bge_bcm5700_link_upd(struct bge_softc *, uint32_t);
static void     bge_tbi_link_upd(struct bge_softc *, uint32_t);
static void     bge_copper_link_upd(struct bge_softc *, uint32_t);

static void     bge_reset(struct bge_softc *);

static int      bge_dma_alloc(struct bge_softc *);
static void     bge_dma_free(struct bge_softc *);
static int      bge_dma_block_alloc(struct bge_softc *, bus_size_t,
                                    bus_dma_tag_t *, bus_dmamap_t *,
                                    void **, bus_addr_t *);
static void     bge_dma_block_free(bus_dma_tag_t, bus_dmamap_t, void *);

static int      bge_get_eaddr_mem(struct bge_softc *, uint8_t[]);
static int      bge_get_eaddr_nvram(struct bge_softc *, uint8_t[]);
static int      bge_get_eaddr_eeprom(struct bge_softc *, uint8_t[]);
static int      bge_get_eaddr(struct bge_softc *, uint8_t[]);

static void     bge_coal_change(struct bge_softc *);
static int      bge_sysctl_rx_coal_ticks(SYSCTL_HANDLER_ARGS);
static int      bge_sysctl_tx_coal_ticks(SYSCTL_HANDLER_ARGS);
static int      bge_sysctl_rx_max_coal_bds(SYSCTL_HANDLER_ARGS);
static int      bge_sysctl_tx_max_coal_bds(SYSCTL_HANDLER_ARGS);
static int      bge_sysctl_coal_chg(SYSCTL_HANDLER_ARGS, uint32_t *, uint32_t);

/*
 * Set the following tunable to 1 for some IBM blade servers with the DNLK
 * switch module. Auto negotiation is broken for those configurations.
 */
static int      bge_fake_autoneg = 0;
TUNABLE_INT("hw.bge.fake_autoneg", &bge_fake_autoneg);
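/*
 * Tunables declared with TUNABLE_INT() are fetched from the kernel
 * environment at boot, so they can be set from /boot/loader.conf, e.g.:
 *
 *      hw.bge.fake_autoneg="1"
 */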

/* Interrupt moderation control variables. */
static int      bge_rx_coal_ticks = 100;        /* usec */
static int      bge_tx_coal_ticks = 1023;       /* usec */
static int      bge_rx_max_coal_bds = 80;
static int      bge_tx_max_coal_bds = 128;

TUNABLE_INT("hw.bge.rx_coal_ticks", &bge_rx_coal_ticks);
TUNABLE_INT("hw.bge.tx_coal_ticks", &bge_tx_coal_ticks);
TUNABLE_INT("hw.bge.rx_max_coal_bds", &bge_rx_max_coal_bds);
TUNABLE_INT("hw.bge.tx_max_coal_bds", &bge_tx_max_coal_bds);

#if !defined(KTR_IF_BGE)
#define KTR_IF_BGE      KTR_ALL
#endif
KTR_INFO_MASTER(if_bge);
KTR_INFO(KTR_IF_BGE, if_bge, intr, 0, "intr");
KTR_INFO(KTR_IF_BGE, if_bge, rx_pkt, 1, "rx_pkt");
KTR_INFO(KTR_IF_BGE, if_bge, tx_pkt, 2, "tx_pkt");
#define logif(name)     KTR_LOG(if_bge_ ## name)

static device_method_t bge_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe,         bge_probe),
        DEVMETHOD(device_attach,        bge_attach),
        DEVMETHOD(device_detach,        bge_detach),
        DEVMETHOD(device_shutdown,      bge_shutdown),
        DEVMETHOD(device_suspend,       bge_suspend),
        DEVMETHOD(device_resume,        bge_resume),

        /* bus interface */
        DEVMETHOD(bus_print_child,      bus_generic_print_child),
        DEVMETHOD(bus_driver_added,     bus_generic_driver_added),

        /* MII interface */
        DEVMETHOD(miibus_readreg,       bge_miibus_readreg),
        DEVMETHOD(miibus_writereg,      bge_miibus_writereg),
        DEVMETHOD(miibus_statchg,       bge_miibus_statchg),

        { 0, 0 }
};

static DEFINE_CLASS_0(bge, bge_driver, bge_methods, sizeof(struct bge_softc));
static devclass_t bge_devclass;

DECLARE_DUMMY_MODULE(if_bge);
DRIVER_MODULE(if_bge, pci, bge_driver, bge_devclass, NULL, NULL);
DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, NULL, NULL);

static uint32_t
bge_readmem_ind(struct bge_softc *sc, uint32_t off)
{
        device_t dev = sc->bge_dev;
        uint32_t val;

        pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
        val = pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4);
        pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
        return (val);
}

static void
bge_writemem_ind(struct bge_softc *sc, uint32_t off, uint32_t val)
{
        device_t dev = sc->bge_dev;

        pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
        pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
        pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
}

#ifdef notdef
static uint32_t
bge_readreg_ind(struct bge_softc *sc, uint32_t off)
{
        device_t dev = sc->bge_dev;

        pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
        return(pci_read_config(dev, BGE_PCI_REG_DATA, 4));
}
#endif

static void
bge_writereg_ind(struct bge_softc *sc, uint32_t off, uint32_t val)
{
        device_t dev = sc->bge_dev;

        pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
        pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);
}

static void
bge_writemem_direct(struct bge_softc *sc, uint32_t off, uint32_t val)
{
        CSR_WRITE_4(sc, off, val);
}

static void
bge_writembx(struct bge_softc *sc, int off, int val)
{
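        /*
         * The BCM5906 expects mailbox writes in its low-priority
         * mailbox region, so rebase the offset from the standard
         * mailbox window.
         */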
        if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
                off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI;

        CSR_WRITE_4(sc, off, val);
}

static uint8_t
bge_nvram_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
{
        uint32_t access, byte = 0;
        int i;

        /* Lock. */
        CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
        for (i = 0; i < 8000; i++) {
                if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1)
                        break;
                DELAY(20);
        }
        if (i == 8000)
                return (1);

        /* Enable access. */
        access = CSR_READ_4(sc, BGE_NVRAM_ACCESS);
        CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access | BGE_NVRAMACC_ENABLE);

        CSR_WRITE_4(sc, BGE_NVRAM_ADDR, addr & 0xfffffffc);
        CSR_WRITE_4(sc, BGE_NVRAM_CMD, BGE_NVRAM_READCMD);
        for (i = 0; i < BGE_TIMEOUT * 10; i++) {
                DELAY(10);
                if (CSR_READ_4(sc, BGE_NVRAM_CMD) & BGE_NVRAMCMD_DONE) {
                        DELAY(10);
                        break;
                }
        }

        if (i == BGE_TIMEOUT * 10) {
                if_printf(&sc->arpcom.ac_if, "nvram read timed out\n");
                return (1);
        }

        /* Get result. */
        byte = CSR_READ_4(sc, BGE_NVRAM_RDDATA);

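        /*
         * The NVRAM data register holds a big-endian 32-bit word;
         * byte-swap it and shift so that (addr % 4) selects the byte,
         * e.g. addr % 4 == 1 picks bits 15:8 of the swapped word.
         */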
        *dest = (bswap32(byte) >> ((addr % 4) * 8)) & 0xFF;

        /* Disable access. */
        CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access);

        /* Unlock. */
        CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_CLR1);
        CSR_READ_4(sc, BGE_NVRAM_SWARB);

        return (0);
}

/*
 * Read a sequence of bytes from NVRAM.
 */
static int
bge_read_nvram(struct bge_softc *sc, caddr_t dest, int off, int cnt)
{
        int err = 0, i;
        uint8_t byte = 0;

        if (sc->bge_asicrev != BGE_ASICREV_BCM5906)
                return (1);

        for (i = 0; i < cnt; i++) {
                err = bge_nvram_getbyte(sc, off + i, &byte);
                if (err)
                        break;
                *(dest + i) = byte;
        }

        return (err ? 1 : 0);
}

/*
 * Read a byte of data stored in the EEPROM at address 'addr.' The
 * BCM570x supports both the traditional bitbang interface and an
 * auto access interface for reading the EEPROM. We use the auto
 * access method.
 */
static uint8_t
bge_eeprom_getbyte(struct bge_softc *sc, uint32_t addr, uint8_t *dest)
{
        int i;
        uint32_t byte = 0;

        /*
         * Enable use of auto EEPROM access so we can avoid
         * having to use the bitbang method.
         */
        BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);

        /* Reset the EEPROM, load the clock period. */
        CSR_WRITE_4(sc, BGE_EE_ADDR,
            BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
        DELAY(20);

        /* Issue the read EEPROM command. */
        CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);

        /* Wait for completion. */
        for (i = 0; i < BGE_TIMEOUT * 10; i++) {
                DELAY(10);
                if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
                        break;
        }

        if (i == BGE_TIMEOUT * 10) {
                if_printf(&sc->arpcom.ac_if, "eeprom read timed out\n");
                return(1);
        }

        /* Get result. */
        byte = CSR_READ_4(sc, BGE_EE_DATA);

        *dest = (byte >> ((addr % 4) * 8)) & 0xFF;

        return(0);
}

/*
 * Read a sequence of bytes from the EEPROM.
 */
static int
bge_read_eeprom(struct bge_softc *sc, caddr_t dest, uint32_t off, size_t len)
{
        size_t i;
        int err;
        uint8_t byte;

        for (byte = 0, err = 0, i = 0; i < len; i++) {
                err = bge_eeprom_getbyte(sc, off + i, &byte);
                if (err)
                        break;
                *(dest + i) = byte;
        }

        return(err ? 1 : 0);
}

static int
bge_miibus_readreg(device_t dev, int phy, int reg)
{
        struct bge_softc *sc = device_get_softc(dev);
        struct ifnet *ifp = &sc->arpcom.ac_if;
        uint32_t val, autopoll;
        int i;

        KASSERT(phy == sc->bge_phyno,
            ("invalid phyno %d, should be %d", phy, sc->bge_phyno));

        /* Reading with autopolling on may trigger PCI errors */
        autopoll = CSR_READ_4(sc, BGE_MI_MODE);
        if (autopoll & BGE_MIMODE_AUTOPOLL) {
                BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
                DELAY(40);
        }

        CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY|
            BGE_MIPHY(phy)|BGE_MIREG(reg));

        for (i = 0; i < BGE_TIMEOUT; i++) {
                DELAY(10);
                val = CSR_READ_4(sc, BGE_MI_COMM);
                if (!(val & BGE_MICOMM_BUSY))
                        break;
        }

        if (i == BGE_TIMEOUT) {
                if_printf(ifp, "PHY read timed out "
                          "(phy %d, reg %d, val 0x%08x)\n", phy, reg, val);
                val = 0;
                goto done;
        }

        DELAY(5);
        val = CSR_READ_4(sc, BGE_MI_COMM);

done:
        if (autopoll & BGE_MIMODE_AUTOPOLL) {
                BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
                DELAY(40);
        }

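        /* If the PHY flagged the read as failed, report zeroed data. */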
        if (val & BGE_MICOMM_READFAIL)
                return(0);

        return(val & 0xFFFF);
}

static int
bge_miibus_writereg(device_t dev, int phy, int reg, int val)
{
        struct bge_softc *sc = device_get_softc(dev);
        uint32_t autopoll;
        int i;

        KASSERT(phy == sc->bge_phyno,
            ("invalid phyno %d, should be %d", phy, sc->bge_phyno));

        if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
            (reg == BRGPHY_MII_1000CTL || reg == BRGPHY_MII_AUXCTL))
                return(0);

        /* Writing with autopolling on may trigger PCI errors */
        autopoll = CSR_READ_4(sc, BGE_MI_MODE);
        if (autopoll & BGE_MIMODE_AUTOPOLL) {
                BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
                DELAY(40);
        }

        CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY|
            BGE_MIPHY(phy)|BGE_MIREG(reg)|val);

        for (i = 0; i < BGE_TIMEOUT; i++) {
                DELAY(10);
                if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) {
                        DELAY(5);
                        CSR_READ_4(sc, BGE_MI_COMM); /* dummy read */
                        break;
                }
        }

        if (autopoll & BGE_MIMODE_AUTOPOLL) {
                BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
                DELAY(40);
        }

        if (i == BGE_TIMEOUT) {
                if_printf(&sc->arpcom.ac_if, "PHY write timed out "
                          "(phy %d, reg %d, val %d)\n", phy, reg, val);
                return(0);
        }

        return(0);
}

static void
bge_miibus_statchg(device_t dev)
{
        struct bge_softc *sc;
        struct mii_data *mii;

        sc = device_get_softc(dev);
        mii = device_get_softc(sc->bge_miibus);

        BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
        if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
            IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) {
                BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
        } else {
                BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
        }

        if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
                BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
        } else {
                BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
        }
}

/*
 * Memory management for jumbo frames.
 */
static int
bge_alloc_jumbo_mem(struct bge_softc *sc)
{
        struct ifnet *ifp = &sc->arpcom.ac_if;
        struct bge_jslot *entry;
        uint8_t *ptr;
        bus_addr_t paddr;
        int i, error;

        /*
         * Create tag for jumbo mbufs.
         * This is really a bit of a kludge. We allocate a special
         * jumbo buffer pool which (thanks to the way our DMA
         * memory allocation works) will consist of contiguous
         * pages. This means that even though a jumbo buffer might
         * be larger than a page size, we don't really need to
         * map it into more than one DMA segment. However, the
         * default mbuf tag will result in multi-segment mappings,
         * so we have to create a special jumbo mbuf tag that
         * lets us get away with mapping the jumbo buffers as
         * a single segment. I think eventually the driver should
         * be changed so that it uses ordinary mbufs and cluster
         * buffers, i.e. jumbo frames can span multiple DMA
         * descriptors. But that's a project for another day.
         */

        /*
         * Create DMA resources for the jumbo RX ring.
         */
        error = bge_dma_block_alloc(sc, BGE_JUMBO_RX_RING_SZ,
                                    &sc->bge_cdata.bge_rx_jumbo_ring_tag,
                                    &sc->bge_cdata.bge_rx_jumbo_ring_map,
                                    (void *)&sc->bge_ldata.bge_rx_jumbo_ring,
                                    &sc->bge_ldata.bge_rx_jumbo_ring_paddr);
        if (error) {
                if_printf(ifp, "could not create jumbo RX ring\n");
                return error;
        }

        /*
         * Create DMA resources for the jumbo buffer block.
         */
        error = bge_dma_block_alloc(sc, BGE_JMEM,
                                    &sc->bge_cdata.bge_jumbo_tag,
                                    &sc->bge_cdata.bge_jumbo_map,
                                    (void **)&sc->bge_ldata.bge_jumbo_buf,
                                    &paddr);
        if (error) {
                if_printf(ifp, "could not create jumbo buffer\n");
                return error;
        }

        SLIST_INIT(&sc->bge_jfree_listhead);

        /*
         * Now divide it up into 9K pieces and save the addresses
         * in an array. Note that we play an evil trick here by using
         * the first few bytes in the buffer to hold the address
         * of the softc structure for this interface. This is because
         * bge_jfree() needs it, but it is called by the mbuf management
         * code which will not pass it to us explicitly.
         */
        for (i = 0, ptr = sc->bge_ldata.bge_jumbo_buf; i < BGE_JSLOTS; i++) {
                entry = &sc->bge_cdata.bge_jslots[i];
                entry->bge_sc = sc;
                entry->bge_buf = ptr;
                entry->bge_paddr = paddr;
                entry->bge_inuse = 0;
                entry->bge_slot = i;
                SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, entry, jslot_link);

                ptr += BGE_JLEN;
                paddr += BGE_JLEN;
        }
        return 0;
}

static void
bge_free_jumbo_mem(struct bge_softc *sc)
{
        /* Destroy jumbo RX ring. */
        bge_dma_block_free(sc->bge_cdata.bge_rx_jumbo_ring_tag,
                           sc->bge_cdata.bge_rx_jumbo_ring_map,
                           sc->bge_ldata.bge_rx_jumbo_ring);

        /* Destroy jumbo buffer block. */
        bge_dma_block_free(sc->bge_cdata.bge_jumbo_tag,
                           sc->bge_cdata.bge_jumbo_map,
                           sc->bge_ldata.bge_jumbo_buf);
}

/*
 * Allocate a jumbo buffer.
 */
static struct bge_jslot *
bge_jalloc(struct bge_softc *sc)
{
        struct bge_jslot *entry;

        lwkt_serialize_enter(&sc->bge_jslot_serializer);
        entry = SLIST_FIRST(&sc->bge_jfree_listhead);
        if (entry) {
                SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jslot_link);
                entry->bge_inuse = 1;
        } else {
                if_printf(&sc->arpcom.ac_if, "no free jumbo buffers\n");
        }
        lwkt_serialize_exit(&sc->bge_jslot_serializer);
        return(entry);
}

/*
 * Adjust usage count on a jumbo buffer.
 */
static void
bge_jref(void *arg)
{
        struct bge_jslot *entry = (struct bge_jslot *)arg;
        struct bge_softc *sc = entry->bge_sc;

        if (sc == NULL)
                panic("bge_jref: can't find softc pointer!");

        if (&sc->bge_cdata.bge_jslots[entry->bge_slot] != entry) {
                panic("bge_jref: asked to reference buffer "
                    "that we don't manage!");
        } else if (entry->bge_inuse == 0) {
                panic("bge_jref: buffer already free!");
        } else {
                atomic_add_int(&entry->bge_inuse, 1);
        }
}

/*
 * Release a jumbo buffer.
 */
static void
bge_jfree(void *arg)
{
        struct bge_jslot *entry = (struct bge_jslot *)arg;
        struct bge_softc *sc = entry->bge_sc;

        if (sc == NULL)
                panic("bge_jfree: can't find softc pointer!");

        if (&sc->bge_cdata.bge_jslots[entry->bge_slot] != entry) {
                panic("bge_jfree: asked to free buffer that we don't manage!");
        } else if (entry->bge_inuse == 0) {
                panic("bge_jfree: buffer already free!");
        } else {
                /*
                 * Possible MP race to 0, use the serializer.  The atomic insn
                 * is still needed for races against bge_jref().
                 */
                lwkt_serialize_enter(&sc->bge_jslot_serializer);
                atomic_subtract_int(&entry->bge_inuse, 1);
                if (entry->bge_inuse == 0) {
                        SLIST_INSERT_HEAD(&sc->bge_jfree_listhead,
                                          entry, jslot_link);
                }
                lwkt_serialize_exit(&sc->bge_jslot_serializer);
        }
}

/*
 * Initialize a standard receive ring descriptor.
 */
static int
bge_newbuf_std(struct bge_softc *sc, int i, int init)
{
        struct mbuf *m_new = NULL;
        bus_dma_segment_t seg;
        bus_dmamap_t map;
        int error, nsegs;

        m_new = m_getcl(init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
        if (m_new == NULL)
                return ENOBUFS;
        m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;

        if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
                m_adj(m_new, ETHER_ALIGN);

        error = bus_dmamap_load_mbuf_segment(sc->bge_cdata.bge_rx_mtag,
                        sc->bge_cdata.bge_rx_tmpmap, m_new,
                        &seg, 1, &nsegs, BUS_DMA_NOWAIT);
        if (error) {
                m_freem(m_new);
                return error;
        }

        if (!init) {
                bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
                                sc->bge_cdata.bge_rx_std_dmamap[i],
                                BUS_DMASYNC_POSTREAD);
                bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag,
                        sc->bge_cdata.bge_rx_std_dmamap[i]);
        }

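        /*
         * The new mbuf was loaded into the spare map above; swap that map
         * with the ring slot's map so the slot always owns a loaded map
         * and the spare is free for the next refill.
         */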
        map = sc->bge_cdata.bge_rx_tmpmap;
        sc->bge_cdata.bge_rx_tmpmap = sc->bge_cdata.bge_rx_std_dmamap[i];
        sc->bge_cdata.bge_rx_std_dmamap[i] = map;

        sc->bge_cdata.bge_rx_std_chain[i].bge_mbuf = m_new;
        sc->bge_cdata.bge_rx_std_chain[i].bge_paddr = seg.ds_addr;

        bge_setup_rxdesc_std(sc, i);
        return 0;
}

static void
bge_setup_rxdesc_std(struct bge_softc *sc, int i)
{
        struct bge_rxchain *rc;
        struct bge_rx_bd *r;

        rc = &sc->bge_cdata.bge_rx_std_chain[i];
        r = &sc->bge_ldata.bge_rx_std_ring[i];

        r->bge_addr.bge_addr_lo = BGE_ADDR_LO(rc->bge_paddr);
        r->bge_addr.bge_addr_hi = BGE_ADDR_HI(rc->bge_paddr);
        r->bge_len = rc->bge_mbuf->m_len;
        r->bge_idx = i;
        r->bge_flags = BGE_RXBDFLAG_END;
}

/*
 * Initialize a jumbo receive ring descriptor. This allocates
 * a jumbo buffer from the pool managed internally by the driver.
 */
static int
bge_newbuf_jumbo(struct bge_softc *sc, int i, int init)
{
        struct mbuf *m_new = NULL;
        struct bge_jslot *buf;
        bus_addr_t paddr;

        /* Allocate the mbuf. */
        MGETHDR(m_new, init ? MB_WAIT : MB_DONTWAIT, MT_DATA);
        if (m_new == NULL)
                return ENOBUFS;

        /* Allocate the jumbo buffer */
        buf = bge_jalloc(sc);
        if (buf == NULL) {
                m_freem(m_new);
                return ENOBUFS;
        }

        /* Attach the buffer to the mbuf. */
        m_new->m_ext.ext_arg = buf;
        m_new->m_ext.ext_buf = buf->bge_buf;
        m_new->m_ext.ext_free = bge_jfree;
        m_new->m_ext.ext_ref = bge_jref;
        m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN;

        m_new->m_flags |= M_EXT;

        m_new->m_data = m_new->m_ext.ext_buf;
        m_new->m_len = m_new->m_pkthdr.len = m_new->m_ext.ext_size;

        paddr = buf->bge_paddr;
        if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0) {
                m_adj(m_new, ETHER_ALIGN);
                paddr += ETHER_ALIGN;
        }

        /* Save necessary information */
        sc->bge_cdata.bge_rx_jumbo_chain[i].bge_mbuf = m_new;
        sc->bge_cdata.bge_rx_jumbo_chain[i].bge_paddr = paddr;

        /* Set up the descriptor. */
        bge_setup_rxdesc_jumbo(sc, i);
        return 0;
}

static void
bge_setup_rxdesc_jumbo(struct bge_softc *sc, int i)
{
        struct bge_rx_bd *r;
        struct bge_rxchain *rc;

        r = &sc->bge_ldata.bge_rx_jumbo_ring[i];
        rc = &sc->bge_cdata.bge_rx_jumbo_chain[i];

        r->bge_addr.bge_addr_lo = BGE_ADDR_LO(rc->bge_paddr);
        r->bge_addr.bge_addr_hi = BGE_ADDR_HI(rc->bge_paddr);
        r->bge_len = rc->bge_mbuf->m_len;
        r->bge_idx = i;
        r->bge_flags = BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING;
}

static int
bge_init_rx_ring_std(struct bge_softc *sc)
{
        int i, error;

        for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
                error = bge_newbuf_std(sc, i, 1);
                if (error)
                        return error;
        }

        sc->bge_std = BGE_STD_RX_RING_CNT - 1;
        bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);

        return(0);
}

static void
bge_free_rx_ring_std(struct bge_softc *sc)
{
        int i;

        for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
                struct bge_rxchain *rc = &sc->bge_cdata.bge_rx_std_chain[i];

                if (rc->bge_mbuf != NULL) {
                        bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag,
                                          sc->bge_cdata.bge_rx_std_dmamap[i]);
                        m_freem(rc->bge_mbuf);
                        rc->bge_mbuf = NULL;
                }
                bzero(&sc->bge_ldata.bge_rx_std_ring[i],
                    sizeof(struct bge_rx_bd));
        }
}

static int
bge_init_rx_ring_jumbo(struct bge_softc *sc)
{
        struct bge_rcb *rcb;
        int i, error;

        for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
                error = bge_newbuf_jumbo(sc, i, 1);
                if (error)
                        return error;
        }

        sc->bge_jumbo = BGE_JUMBO_RX_RING_CNT - 1;

        rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
        rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 0);
        CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);

        bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);

        return(0);
}

static void
bge_free_rx_ring_jumbo(struct bge_softc *sc)
{
        int i;

        for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
                struct bge_rxchain *rc = &sc->bge_cdata.bge_rx_jumbo_chain[i];

                if (rc->bge_mbuf != NULL) {
                        m_freem(rc->bge_mbuf);
                        rc->bge_mbuf = NULL;
                }
                bzero(&sc->bge_ldata.bge_rx_jumbo_ring[i],
                    sizeof(struct bge_rx_bd));
        }
}

static void
bge_free_tx_ring(struct bge_softc *sc)
{
        int i;

        for (i = 0; i < BGE_TX_RING_CNT; i++) {
                if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
                        bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag,
                                          sc->bge_cdata.bge_tx_dmamap[i]);
                        m_freem(sc->bge_cdata.bge_tx_chain[i]);
                        sc->bge_cdata.bge_tx_chain[i] = NULL;
                }
                bzero(&sc->bge_ldata.bge_tx_ring[i],
                    sizeof(struct bge_tx_bd));
        }
}

static int
bge_init_tx_ring(struct bge_softc *sc)
{
        sc->bge_txcnt = 0;
        sc->bge_tx_saved_considx = 0;
        sc->bge_tx_prodidx = 0;

        /* Initialize transmit producer index for host-memory send ring. */
        bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);

        /* 5700 b2 errata */
        if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
                bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);

        bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
        /* 5700 b2 errata */
        if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
                bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);

        return(0);
}

static void
bge_setmulti(struct bge_softc *sc)
{
        struct ifnet *ifp;
        struct ifmultiaddr *ifma;
        uint32_t hashes[4] = { 0, 0, 0, 0 };
        int h, i;

        ifp = &sc->arpcom.ac_if;

        if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
                for (i = 0; i < 4; i++)
                        CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
                return;
        }

        /* First, zot all the existing filters. */
        for (i = 0; i < 4; i++)
                CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);

        /* Now program new ones. */
        TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
                if (ifma->ifma_addr->sa_family != AF_LINK)
                        continue;
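                /*
                 * The low 7 bits of the little-endian CRC of the MAC
                 * address select one of 128 hash-filter bits: bits 6:5
                 * pick one of the four 32-bit BGE_MAR registers and
                 * bits 4:0 the bit within it.
                 */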
                h = ether_crc32_le(
                    LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
                    ETHER_ADDR_LEN) & 0x7f;
                hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
        }

        for (i = 0; i < 4; i++)
                CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
}

/*
 * Do endian, PCI and DMA initialization. Also check the on-board ROM
 * self-test results.
 */
static int
bge_chipinit(struct bge_softc *sc)
{
        int i;
        uint32_t dma_rw_ctl;
        uint16_t val;

        /* Set endian type before we access any non-PCI registers. */
        pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, BGE_INIT, 4);

        /* Clear the MAC control register */
        CSR_WRITE_4(sc, BGE_MAC_MODE, 0);

        /*
         * Clear the MAC statistics block in the NIC's
         * internal memory.
         */
        for (i = BGE_STATS_BLOCK;
            i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t))
                BGE_MEMWIN_WRITE(sc, i, 0);

        for (i = BGE_STATUS_BLOCK;
            i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t))
                BGE_MEMWIN_WRITE(sc, i, 0);

        if (sc->bge_chiprev == BGE_CHIPREV_5704_BX) {
                /*
                 * Fix data corruption caused by non-qword write with WB.
                 * Fix master abort in PCI mode.
                 * Fix PCI latency timer.
                 */
                val = pci_read_config(sc->bge_dev, BGE_PCI_MSI_DATA + 2, 2);
                val |= (1 << 10) | (1 << 12) | (1 << 13);
                pci_write_config(sc->bge_dev, BGE_PCI_MSI_DATA + 2, val, 2);
        }

        /* Set up the PCI DMA control register. */
        if (sc->bge_flags & BGE_FLAG_PCIE) {
                /* PCI Express */
                dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
                    (0xf << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
                    (0x2 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
        } else if (sc->bge_flags & BGE_FLAG_PCIX) {
                /* PCI-X bus */
                if (BGE_IS_5714_FAMILY(sc)) {
                        dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD;
                        dma_rw_ctl &= ~BGE_PCIDMARWCTL_ONEDMA_ATONCE; /* XXX */
                        /* XXX magic values, Broadcom-supplied Linux driver */
                        if (sc->bge_asicrev == BGE_ASICREV_BCM5780) {
                                dma_rw_ctl |= (1 << 20) | (1 << 18) |
                                    BGE_PCIDMARWCTL_ONEDMA_ATONCE;
                        } else {
                                dma_rw_ctl |= (1 << 20) | (1 << 18) | (1 << 15);
                        }
                } else if (sc->bge_asicrev == BGE_ASICREV_BCM5703) {
                        /*
                         * In the BCM5703, the DMA read watermark should
                         * be set to less than or equal to the maximum
                         * memory read byte count of the PCI-X command
                         * register.
                         */
                        dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
                            (0x4 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
                            (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
                } else if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
                        /*
                         * The 5704 uses a different encoding of read/write
                         * watermarks.
                         */
                        dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
                            (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
                            (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
                } else {
                        dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
                            (0x3 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
                            (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
                            (0x0F);
                }

                /*
                 * 5703 and 5704 need ONEDMA_AT_ONCE as a workaround
                 * for hardware bugs.
                 */
                if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
                    sc->bge_asicrev == BGE_ASICREV_BCM5704) {
                        uint32_t tmp;

                        tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1f;
                        if (tmp == 0x6 || tmp == 0x7)
                                dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE;
                }
        } else {
                /* Conventional PCI bus */
                dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
                    (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
                    (0x7 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
                    (0x0F);
        }

        if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
            sc->bge_asicrev == BGE_ASICREV_BCM5704 ||
            sc->bge_asicrev == BGE_ASICREV_BCM5705)
                dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
        pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);

        /*
         * Set up general mode register.
         */
        CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS|
            BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS|
            BGE_MODECTL_TX_NO_PHDR_CSUM);

        /*
         * The BCM5701 B5 has a bug causing data corruption when using
         * 64-bit DMA reads, which can be terminated early and then
         * completed later as 32-bit accesses, in combination with
         * certain bridges.
         */
        if (sc->bge_asicrev == BGE_ASICREV_BCM5701 &&
            sc->bge_chipid == BGE_CHIPID_BCM5701_B5)
                BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_FORCE_PCI32);

        /*
         * Disable memory write invalidate.  Apparently it is not supported
         * properly by these devices.
         */
        PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD, PCIM_CMD_MWIEN, 4);

        /* Set the timer prescaler (always 66MHz) */
        CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/);

        if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
                DELAY(40);      /* XXX */

                /* Put PHY into ready state */
                BGE_CLRBIT(sc, BGE_MISC_CFG, BGE_MISCCFG_EPHY_IDDQ);
                CSR_READ_4(sc, BGE_MISC_CFG); /* Flush */
                DELAY(40);
        }

        return(0);
}

static int
bge_blockinit(struct bge_softc *sc)
{
        struct bge_rcb *rcb;
        bus_size_t vrcb;
        bge_hostaddr taddr;
        uint32_t val;
        int i;

        /*
         * Initialize the memory window pointer register so that
         * we can access the first 32K of internal NIC RAM. This will
         * allow us to set up the TX send ring RCBs and the RX return
         * ring RCBs, plus other things which live in NIC memory.
         */
        CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);

        /* Note: the BCM5704 has a smaller mbuf space than other chips. */

        if (!BGE_IS_5705_PLUS(sc)) {
                /* Configure mbuf memory pool */
                CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_BUFFPOOL_1);
                if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
                        CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
                else
                        CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);

                /* Configure DMA resource pool */
                CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
                    BGE_DMA_DESCRIPTORS);
                CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
        }

        /* Configure mbuf pool watermarks */
        if (!BGE_IS_5705_PLUS(sc)) {
                CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
                CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
                CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
        } else if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
                CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
                CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x04);
                CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x10);
        } else {
                CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
                CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
                CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
        }

        /* Configure DMA resource watermarks */
        CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
        CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);

        /* Enable buffer manager */
        if (!BGE_IS_5705_PLUS(sc)) {
                CSR_WRITE_4(sc, BGE_BMAN_MODE,
                    BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN);

                /* Poll for buffer manager start indication */
                for (i = 0; i < BGE_TIMEOUT; i++) {
                        if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
                                break;
                        DELAY(10);
                }

                if (i == BGE_TIMEOUT) {
                        if_printf(&sc->arpcom.ac_if,
                                  "buffer manager failed to start\n");
                        return(ENXIO);
                }
        }

        /* Enable flow-through queues */
        CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
        CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);

        /* Wait until queue initialization is complete */
        for (i = 0; i < BGE_TIMEOUT; i++) {
                if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
                        break;
                DELAY(10);
        }

        if (i == BGE_TIMEOUT) {
                if_printf(&sc->arpcom.ac_if,
                          "flow-through queue init failed\n");
                return(ENXIO);
        }

        /* Initialize the standard RX ring control block */
        rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb;
        rcb->bge_hostaddr.bge_addr_lo =
            BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr);
        rcb->bge_hostaddr.bge_addr_hi =
            BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr);
        if (BGE_IS_5705_PLUS(sc))
                rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
        else
                rcb->bge_maxlen_flags =
                    BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
        rcb->bge_nicaddr = BGE_STD_RX_RINGS;
        CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
        CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
        CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
        CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);

        /*
         * Initialize the jumbo RX ring control block
         * We set the 'ring disabled' bit in the flags
         * field until we're actually ready to start
         * using this ring (i.e. once we set the MTU
         * high enough to require it).
         */
        if (BGE_IS_JUMBO_CAPABLE(sc)) {
1453                 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
1454
1455                 rcb->bge_hostaddr.bge_addr_lo =
1456                     BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1457                 rcb->bge_hostaddr.bge_addr_hi =
1458                     BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1459                 rcb->bge_maxlen_flags =
1460                     BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN,
1461                     BGE_RCB_FLAG_RING_DISABLED);
1462                 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
1463                 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
1464                     rcb->bge_hostaddr.bge_addr_hi);
1465                 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
1466                     rcb->bge_hostaddr.bge_addr_lo);
1467                 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
1468                     rcb->bge_maxlen_flags);
1469                 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
1470
1471                 /* Set up dummy disabled mini ring RCB */
1472                 rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb;
1473                 rcb->bge_maxlen_flags =
1474                     BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
1475                 CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
1476                     rcb->bge_maxlen_flags);
1477         }
1478
1479         /*
1480          * Set the BD ring replenish thresholds. The recommended
1481          * values are 1/8th the number of descriptors allocated to
1482          * each ring.
1483          */
1484         if (BGE_IS_5705_PLUS(sc))
1485                 val = 8;
1486         else
1487                 val = BGE_STD_RX_RING_CNT / 8;
1488         CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, val);
1489         if (BGE_IS_JUMBO_CAPABLE(sc)) {
1490                 CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH,
1491                     BGE_JUMBO_RX_RING_CNT/8);
1492         }
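        /*
         * Worked example, assuming the stock ring sizes from the headers:
         * with BGE_STD_RX_RING_CNT = 512 the standard ring threshold is
         * 512 / 8 = 64 BDs, and with BGE_JUMBO_RX_RING_CNT = 256 the
         * jumbo ring threshold is 256 / 8 = 32 BDs, so the chip fetches
         * fresh descriptors in batches of roughly that size.
         */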
1493
1494         /*
1495          * Disable all unused send rings by setting the 'ring disabled'
1496          * bit in the flags field of all the TX send ring control blocks.
1497          * These are located in NIC memory.
1498          */
1499         vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1500         for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) {
1501                 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1502                     BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
1503                 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1504                 vrcb += sizeof(struct bge_rcb);
1505         }
1506
1507         /* Configure TX RCB 0 (we use only the first ring) */
1508         vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1509         BGE_HOSTADDR(taddr, sc->bge_ldata.bge_tx_ring_paddr);
1510         RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1511         RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1512         RCB_WRITE_4(sc, vrcb, bge_nicaddr,
1513             BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
1514         if (!BGE_IS_5705_PLUS(sc)) {
1515                 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1516                     BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
1517         }
1518
1519         /* Disable all unused RX return rings */
1520         vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1521         for (i = 0; i < BGE_RX_RINGS_MAX; i++) {
1522                 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0);
1523                 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0);
1524                 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1525                     BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt,
1526                     BGE_RCB_FLAG_RING_DISABLED));
1527                 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1528                 bge_writembx(sc, BGE_MBX_RX_CONS0_LO +
1529                     (i * (sizeof(uint64_t))), 0);
1530                 vrcb += sizeof(struct bge_rcb);
1531         }
1532
1533         /* Initialize RX ring indexes */
1534         bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0);
1535         if (BGE_IS_JUMBO_CAPABLE(sc))
1536                 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
1537         bge_writembx(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
1538
1539         /*
1540          * Set up RX return ring 0
1541          * Note that the NIC address for RX return rings is 0x00000000.
1542          * The return rings live entirely within the host, so the
1543          * nicaddr field in the RCB isn't used.
1544          */
1545         vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1546         BGE_HOSTADDR(taddr, sc->bge_ldata.bge_rx_return_ring_paddr);
1547         RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1548         RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1549         RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0x00000000);
1550         RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1551             BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));
1552
1553         /* Set random backoff seed for TX */
1554         CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
1555             sc->arpcom.ac_enaddr[0] + sc->arpcom.ac_enaddr[1] +
1556             sc->arpcom.ac_enaddr[2] + sc->arpcom.ac_enaddr[3] +
1557             sc->arpcom.ac_enaddr[4] + sc->arpcom.ac_enaddr[5] +
1558             BGE_TX_BACKOFF_SEED_MASK);
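        /*
         * The seed is just the byte-wise sum of the station address
         * (plus BGE_TX_BACKOFF_SEED_MASK), so NICs sharing a half-duplex
         * segment will almost certainly start from different backoff
         * slots after a collision.
         */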
1559
1560         /* Set inter-packet gap */
1561         CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);
1562
1563         /*
1564          * Specify which ring to use for packets that don't match
1565          * any RX rules.
1566          */
1567         CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
1568
1569         /*
1570          * Configure number of RX lists. One interrupt distribution
1571          * list, sixteen active lists, one bad frames class.
1572          */
1573         CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
1574
1575         /* Initialize RX list placement stats mask. */
1576         CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
1577         CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
1578
1579         /* Disable host coalescing until we get it set up */
1580         CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
1581
1582         /* Poll to make sure it's shut down. */
1583         for (i = 0; i < BGE_TIMEOUT; i++) {
1584                 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
1585                         break;
1586                 DELAY(10);
1587         }
1588
1589         if (i == BGE_TIMEOUT) {
1590                 if_printf(&sc->arpcom.ac_if,
1591                           "host coalescing engine failed to idle\n");
1592                 return(ENXIO);
1593         }
1594
1595         /* Set up host coalescing defaults */
1596         CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
1597         CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
1598         CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
1599         CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
1600         if (!BGE_IS_5705_PLUS(sc)) {
1601                 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
1602                 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
1603         }
1604         CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 1);
1605         CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 1);
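        /*
         * Rough semantics, as far as can be inferred from the register
         * names: the chip posts a status block update (and an interrupt)
         * once either the tick timer expires or the max BD count is
         * reached, whichever happens first; the *_INT variants appear to
         * apply while an interrupt is already being serviced.
         */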
1606
1607         /* Set up address of statistics block */
1608         if (!BGE_IS_5705_PLUS(sc)) {
1609                 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI,
1610                     BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr));
1611                 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
1612                     BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr));
1613
1614                 CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
1615                 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
1616                 CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
1617         }
1618
1619         /* Set up address of status block */
1620         bzero(sc->bge_ldata.bge_status_block, BGE_STATUS_BLK_SZ);
1621         CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI,
1622             BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr));
1623         CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
1624             BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr));
1625
1626         /*
1627          * Set up status block partial update size.
1628          *
1629          * Because only a single TX ring, RX producer ring and RX return
1630          * ring are used, ask the device to update only the minimum part
1631          * of the status block, except on BCM5700 AX/BX, whose status
1632          * block partial update size can't be configured.
1633          */
1634         if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
1635             sc->bge_chipid != BGE_CHIPID_BCM5700_C0) {
1636                 /* XXX Actually reserved on BCM5700 AX/BX */
1637                 val = BGE_STATBLKSZ_FULL;
1638         } else {
1639                 val = BGE_STATBLKSZ_32BYTE;
1640         }
1641
1642         /* Turn on host coalescing state machine */
1643         CSR_WRITE_4(sc, BGE_HCC_MODE, val | BGE_HCCMODE_ENABLE);
1644
1645         /* Turn on RX BD completion state machine and enable attentions */
1646         CSR_WRITE_4(sc, BGE_RBDC_MODE,
1647             BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);
1648
1649         /* Turn on RX list placement state machine */
1650         CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
1651
1652         /* Turn on RX list selector state machine. */
1653         if (!BGE_IS_5705_PLUS(sc))
1654                 CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
1655
1656         val = BGE_MACMODE_TXDMA_ENB | BGE_MACMODE_RXDMA_ENB |
1657             BGE_MACMODE_RX_STATS_CLEAR | BGE_MACMODE_TX_STATS_CLEAR |
1658             BGE_MACMODE_RX_STATS_ENB | BGE_MACMODE_TX_STATS_ENB |
1659             BGE_MACMODE_FRMHDR_DMA_ENB;
1660
1661         if (sc->bge_flags & BGE_FLAG_TBI)
1662                 val |= BGE_PORTMODE_TBI;
1663         else if (sc->bge_flags & BGE_FLAG_MII_SERDES)
1664                 val |= BGE_PORTMODE_GMII;
1665         else
1666                 val |= BGE_PORTMODE_MII;
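        /*
         * Note on the middle case: 5714-family SERDES parts are reached
         * through an MII-style management interface, so they use the GMII
         * port mode here rather than TBI (an inference from the flag
         * names, not chip documentation).
         */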
1667
1668         /* Turn on DMA, clear stats */
1669         CSR_WRITE_4(sc, BGE_MAC_MODE, val);
1670
1671         /* Set misc. local control, enable interrupts on attentions */
1672         CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);
1673
1674 #ifdef notdef
1675         /* Assert GPIO pins for PHY reset */
1676         BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
1677             BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
1678         BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
1679             BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
1680 #endif
1681
1682         /* Turn on DMA completion state machine */
1683         if (!BGE_IS_5705_PLUS(sc))
1684                 CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
1685
1686         /* Turn on write DMA state machine */
1687         val = BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS;
1688         if (BGE_IS_5755_PLUS(sc)) {
1689                 /* Enable host coalescing bug fix. */
1690                 val |= BGE_WDMAMODE_STATUS_TAG_FIX;
1691         }
1692         if (sc->bge_asicrev == BGE_ASICREV_BCM5785) {
1693                 /* Request larger DMA burst size to get better performance. */
1694                 val |= BGE_WDMAMODE_BURST_ALL_DATA;
1695         }
1696         CSR_WRITE_4(sc, BGE_WDMA_MODE, val);
1697         DELAY(40);
1698
1699         /* Turn on read DMA state machine */
1700         val = BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS;
1701         if (sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
1702             sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
1703             sc->bge_asicrev == BGE_ASICREV_BCM57780)
1704                 val |= BGE_RDMAMODE_BD_SBD_CRPT_ATTN |
1705                   BGE_RDMAMODE_MBUF_RBD_CRPT_ATTN |
1706                   BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN;
1707         if (sc->bge_flags & BGE_FLAG_PCIE)
1708                 val |= BGE_RDMAMODE_FIFO_LONG_BURST;
1709         CSR_WRITE_4(sc, BGE_RDMA_MODE, val);
1710         DELAY(40);
1711
1712         /* Turn on RX data completion state machine */
1713         CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
1714
1715         /* Turn on RX BD initiator state machine */
1716         CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
1717
1718         /* Turn on RX data and RX BD initiator state machine */
1719         CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
1720
1721         /* Turn on Mbuf cluster free state machine */
1722         if (!BGE_IS_5705_PLUS(sc))
1723                 CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
1724
1725         /* Turn on send BD completion state machine */
1726         CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
1727
1728         /* Turn on send data completion state machine */
1729         val = BGE_SDCMODE_ENABLE;
1730         if (sc->bge_asicrev == BGE_ASICREV_BCM5761)
1731                 val |= BGE_SDCMODE_CDELAY; 
1732         CSR_WRITE_4(sc, BGE_SDC_MODE, val);
1733
1734         /* Turn on send data initiator state machine */
1735         CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
1736
1737         /* Turn on send BD initiator state machine */
1738         CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
1739
1740         /* Turn on send BD selector state machine */
1741         CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
1742
1743         CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
1744         CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
1745             BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);
1746
1747         /* ack/clear link change events */
1748         CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
1749             BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
1750             BGE_MACSTAT_LINK_CHANGED);
1751         CSR_WRITE_4(sc, BGE_MI_STS, 0);
1752
1753         /* Enable PHY auto polling (for MII/GMII only) */
1754         if (sc->bge_flags & BGE_FLAG_TBI) {
1755                 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
1756         } else {
1757                 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL|10<<16);
1758                 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
1759                     sc->bge_chipid != BGE_CHIPID_BCM5700_B2) {
1760                         CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
1761                             BGE_EVTENB_MI_INTERRUPT);
1762                 }
1763         }
1764
1765         /*
1766          * Clear any pending link state attention.
1767          * Otherwise some link state change events may be lost until attention
1768          * is cleared by the bge_intr() -> bge_softc.bge_link_upd() sequence.
1769          * It's not necessary on newer BCM chips - perhaps enabling link
1770          * state change attentions implies clearing pending attention.
1771          */
1772         CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
1773             BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
1774             BGE_MACSTAT_LINK_CHANGED);
1775
1776         /* Enable link state change attentions. */
1777         BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
1778
1779         return(0);
1780 }
1781
1782 /*
1783  * Probe for a Broadcom chip. Check the PCI vendor and device IDs
1784  * against our list and return its name if we find a match. Note
1785  * that since the Broadcom controller contains VPD support, we
1786  * can get the device name string from the controller itself instead
1787  * of the compiled-in string. This is a little slow, but it guarantees
1788  * we'll always announce the right product name.
1789  */
1790 static int
1791 bge_probe(device_t dev)
1792 {
1793         const struct bge_type *t;
1794         uint16_t product, vendor;
1795
1796         product = pci_get_device(dev);
1797         vendor = pci_get_vendor(dev);
1798
1799         for (t = bge_devs; t->bge_name != NULL; t++) {
1800                 if (vendor == t->bge_vid && product == t->bge_did)
1801                         break;
1802         }
1803         if (t->bge_name == NULL)
1804                 return(ENXIO);
1805
1806         device_set_desc(dev, t->bge_name);
1807         if (pci_get_subvendor(dev) == PCI_VENDOR_DELL) {
1808                 struct bge_softc *sc = device_get_softc(dev);
1809                 sc->bge_flags |= BGE_FLAG_NO_3LED;
1810         }
1811         return(0);
1812 }
1813
1814 static int
1815 bge_attach(device_t dev)
1816 {
1817         struct ifnet *ifp;
1818         struct bge_softc *sc;
1819         uint32_t hwcfg = 0;
1820         int error = 0, rid;
1821         uint8_t ether_addr[ETHER_ADDR_LEN];
1822
1823         sc = device_get_softc(dev);
1824         sc->bge_dev = dev;
1825         callout_init(&sc->bge_stat_timer);
1826         lwkt_serialize_init(&sc->bge_jslot_serializer);
1827
1828 #ifndef BURN_BRIDGES
1829         if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
1830                 uint32_t irq, mem;
1831
1832                 irq = pci_read_config(dev, PCIR_INTLINE, 4);
1833                 mem = pci_read_config(dev, BGE_PCI_BAR0, 4);
1834
1835                 device_printf(dev, "chip is in D%d power mode "
1836                     "-- setting to D0\n", pci_get_powerstate(dev));
1837
1838                 pci_set_powerstate(dev, PCI_POWERSTATE_D0);
1839
1840                 pci_write_config(dev, PCIR_INTLINE, irq, 4);
1841                 pci_write_config(dev, BGE_PCI_BAR0, mem, 4);
1842         }
1843 #endif  /* !BURN_BRIDGES */
1844
1845         /*
1846          * Map control/status registers.
1847          */
1848         pci_enable_busmaster(dev);
1849
1850         rid = BGE_PCI_BAR0;
1851         sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
1852             RF_ACTIVE);
1853
1854         if (sc->bge_res == NULL) {
1855                 device_printf(dev, "couldn't map memory\n");
1856                 return ENXIO;
1857         }
1858
1859         sc->bge_btag = rman_get_bustag(sc->bge_res);
1860         sc->bge_bhandle = rman_get_bushandle(sc->bge_res);
1861
1862         /* Save various chip information */
1863         sc->bge_chipid =
1864             pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >>
1865             BGE_PCIMISCCTL_ASICREV_SHIFT;
1866         if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_USE_PRODID_REG)
1867                 sc->bge_chipid = pci_read_config(dev, BGE_PCI_PRODID_ASICREV, 4);
1868         sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid);
1869         sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid);
1870
1871         /* Save chipset family. */
1872         switch (sc->bge_asicrev) {
1873         case BGE_ASICREV_BCM5755:
1874         case BGE_ASICREV_BCM5761:
1875         case BGE_ASICREV_BCM5784:
1876         case BGE_ASICREV_BCM5785:
1877         case BGE_ASICREV_BCM5787:
1878         case BGE_ASICREV_BCM57780:
1879             sc->bge_flags |= BGE_FLAG_5755_PLUS | BGE_FLAG_575X_PLUS |
1880                 BGE_FLAG_5705_PLUS;
1881             break;
1882
1883         case BGE_ASICREV_BCM5700:
1884         case BGE_ASICREV_BCM5701:
1885         case BGE_ASICREV_BCM5703:
1886         case BGE_ASICREV_BCM5704:
1887                 sc->bge_flags |= BGE_FLAG_5700_FAMILY | BGE_FLAG_JUMBO;
1888                 break;
1889
1890         case BGE_ASICREV_BCM5714_A0:
1891         case BGE_ASICREV_BCM5780:
1892         case BGE_ASICREV_BCM5714:
1893                 sc->bge_flags |= BGE_FLAG_5714_FAMILY;
1894                 /* Fall through */
1895
1896         case BGE_ASICREV_BCM5750:
1897         case BGE_ASICREV_BCM5752:
1898         case BGE_ASICREV_BCM5906:
1899                 sc->bge_flags |= BGE_FLAG_575X_PLUS;
1900                 /* Fall through */
1901
1902         case BGE_ASICREV_BCM5705:
1903                 sc->bge_flags |= BGE_FLAG_5705_PLUS;
1904                 break;
1905         }
1906
1907         if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
1908                 sc->bge_flags |= BGE_FLAG_NO_EEPROM;
1909
1910         /*
1911          * Set various quirk flags.
1912          */
1913
1914         sc->bge_flags |= BGE_FLAG_ETH_WIRESPEED;
1915         if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
1916             (sc->bge_asicrev == BGE_ASICREV_BCM5705 &&
1917              (sc->bge_chipid != BGE_CHIPID_BCM5705_A0 &&
1918               sc->bge_chipid != BGE_CHIPID_BCM5705_A1)) ||
1919             sc->bge_asicrev == BGE_ASICREV_BCM5906)
1920                 sc->bge_flags &= ~BGE_FLAG_ETH_WIRESPEED;
1921
1922         if (sc->bge_chipid == BGE_CHIPID_BCM5701_A0 ||
1923             sc->bge_chipid == BGE_CHIPID_BCM5701_B0)
1924                 sc->bge_flags |= BGE_FLAG_CRC_BUG;
1925
1926         if (sc->bge_chiprev == BGE_CHIPREV_5703_AX ||
1927             sc->bge_chiprev == BGE_CHIPREV_5704_AX)
1928                 sc->bge_flags |= BGE_FLAG_ADC_BUG;
1929
1930         if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0)
1931                 sc->bge_flags |= BGE_FLAG_5704_A0_BUG;
1932
1933         if (BGE_IS_5705_PLUS(sc)) {
1934                 if (sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
1935                     sc->bge_asicrev == BGE_ASICREV_BCM5761 ||
1936                     sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
1937                     sc->bge_asicrev == BGE_ASICREV_BCM5787) {
1938                         uint32_t product = pci_get_device(dev);
1939
1940                         if (product != PCI_PRODUCT_BROADCOM_BCM5722 &&
1941                             product != PCI_PRODUCT_BROADCOM_BCM5756)
1942                                 sc->bge_flags |= BGE_FLAG_JITTER_BUG;
1943                         if (product == PCI_PRODUCT_BROADCOM_BCM5755M)
1944                                 sc->bge_flags |= BGE_FLAG_ADJUST_TRIM;
1945                 } else if (sc->bge_asicrev != BGE_ASICREV_BCM5906) {
1946                         sc->bge_flags |= BGE_FLAG_BER_BUG;
1947                 }
1948         }
1949
1950         /* Allocate interrupt */
1951         rid = 0;
1952
1953         sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1954             RF_SHAREABLE | RF_ACTIVE);
1955
1956         if (sc->bge_irq == NULL) {
1957                 device_printf(dev, "couldn't map interrupt\n");
1958                 error = ENXIO;
1959                 goto fail;
1960         }
1961
1962         /*
1963          * Check if this is a PCI-X or PCI Express device.
1964          */
1965         if (BGE_IS_5705_PLUS(sc)) {
1966                 if (pci_is_pcie(dev)) {
1967                         sc->bge_flags |= BGE_FLAG_PCIE;
1968                         pcie_set_max_readrq(dev, PCIEM_DEVCTL_MAX_READRQ_4096);
1969                 }
1970         } else {
1971                 /*
1972                  * Check if the device is in PCI-X Mode.
1973                  * (This bit is not valid on PCI Express controllers.)
1974                  */
1975                 if ((pci_read_config(sc->bge_dev, BGE_PCI_PCISTATE, 4) &
1976                     BGE_PCISTATE_PCI_BUSMODE) == 0) {
1977                         sc->bge_flags |= BGE_FLAG_PCIX;
1978                         sc->bge_pcixcap = pci_get_pcixcap_ptr(sc->bge_dev);
1979                 }
1980         }
1981
1982         device_printf(dev, "CHIP ID 0x%08x; "
1983                       "ASIC REV 0x%02x; CHIP REV 0x%02x; %s\n",
1984                       sc->bge_chipid, sc->bge_asicrev, sc->bge_chiprev,
1985                       (sc->bge_flags & BGE_FLAG_PCIX) ? "PCI-X"
1986                       : ((sc->bge_flags & BGE_FLAG_PCIE) ?
1987                         "PCI-E" : "PCI"));
1988
1989         /*
1990          * The 40bit DMA bug applies to the 5714/5715 controllers and is
1991          * not actually a MAC controller bug but an issue with the embedded
1992          * PCIe to PCI-X bridge in the device. Use 40bit DMA workaround.
1993          */
1994         if (BGE_IS_5714_FAMILY(sc) && (sc->bge_flags & BGE_FLAG_PCIX))
1995                 sc->bge_flags |= BGE_FLAG_MAXADDR_40BIT;
1996
1997         ifp = &sc->arpcom.ac_if;
1998         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1999
2000         /* Try to reset the chip. */
2001         bge_reset(sc);
2002
2003         if (bge_chipinit(sc)) {
2004                 device_printf(dev, "chip initialization failed\n");
2005                 error = ENXIO;
2006                 goto fail;
2007         }
2008
2009         /*
2010          * Get station address
2011          */
2012         error = bge_get_eaddr(sc, ether_addr);
2013         if (error) {
2014                 device_printf(dev, "failed to read station address\n");
2015                 goto fail;
2016         }
2017
2018         /* The 5705/5750 limit the RX return ring to 512 entries. */
2019         if (BGE_IS_5705_PLUS(sc))
2020                 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
2021         else
2022                 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
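        /* (BGE_RETURN_RING_CNT is 1024 entries in the stock headers.) */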
2023
2024         error = bge_dma_alloc(sc);
2025         if (error)
2026                 goto fail;
2027
2028         /* Set default tuneable values. */
2029         sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
2030         sc->bge_rx_coal_ticks = bge_rx_coal_ticks;
2031         sc->bge_tx_coal_ticks = bge_tx_coal_ticks;
2032         sc->bge_rx_max_coal_bds = bge_rx_max_coal_bds;
2033         sc->bge_tx_max_coal_bds = bge_tx_max_coal_bds;
2034
2035         /* Set up ifnet structure */
2036         ifp->if_softc = sc;
2037         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2038         ifp->if_ioctl = bge_ioctl;
2039         ifp->if_start = bge_start;
2040 #ifdef DEVICE_POLLING
2041         ifp->if_poll = bge_poll;
2042 #endif
2043         ifp->if_watchdog = bge_watchdog;
2044         ifp->if_init = bge_init;
2045         ifp->if_mtu = ETHERMTU;
2046         ifp->if_capabilities = IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
2047         ifq_set_maxlen(&ifp->if_snd, BGE_TX_RING_CNT - 1);
2048         ifq_set_ready(&ifp->if_snd);
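        /*
         * CNT - 1 is presumably the usual ring discipline: keep one
         * descriptor unused so a full ring can be distinguished from an
         * empty one when producer == consumer.
         */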
2049
2050         /*
2051          * 5700 B0 chips do not support checksumming correctly due
2052          * to hardware bugs.
2053          */
2054         if (sc->bge_chipid != BGE_CHIPID_BCM5700_B0) {
2055                 ifp->if_capabilities |= IFCAP_HWCSUM;
2056                 ifp->if_hwassist = BGE_CSUM_FEATURES;
2057         }
2058         ifp->if_capenable = ifp->if_capabilities;
2059
2060         /*
2061          * Figure out what sort of media we have by checking the
2062          * hardware config word in the first 32k of NIC internal memory,
2063          * or fall back to examining the EEPROM if necessary.
2064          * Note: on some BCM5700 cards, this value appears to be unset.
2065          * If that's the case, we have to rely on identifying the NIC
2066          * by its PCI subsystem ID, as we do below for the SysKonnect
2067          * SK-9D41.
2068          */
2069         if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER)
2070                 hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
2071         else {
2072                 if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
2073                                     sizeof(hwcfg))) {
2074                         device_printf(dev, "failed to read EEPROM\n");
2075                         error = ENXIO;
2076                         goto fail;
2077                 }
2078                 hwcfg = ntohl(hwcfg);
2079         }
2080
2081         /* The SysKonnect SK-9D41 is a 1000baseSX card. */
2082         if (pci_get_subvendor(dev) == PCI_PRODUCT_SCHNEIDERKOCH_SK_9D41 ||
2083             (hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER) {
2084                 if (BGE_IS_5714_FAMILY(sc))
2085                         sc->bge_flags |= BGE_FLAG_MII_SERDES;
2086                 else
2087                         sc->bge_flags |= BGE_FLAG_TBI;
2088         }
2089
2090         /*
2091          * Broadcom's own driver always assumes the internal
2092          * PHY is at GMII address 1.  On some chips, the PHY responds
2093          * to accesses at all addresses, which could cause us to
2094          * bogusly attach the PHY 32 times at probe time.  Always
2095          * restricting the lookup to address 1 is simpler than
2096          * trying to figure out which chip revisions should be
2097          * special-cased.
2098          */
2099         sc->bge_phyno = 1;
2100
2101         if (sc->bge_flags & BGE_FLAG_TBI) {
2102                 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK,
2103                     bge_ifmedia_upd, bge_ifmedia_sts);
2104                 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
2105                 ifmedia_add(&sc->bge_ifmedia,
2106                     IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL);
2107                 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
2108                 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO);
2109                 sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
2110         } else {
2111                 struct mii_probe_args mii_args;
2112
2113                 mii_probe_args_init(&mii_args, bge_ifmedia_upd, bge_ifmedia_sts);
2114                 mii_args.mii_probemask = 1 << sc->bge_phyno;
2115
2116                 error = mii_probe(dev, &sc->bge_miibus, &mii_args);
2117                 if (error) {
2118                         device_printf(dev, "MII without any PHY!\n");
2119                         goto fail;
2120                 }
2121         }
2122
2123         /*
2124          * When using the BCM5701 in PCI-X mode, data corruption has
2125          * been observed in the first few bytes of some received packets.
2126          * Aligning the packet buffer in memory eliminates the corruption.
2127          * Unfortunately, this misaligns the packet payloads.  On platforms
2128          * which do not support unaligned accesses, we will realign the
2129          * payloads by copying the received packets.
2130          */
2131         if (sc->bge_asicrev == BGE_ASICREV_BCM5701 &&
2132             (sc->bge_flags & BGE_FLAG_PCIX))
2133                 sc->bge_flags |= BGE_FLAG_RX_ALIGNBUG;
2134
2135         if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
2136             sc->bge_chipid != BGE_CHIPID_BCM5700_B2) {
2137                 sc->bge_link_upd = bge_bcm5700_link_upd;
2138                 sc->bge_link_chg = BGE_MACSTAT_MI_INTERRUPT;
2139         } else if (sc->bge_flags & BGE_FLAG_TBI) {
2140                 sc->bge_link_upd = bge_tbi_link_upd;
2141                 sc->bge_link_chg = BGE_MACSTAT_LINK_CHANGED;
2142         } else {
2143                 sc->bge_link_upd = bge_copper_link_upd;
2144                 sc->bge_link_chg = BGE_MACSTAT_LINK_CHANGED;
2145         }
2146
2147         /*
2148          * Create sysctl nodes.
2149          */
2150         sysctl_ctx_init(&sc->bge_sysctl_ctx);
2151         sc->bge_sysctl_tree = SYSCTL_ADD_NODE(&sc->bge_sysctl_ctx,
2152                                               SYSCTL_STATIC_CHILDREN(_hw),
2153                                               OID_AUTO,
2154                                               device_get_nameunit(dev),
2155                                               CTLFLAG_RD, 0, "");
2156         if (sc->bge_sysctl_tree == NULL) {
2157                 device_printf(dev, "can't add sysctl node\n");
2158                 error = ENXIO;
2159                 goto fail;
2160         }
2161
2162         SYSCTL_ADD_PROC(&sc->bge_sysctl_ctx,
2163                         SYSCTL_CHILDREN(sc->bge_sysctl_tree),
2164                         OID_AUTO, "rx_coal_ticks",
2165                         CTLTYPE_INT | CTLFLAG_RW,
2166                         sc, 0, bge_sysctl_rx_coal_ticks, "I",
2167                         "Receive coalescing ticks (usec).");
2168         SYSCTL_ADD_PROC(&sc->bge_sysctl_ctx,
2169                         SYSCTL_CHILDREN(sc->bge_sysctl_tree),
2170                         OID_AUTO, "tx_coal_ticks",
2171                         CTLTYPE_INT | CTLFLAG_RW,
2172                         sc, 0, bge_sysctl_tx_coal_ticks, "I",
2173                         "Transmit coalescing ticks (usec).");
2174         SYSCTL_ADD_PROC(&sc->bge_sysctl_ctx,
2175                         SYSCTL_CHILDREN(sc->bge_sysctl_tree),
2176                         OID_AUTO, "rx_max_coal_bds",
2177                         CTLTYPE_INT | CTLFLAG_RW,
2178                         sc, 0, bge_sysctl_rx_max_coal_bds, "I",
2179                         "Receive max coalesced BD count.");
2180         SYSCTL_ADD_PROC(&sc->bge_sysctl_ctx,
2181                         SYSCTL_CHILDREN(sc->bge_sysctl_tree),
2182                         OID_AUTO, "tx_max_coal_bds",
2183                         CTLTYPE_INT | CTLFLAG_RW,
2184                         sc, 0, bge_sysctl_tx_max_coal_bds, "I",
2185                         "Transmit max coalesced BD count.");
2186
2187         if (sc->bge_flags & BGE_FLAG_PCIE) {
2188                 /*
2189                  * A common design characteristic for many Broadcom
2190                  * client controllers is that they only support a
2191                  * single outstanding DMA read operation on the PCIe
2192                  * bus. This means that it will take twice as long to
2193                  * fetch a TX frame that is split into header and
2194                  * payload buffers as it does to fetch a single,
2195                  * contiguous TX frame (2 reads vs. 1 read). For these
2196                  * controllers, coalescing buffers to reduce the number
2197                  * of memory reads is an effective way to get maximum
2198                  * performance (about 940Mbps).  Without collapsing TX
2199                  * buffers the maximum TCP bulk transfer performance
2200                  * is about 850Mbps.  However, forcing mbuf coalescing
2201                  * consumes a lot of CPU cycles, so leave it off by
2202                  * default.
2203                  */
2204                 SYSCTL_ADD_INT(&sc->bge_sysctl_ctx,
2205                                SYSCTL_CHILDREN(sc->bge_sysctl_tree),
2206                                OID_AUTO, "force_defrag", CTLFLAG_RW,
2207                                &sc->bge_force_defrag, 0,
2208                                "Force defragment on TX path");
2209         }
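        /*
         * Given the node layout created above, the knob lands in the
         * per-device tree, e.g. for a hypothetical unit 0:
         *
         *      sysctl hw.bge0.force_defrag=1
         */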
2210
2211         /*
2212          * Call MI attach routine.
2213          */
2214         ether_ifattach(ifp, ether_addr, NULL);
2215
2216         error = bus_setup_intr(dev, sc->bge_irq, INTR_MPSAFE,
2217                                bge_intr, sc, &sc->bge_intrhand, 
2218                                ifp->if_serializer);
2219         if (error) {
2220                 ether_ifdetach(ifp);
2221                 device_printf(dev, "couldn't set up irq\n");
2222                 goto fail;
2223         }
2224
2225         ifp->if_cpuid = rman_get_cpuid(sc->bge_irq);
2226         KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus);
2227
2228         return(0);
2229 fail:
2230         bge_detach(dev);
2231         return(error);
2232 }
2233
2234 static int
2235 bge_detach(device_t dev)
2236 {
2237         struct bge_softc *sc = device_get_softc(dev);
2238
2239         if (device_is_attached(dev)) {
2240                 struct ifnet *ifp = &sc->arpcom.ac_if;
2241
2242                 lwkt_serialize_enter(ifp->if_serializer);
2243                 bge_stop(sc);
2244                 bge_reset(sc);
2245                 bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand);
2246                 lwkt_serialize_exit(ifp->if_serializer);
2247
2248                 ether_ifdetach(ifp);
2249         }
2250
2251         if (sc->bge_flags & BGE_FLAG_TBI)
2252                 ifmedia_removeall(&sc->bge_ifmedia);
2253         if (sc->bge_miibus)
2254                 device_delete_child(dev, sc->bge_miibus);
2255         bus_generic_detach(dev);
2256
2257         if (sc->bge_irq != NULL)
2258                 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->bge_irq);
2259
2260         if (sc->bge_res != NULL)
2261                 bus_release_resource(dev, SYS_RES_MEMORY,
2262                     BGE_PCI_BAR0, sc->bge_res);
2263
2264         if (sc->bge_sysctl_tree != NULL)
2265                 sysctl_ctx_free(&sc->bge_sysctl_ctx);
2266
2267         bge_dma_free(sc);
2268
2269         return 0;
2270 }
2271
2272 static void
2273 bge_reset(struct bge_softc *sc)
2274 {
2275         device_t dev;
2276         uint32_t cachesize, command, pcistate, reset;
2277         void (*write_op)(struct bge_softc *, uint32_t, uint32_t);
2278         int i, val = 0;
2279
2280         dev = sc->bge_dev;
2281
2282         if (BGE_IS_575X_PLUS(sc) && !BGE_IS_5714_FAMILY(sc) &&
2283             sc->bge_asicrev != BGE_ASICREV_BCM5906) {
2284                 if (sc->bge_flags & BGE_FLAG_PCIE)
2285                         write_op = bge_writemem_direct;
2286                 else
2287                         write_op = bge_writemem_ind;
2288         } else {
2289                 write_op = bge_writereg_ind;
2290         }
2291
2292         /* Save some important PCI state. */
2293         cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
2294         command = pci_read_config(dev, BGE_PCI_CMD, 4);
2295         pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);
2296
2297         pci_write_config(dev, BGE_PCI_MISC_CTL,
2298             BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2299             BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW, 4);
2300
2301         /* Disable fastboot on controllers that support it. */
2302         if (sc->bge_asicrev == BGE_ASICREV_BCM5752 ||
2303             BGE_IS_5755_PLUS(sc)) {
2304                 if (bootverbose)
2305                         if_printf(&sc->arpcom.ac_if, "Disabling fastboot\n");
2306                 CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0x0);
2307         }
2308
2309         /*
2310          * Write the magic number to SRAM at offset 0xB50.
2311          * When firmware finishes its initialization it will
2312          * write ~BGE_MAGIC_NUMBER to the same location.
2313          */
2314         bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
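        /*
         * (The matching poll for ~BGE_MAGIC_NUMBER happens further below
         * once the reset has been issued; the BCM5906 signals completion
         * through BGE_VCPU_STATUS_INIT_DONE instead.)
         */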
2315
2316         reset = BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1);
2317
2318         /* XXX: Broadcom Linux driver. */
2319         if (sc->bge_flags & BGE_FLAG_PCIE) {
2320                 if (CSR_READ_4(sc, 0x7e2c) == 0x60)     /* PCIE 1.0 */
2321                         CSR_WRITE_4(sc, 0x7e2c, 0x20);
2322                 if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
2323                         /* Prevent PCIE link training during global reset */
2324                         CSR_WRITE_4(sc, BGE_MISC_CFG, (1<<29));
2325                         reset |= (1<<29);
2326                 }
2327         }
2328
2329         /* 
2330          * Set GPHY Power Down Override to leave GPHY
2331          * powered up in D0 uninitialized.
2332          */
2333         if (BGE_IS_5705_PLUS(sc))
2334                 reset |= BGE_MISCCFG_GPHY_PD_OVERRIDE;
2335
2336         /* Issue global reset */
2337         write_op(sc, BGE_MISC_CFG, reset);
2338
2339         if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
2340                 uint32_t status, ctrl;
2341
2342                 status = CSR_READ_4(sc, BGE_VCPU_STATUS);
2343                 CSR_WRITE_4(sc, BGE_VCPU_STATUS,
2344                     status | BGE_VCPU_STATUS_DRV_RESET);
2345                 ctrl = CSR_READ_4(sc, BGE_VCPU_EXT_CTRL);
2346                 CSR_WRITE_4(sc, BGE_VCPU_EXT_CTRL,
2347                     ctrl & ~BGE_VCPU_EXT_CTRL_HALT_CPU);
2348         }
2349
2350         DELAY(1000);
2351
2352         /* XXX: Broadcom Linux driver. */
2353         if (sc->bge_flags & BGE_FLAG_PCIE) {
2354                 if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
2355                         uint32_t v;
2356
2357                         DELAY(500000); /* wait for link training to complete */
2358                         v = pci_read_config(dev, 0xc4, 4);
2359                         pci_write_config(dev, 0xc4, v | (1<<15), 4);
2360                 }
2361                 /*
2362                  * Set PCIE max payload size to 128 bytes and
2363                  * clear error status.
2364                  */
2365                 pci_write_config(dev, 0xd8, 0xf5000, 4);
2366         }
2367
2368         /* Reset some of the PCI state that got zapped by reset */
2369         pci_write_config(dev, BGE_PCI_MISC_CTL,
2370             BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2371             BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW, 4);
2372         pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
2373         pci_write_config(dev, BGE_PCI_CMD, command, 4);
2374         write_op(sc, BGE_MISC_CFG, (65 << 1));
2375
2376         /*
2377          * Disable PCI-X relaxed ordering to ensure status block update
2378          * comes first then packet buffer DMA. Otherwise driver may
2379          * read stale status block.
2380          */
2381         if (sc->bge_flags & BGE_FLAG_PCIX) {
2382                 uint16_t devctl;
2383
2384                 devctl = pci_read_config(dev,
2385                     sc->bge_pcixcap + PCIXR_COMMAND, 2);
2386                 devctl &= ~PCIXM_COMMAND_ERO;
2387                 if (sc->bge_asicrev == BGE_ASICREV_BCM5703) {
2388                         devctl &= ~PCIXM_COMMAND_MAX_READ;
2389                         devctl |= PCIXM_COMMAND_MAX_READ_2048;
2390                 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
2391                         devctl &= ~(PCIXM_COMMAND_MAX_SPLITS |
2392                             PCIXM_COMMAND_MAX_READ);
2393                         devctl |= PCIXM_COMMAND_MAX_READ_2048;
2394                 }
2395                 pci_write_config(dev, sc->bge_pcixcap + PCIXR_COMMAND,
2396                     devctl, 2);
2397         }
2398
2399         /* Enable memory arbiter. */
2400         if (BGE_IS_5714_FAMILY(sc)) {
2401                 uint32_t val;
2402
2403                 val = CSR_READ_4(sc, BGE_MARB_MODE);
2404                 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | val);
2405         } else {
2406                 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
2407         }
2408
2409         if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
2410                 for (i = 0; i < BGE_TIMEOUT; i++) {
2411                         val = CSR_READ_4(sc, BGE_VCPU_STATUS);
2412                         if (val & BGE_VCPU_STATUS_INIT_DONE)
2413                                 break;
2414                         DELAY(100);
2415                 }
2416                 if (i == BGE_TIMEOUT) {
2417                         if_printf(&sc->arpcom.ac_if, "reset timed out\n");
2418                         return;
2419                 }
2420         } else {
2421                 /*
2422                  * Poll until we see the 1's complement of the magic number.
2423                  * This indicates that the firmware initialization
2424                  * is complete.
2425                  */
2426                 for (i = 0; i < BGE_FIRMWARE_TIMEOUT; i++) {
2427                         val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
2428                         if (val == ~BGE_MAGIC_NUMBER)
2429                                 break;
2430                         DELAY(10);
2431                 }
2432                 if (i == BGE_FIRMWARE_TIMEOUT) {
2433                         if_printf(&sc->arpcom.ac_if, "firmware handshake "
2434                                   "timed out, found 0x%08x\n", val);
2435                         return;
2436                 }
2437         }
2438
2439         /*
2440          * XXX Wait for the value of the PCISTATE register to
2441          * return to its original pre-reset state. This is a
2442          * fairly good indicator of reset completion. If we don't
2443          * wait for the reset to fully complete, trying to read
2444          * from the device's non-PCI registers may yield garbage
2445          * results.
2446          */
2447         for (i = 0; i < BGE_TIMEOUT; i++) {
2448                 if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate)
2449                         break;
2450                 DELAY(10);
2451         }
2452
2453         /* Fix up byte swapping */
2454         CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS |
2455             BGE_MODECTL_BYTESWAP_DATA);
2456
2457         CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
2458
2459         /*
2460          * The 5704 in TBI mode apparently needs some special
2461          * adjustment to ensure the SERDES drive level is set
2462          * to 1.2V.
2463          */
2464         if (sc->bge_asicrev == BGE_ASICREV_BCM5704 &&
2465             (sc->bge_flags & BGE_FLAG_TBI)) {
2466                 uint32_t serdescfg;
2467
2468                 serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG);
2469                 serdescfg = (serdescfg & ~0xFFF) | 0x880;
2470                 CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg);
2471         }
2472
2473         /* XXX: Broadcom Linux driver. */
2474         if ((sc->bge_flags & BGE_FLAG_PCIE) &&
2475             sc->bge_chipid != BGE_CHIPID_BCM5750_A0 &&
2476             sc->bge_asicrev != BGE_ASICREV_BCM5785) {
2477                 uint32_t v;
2478
2479                 /* Enable Data FIFO protection. */
2480                 v = CSR_READ_4(sc, 0x7c00);
2481                 CSR_WRITE_4(sc, 0x7c00, v | (1<<25));
2482         }
2483
2484         DELAY(10000);
2485 }
2486
2487 /*
2488  * Frame reception handling. This is called if there's a frame
2489  * on the receive return list.
2490  *
2491  * Note: we have to be able to handle two possibilities here:
2492  * 1) the frame is from the jumbo receive ring
2493  * 2) the frame is from the standard receive ring
2494  */
2495
2496 static void
2497 bge_rxeof(struct bge_softc *sc)
2498 {
2499         struct ifnet *ifp;
2500         int stdcnt = 0, jumbocnt = 0;
2501
2502         if (sc->bge_rx_saved_considx ==
2503             sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx)
2504                 return;
2505
2506         ifp = &sc->arpcom.ac_if;
2507
2508         while (sc->bge_rx_saved_considx !=
2509                sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx) {
2510                 struct bge_rx_bd        *cur_rx;
2511                 uint32_t                rxidx;
2512                 struct mbuf             *m = NULL;
2513                 uint16_t                vlan_tag = 0;
2514                 int                     have_tag = 0;
2515
2516                 cur_rx =
2517             &sc->bge_ldata.bge_rx_return_ring[sc->bge_rx_saved_considx];
2518
2519                 rxidx = cur_rx->bge_idx;
2520                 BGE_INC(sc->bge_rx_saved_considx, sc->bge_return_ring_cnt);
2521                 logif(rx_pkt);
2522
2523                 if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
2524                         have_tag = 1;
2525                         vlan_tag = cur_rx->bge_vlan_tag;
2526                 }
2527
2528                 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
2529                         BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
2530                         jumbocnt++;
2531
2532                         if (rxidx != sc->bge_jumbo) {
2533                                 ifp->if_ierrors++;
2534                                 if_printf(ifp, "sw jumbo index(%d) "
2535                                     "and hw jumbo index(%d) mismatch, drop!\n",
2536                                     sc->bge_jumbo, rxidx);
2537                                 bge_setup_rxdesc_jumbo(sc, rxidx);
2538                                 continue;
2539                         }
2540
2541                         m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx].bge_mbuf;
2542                         if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2543                                 ifp->if_ierrors++;
2544                                 bge_setup_rxdesc_jumbo(sc, sc->bge_jumbo);
2545                                 continue;
2546                         }
2547                         if (bge_newbuf_jumbo(sc, sc->bge_jumbo, 0)) {
2548                                 ifp->if_ierrors++;
2549                                 bge_setup_rxdesc_jumbo(sc, sc->bge_jumbo);
2550                                 continue;
2551                         }
2552                 } else {
2553                         BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
2554                         stdcnt++;
2555
2556                         if (rxidx != sc->bge_std) {
2557                                 ifp->if_ierrors++;
2558                                 if_printf(ifp, "sw std index(%d) "
2559                                     "and hw std index(%d) mismatch, drop!\n",
2560                                     sc->bge_std, rxidx);
2561                                 bge_setup_rxdesc_std(sc, rxidx);
2562                                 continue;
2563                         }
2564
2565                         m = sc->bge_cdata.bge_rx_std_chain[rxidx].bge_mbuf;
2566                         if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2567                                 ifp->if_ierrors++;
2568                                 bge_setup_rxdesc_std(sc, sc->bge_std);
2569                                 continue;
2570                         }
2571                         if (bge_newbuf_std(sc, sc->bge_std, 0)) {
2572                                 ifp->if_ierrors++;
2573                                 bge_setup_rxdesc_std(sc, sc->bge_std);
2574                                 continue;
2575                         }
2576                 }
2577
2578                 ifp->if_ipackets++;
2579 #if !defined(__i386__) && !defined(__x86_64__)
2580                 /*
2581                  * The x86 allows unaligned accesses, but for other
2582                  * platforms we must make sure the payload is aligned.
2583                  */
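                /*
                 * ETHER_ALIGN is 2: shifting the 14-byte Ethernet header
                 * forward two bytes leaves the IP header 4-byte aligned,
                 * at the cost of the copy below.
                 */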
2584                 if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) {
2585                         bcopy(m->m_data, m->m_data + ETHER_ALIGN,
2586                             cur_rx->bge_len);
2587                         m->m_data += ETHER_ALIGN;
2588                 }
2589 #endif
2590                 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
2591                 m->m_pkthdr.rcvif = ifp;
2592
2593                 if (ifp->if_capenable & IFCAP_RXCSUM) {
2594                         if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
2595                                 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2596                                 if ((cur_rx->bge_ip_csum ^ 0xffff) == 0)
2597                                         m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2598                         }
2599                         if ((cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) &&
2600                             m->m_pkthdr.len >= BGE_MIN_FRAME) {
2601                                 m->m_pkthdr.csum_data =
2602                                         cur_rx->bge_tcp_udp_csum;
2603                                 m->m_pkthdr.csum_flags |=
2604                                         CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2605                         }
2606                 }
2607
2608                 /*
2609                  * If we received a packet with a vlan tag, pass it
2610                  * to vlan_input() instead of ether_input().
2611                  */
2612                 if (have_tag) {
2613                         m->m_flags |= M_VLANTAG;
2614                         m->m_pkthdr.ether_vlantag = vlan_tag;
2615                         have_tag = vlan_tag = 0;
2616                 }
2617                 ifp->if_input(ifp, m);
2618         }
2619
2620         bge_writembx(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
2621         if (stdcnt)
2622                 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
2623         if (jumbocnt)
2624                 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
2625 }
2626
2627 static void
2628 bge_txeof(struct bge_softc *sc)
2629 {
2630         struct bge_tx_bd *cur_tx = NULL;
2631         struct ifnet *ifp;
2632
2633         if (sc->bge_tx_saved_considx ==
2634             sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx)
2635                 return;
2636
2637         ifp = &sc->arpcom.ac_if;
2638
2639         /*
2640          * Go through our tx ring and free mbufs for those
2641          * frames that have been sent.
2642          */
2643         while (sc->bge_tx_saved_considx !=
2644                sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx) {
2645                 uint32_t idx = 0;
2646
2647                 idx = sc->bge_tx_saved_considx;
2648                 cur_tx = &sc->bge_ldata.bge_tx_ring[idx];
2649                 if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
2650                         ifp->if_opackets++;
2651                 if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
2652                         bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag,
2653                             sc->bge_cdata.bge_tx_dmamap[idx]);
2654                         m_freem(sc->bge_cdata.bge_tx_chain[idx]);
2655                         sc->bge_cdata.bge_tx_chain[idx] = NULL;
2656                 }
2657                 sc->bge_txcnt--;
2658                 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
2659                 logif(tx_pkt);
2660         }
2661
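             /*
              * Reclaiming descriptors may have freed enough ring space
              * (BGE_NSEG_RSVD + BGE_NSEG_SPARE entries) to resume
              * transmission.
              */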
2662         if (cur_tx != NULL &&
2663             (BGE_TX_RING_CNT - sc->bge_txcnt) >=
2664             (BGE_NSEG_RSVD + BGE_NSEG_SPARE))
2665                 ifp->if_flags &= ~IFF_OACTIVE;
2666
2667         if (sc->bge_txcnt == 0)
2668                 ifp->if_timer = 0;
2669
2670         if (!ifq_is_empty(&ifp->if_snd))
2671                 if_devstart(ifp);
2672 }
2673
2674 #ifdef DEVICE_POLLING
2675
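     /*
      * polling(4) support: the chip interrupt is masked while the
      * interface is registered for polling; POLL_AND_CHECK_STATUS
      * additionally samples the MAC status register for link events.
      */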
2676 static void
2677 bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
2678 {
2679         struct bge_softc *sc = ifp->if_softc;
2680         uint32_t status;
2681
2682         switch(cmd) {
2683         case POLL_REGISTER:
2684                 bge_disable_intr(sc);
2685                 break;
2686         case POLL_DEREGISTER:
2687                 bge_enable_intr(sc);
2688                 break;
2689         case POLL_AND_CHECK_STATUS:
2690                 /*
2691                  * Process link state changes.
2692                  */
2693                 status = CSR_READ_4(sc, BGE_MAC_STS);
2694                 if ((status & sc->bge_link_chg) || sc->bge_link_evt) {
2695                         sc->bge_link_evt = 0;
2696                         sc->bge_link_upd(sc, status);
2697                 }
2698                 /* fall through */
2699         case POLL_ONLY:
2700                 if (ifp->if_flags & IFF_RUNNING) {
2701                         bge_rxeof(sc);
2702                         bge_txeof(sc);
2703                 }
2704                 break;
2705         }
2706 }
2707
2708 #endif
2709
2710 static void
2711 bge_intr(void *xsc)
2712 {
2713         struct bge_softc *sc = xsc;
2714         struct ifnet *ifp = &sc->arpcom.ac_if;
2715         uint32_t status;
2716
2717         logif(intr);
2718
2719         /*
2720          * Ack the interrupt by writing something to BGE_MBX_IRQ0_LO.  Don't
2721          * disable interrupts by writing nonzero like we used to, since with
2722          * our current organization this just gives complications and
2723          * pessimizations for re-enabling interrupts.  We used to have races
2724          * instead of the necessary complications.  Disabling interrupts
2725          * would just reduce the chance of a status update while we are
2726          * running (by switching to the interrupt-mode coalescence
2727          * parameters), but this chance is already very low so it is more
2728          * efficient to get another interrupt than prevent it.
2729          *
2730          * We do the ack first to ensure another interrupt if there is a
2731          * status update after the ack.  We don't check for the status
2732          * changing later because it is more efficient to get another
2733          * interrupt than prevent it, not quite as above (not checking is
2734          * a smaller optimization than not toggling the interrupt enable,
2735          * since checking doesn't involve PCI accesses and toggling requires
2736          * the status check).  So toggling would probably be a pessimization
2737          * even with MSI.  It would only be needed for using a task queue.
2738          */
2739         bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
2740
2741         /*
2742          * Process link state changes.
2743          */
2744         status = CSR_READ_4(sc, BGE_MAC_STS);
2745         if ((status & sc->bge_link_chg) || sc->bge_link_evt) {
2746                 sc->bge_link_evt = 0;
2747                 sc->bge_link_upd(sc, status);
2748         }
2749
2750         if (ifp->if_flags & IFF_RUNNING) {
2751                 /* Check RX return ring producer/consumer */
2752                 bge_rxeof(sc);
2753
2754                 /* Check TX ring producer/consumer */
2755                 bge_txeof(sc);
2756         }
2757
2758         if (sc->bge_coal_chg)
2759                 bge_coal_change(sc);
2760 }
2761
2762 static void
2763 bge_tick(void *xsc)
2764 {
2765         struct bge_softc *sc = xsc;
2766         struct ifnet *ifp = &sc->arpcom.ac_if;
2767
2768         lwkt_serialize_enter(ifp->if_serializer);
2769
2770         if (BGE_IS_5705_PLUS(sc))
2771                 bge_stats_update_regs(sc);
2772         else
2773                 bge_stats_update(sc);
2774
2775         if (sc->bge_flags & BGE_FLAG_TBI) {
2776                 /*
2777                  * Since auto-polling can't be used in TBI mode, we have to
2778                  * poll the link status manually.  Register a pending link
2779                  * event here and trigger an interrupt.
2780                  */
2781                 sc->bge_link_evt++;
2782                 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
2783         } else if (!sc->bge_link) {
2784                 mii_tick(device_get_softc(sc->bge_miibus));
2785         }
2786
2787         callout_reset(&sc->bge_stat_timer, hz, bge_tick, sc);
2788
2789         lwkt_serialize_exit(ifp->if_serializer);
2790 }
2791
2792 static void
2793 bge_stats_update_regs(struct bge_softc *sc)
2794 {
2795         struct ifnet *ifp = &sc->arpcom.ac_if;
2796         struct bge_mac_stats_regs stats;
2797         uint32_t *s;
2798         int i;
2799
2800         s = (uint32_t *)&stats;
2801         for (i = 0; i < sizeof(struct bge_mac_stats_regs); i += 4) {
2802                 *s = CSR_READ_4(sc, BGE_RX_STATS + i);
2803                 s++;
2804         }
2805
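             /*
              * The chip's collision counters are cumulative, so adding the
              * difference from our previous total keeps if_collisions in
              * sync without double counting.
              */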
2806         ifp->if_collisions +=
2807            (stats.dot3StatsSingleCollisionFrames +
2808            stats.dot3StatsMultipleCollisionFrames +
2809            stats.dot3StatsExcessiveCollisions +
2810            stats.dot3StatsLateCollisions) -
2811            ifp->if_collisions;
2812 }
2813
2814 static void
2815 bge_stats_update(struct bge_softc *sc)
2816 {
2817         struct ifnet *ifp = &sc->arpcom.ac_if;
2818         bus_size_t stats;
2819
2820         stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;
2821
2822 #define READ_STAT(sc, stats, stat)      \
2823         CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))
2824
2825         ifp->if_collisions +=
2826            (READ_STAT(sc, stats,
2827                 txstats.dot3StatsSingleCollisionFrames.bge_addr_lo) +
2828             READ_STAT(sc, stats,
2829                 txstats.dot3StatsMultipleCollisionFrames.bge_addr_lo) +
2830             READ_STAT(sc, stats,
2831                 txstats.dot3StatsExcessiveCollisions.bge_addr_lo) +
2832             READ_STAT(sc, stats,
2833                 txstats.dot3StatsLateCollisions.bge_addr_lo)) -
2834            ifp->if_collisions;
2835
2836 #undef READ_STAT
2837
2838 #ifdef notdef
2839         ifp->if_collisions +=
2840            (sc->bge_rdata->bge_info.bge_stats.dot3StatsSingleCollisionFrames +
2841            sc->bge_rdata->bge_info.bge_stats.dot3StatsMultipleCollisionFrames +
2842            sc->bge_rdata->bge_info.bge_stats.dot3StatsExcessiveCollisions +
2843            sc->bge_rdata->bge_info.bge_stats.dot3StatsLateCollisions) -
2844            ifp->if_collisions;
2845 #endif
2846 }
2847
2848 /*
2849  * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
2850  * pointers to descriptors.
2851  */
2852 static int
2853 bge_encap(struct bge_softc *sc, struct mbuf **m_head0, uint32_t *txidx)
2854 {
2855         struct bge_tx_bd *d = NULL;
2856         uint16_t csum_flags = 0;
2857         bus_dma_segment_t segs[BGE_NSEG_NEW];
2858         bus_dmamap_t map;
2859         int error, maxsegs, nsegs, idx, i;
2860         struct mbuf *m_head = *m_head0;
2861
2862         if (m_head->m_pkthdr.csum_flags) {
2863                 if (m_head->m_pkthdr.csum_flags & CSUM_IP)
2864                         csum_flags |= BGE_TXBDFLAG_IP_CSUM;
2865                 if (m_head->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
2866                         csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
2867                 if (m_head->m_flags & M_LASTFRAG)
2868                         csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
2869                 else if (m_head->m_flags & M_FRAG)
2870                         csum_flags |= BGE_TXBDFLAG_IP_FRAG;
2871         }
2872
2873         idx = *txidx;
2874         map = sc->bge_cdata.bge_tx_dmamap[idx];
2875
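             /*
              * Leave BGE_NSEG_RSVD descriptors unused as a safety margin;
              * bge_start() has already guaranteed that at least
              * BGE_NSEG_SPARE of the remainder are free.
              */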
2876         maxsegs = (BGE_TX_RING_CNT - sc->bge_txcnt) - BGE_NSEG_RSVD;
2877         KASSERT(maxsegs >= BGE_NSEG_SPARE,
2878                 ("not enough segments %d", maxsegs));
2879
2880         if (maxsegs > BGE_NSEG_NEW)
2881                 maxsegs = BGE_NSEG_NEW;
2882
2883         /*
2884          * Pad outbound frame to BGE_MIN_FRAME for an unusual reason.
2885          * The bge hardware will pad out Tx runts to BGE_MIN_FRAME,
2886          * but when such padded frames employ the bge IP/TCP checksum
2887          * offload, the hardware checksum assist gives incorrect results
2888          * (possibly from incorporating its own padding into the UDP/TCP
2889          * checksum; who knows).  If we pad such runts with zeros, the
2890          * onboard checksum comes out correct.
2891          */
2892         if ((csum_flags & BGE_TXBDFLAG_TCP_UDP_CSUM) &&
2893             m_head->m_pkthdr.len < BGE_MIN_FRAME) {
2894                 error = m_devpad(m_head, BGE_MIN_FRAME);
2895                 if (error)
2896                         goto back;
2897         }
2898
2899         if (sc->bge_force_defrag && (sc->bge_flags & BGE_FLAG_PCIE) &&
2900             m_head->m_next != NULL) {
2901                 struct mbuf *m_new;
2902
2903                 /*
2904                  * Forcefully defragment the mbuf chain to work around a
2905                  * hardware limitation: only a single outstanding DMA read
2906                  * operation is supported.  If defragmentation fails, keep
2907                  * going with the original mbuf chain.
2908                  */
2909                 m_new = m_defrag(m_head, MB_DONTWAIT);
2910                 if (m_new != NULL)
2911                         *m_head0 = m_head = m_new;
2912         }
2913
2914         error = bus_dmamap_load_mbuf_defrag(sc->bge_cdata.bge_tx_mtag, map,
2915                         m_head0, segs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
2916         if (error)
2917                 goto back;
2918
2919         m_head = *m_head0;
2920         bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag, map, BUS_DMASYNC_PREWRITE);
2921
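             /*
              * Fill one descriptor per DMA segment; idx wraps around the
              * ring modulo BGE_TX_RING_CNT via BGE_INC().
              */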
2922         for (i = 0; ; i++) {
2923                 d = &sc->bge_ldata.bge_tx_ring[idx];
2924
2925                 d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr);
2926                 d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr);
2927                 d->bge_len = segs[i].ds_len;
2928                 d->bge_flags = csum_flags;
2929
2930                 if (i == nsegs - 1)
2931                         break;
2932                 BGE_INC(idx, BGE_TX_RING_CNT);
2933         }
2934         /* Mark the last segment as end of packet... */
2935         d->bge_flags |= BGE_TXBDFLAG_END;
2936
2937         /* Set vlan tag to the first segment of the packet. */
2938         d = &sc->bge_ldata.bge_tx_ring[*txidx];
2939         if (m_head->m_flags & M_VLANTAG) {
2940                 d->bge_flags |= BGE_TXBDFLAG_VLAN_TAG;
2941                 d->bge_vlan_tag = m_head->m_pkthdr.ether_vlantag;
2942         } else {
2943                 d->bge_vlan_tag = 0;
2944         }
2945
2946         /*
2947          * Ensure that the map for this transmission sits at the array
2948          * index of the last descriptor, where bge_txeof() unloads it.
2949          */
2950         sc->bge_cdata.bge_tx_dmamap[*txidx] = sc->bge_cdata.bge_tx_dmamap[idx];
2951         sc->bge_cdata.bge_tx_dmamap[idx] = map;
2952         sc->bge_cdata.bge_tx_chain[idx] = m_head;
2953         sc->bge_txcnt += nsegs;
2954
2955         BGE_INC(idx, BGE_TX_RING_CNT);
2956         *txidx = idx;
2957 back:
2958         if (error) {
2959                 m_freem(*m_head0);
2960                 *m_head0 = NULL;
2961         }
2962         return error;
2963 }
2964
2965 /*
2966  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
2967  * to the mbuf data regions directly in the transmit descriptors.
2968  */
2969 static void
2970 bge_start(struct ifnet *ifp)
2971 {
2972         struct bge_softc *sc = ifp->if_softc;
2973         struct mbuf *m_head = NULL;
2974         uint32_t prodidx;
2975         int need_trans;
2976
2977         if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
2978                 return;
2979
2980         prodidx = sc->bge_tx_prodidx;
2981
2982         need_trans = 0;
2983         while (sc->bge_cdata.bge_tx_chain[prodidx] == NULL) {
2984                 m_head = ifq_dequeue(&ifp->if_snd, NULL);
2985                 if (m_head == NULL)
2986                         break;
2987
2988                 /*
2989                  * XXX
2990                  * The code inside the if() block is never reached since we
2991                  * must mark CSUM_IP_FRAGS in our if_hwassist to start getting
2992                  * requests to checksum TCP/UDP in a fragmented packet.
2993                  * 
2994                  * XXX
2995                  * safety overkill.  If this is a fragmented packet chain
2996                  * with delayed TCP/UDP checksums, then only encapsulate
2997                  * it if we have enough descriptors to handle the entire
2998                  * chain at once.
2999                  * (paranoia -- may not actually be needed)
3000                  */
3001                 if ((m_head->m_flags & M_FIRSTFRAG) &&
3002                     (m_head->m_pkthdr.csum_flags & CSUM_DELAY_DATA)) {
3003                         if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
3004                             m_head->m_pkthdr.csum_data + BGE_NSEG_RSVD) {
3005                                 ifp->if_flags |= IFF_OACTIVE;
3006                                 ifq_prepend(&ifp->if_snd, m_head);
3007                                 break;
3008                         }
3009                 }
3010
3011                 /*
3012                  * Sanity check: avoid coming within BGE_NSEG_RSVD
3013                  * descriptors of the end of the ring.  Also make
3014                  * sure there are BGE_NSEG_SPARE descriptors for
3015                  * jumbo buffers' defragmentation.
3016                  */
3017                 if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
3018                     (BGE_NSEG_RSVD + BGE_NSEG_SPARE)) {
3019                         ifp->if_flags |= IFF_OACTIVE;
3020                         ifq_prepend(&ifp->if_snd, m_head);
3021                         break;
3022                 }
3023
3024                 /*
3025                  * Pack the data into the transmit ring. If we
3026                  * don't have room, set the OACTIVE flag and wait
3027                  * for the NIC to drain the ring.
3028                  */
3029                 if (bge_encap(sc, &m_head, &prodidx)) {
3030                         ifp->if_flags |= IFF_OACTIVE;
3031                         ifp->if_oerrors++;
3032                         break;
3033                 }
3034                 need_trans = 1;
3035
3036                 ETHER_BPF_MTAP(ifp, m_head);
3037         }
3038
3039         if (!need_trans)
3040                 return;
3041
3042         /* Transmit */
3043         bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
3044         /* 5700 B2 errata: the TX producer index must be written twice. */
3045         if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
3046                 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
3047
3048         sc->bge_tx_prodidx = prodidx;
3049
3050         /*
3051          * Set a timeout in case the chip goes out to lunch.
3052          */
3053         ifp->if_timer = 5;
3054 }
3055
3056 static void
3057 bge_init(void *xsc)
3058 {
3059         struct bge_softc *sc = xsc;
3060         struct ifnet *ifp = &sc->arpcom.ac_if;
3061         uint16_t *m;
3062
3063         ASSERT_SERIALIZED(ifp->if_serializer);
3064
3065         /* Cancel pending I/O and flush buffers. */
3066         bge_stop(sc);
3067         bge_reset(sc);
3068         bge_chipinit(sc);
3069
3070         /*
3071          * Init the various state machines, ring
3072          * control blocks and firmware.
3073          */
3074         if (bge_blockinit(sc)) {
3075                 if_printf(ifp, "initialization failure\n");
3076                 bge_stop(sc);
3077                 return;
3078         }
3079
3080         /* Specify MTU. */
3081         CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
3082             ETHER_HDR_LEN + ETHER_CRC_LEN + EVL_ENCAPLEN);
3083
3084         /* Load our MAC address. */
3085         m = (uint16_t *)&sc->arpcom.ac_enaddr[0];
3086         CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
3087         CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
3088
3089         /* Enable or disable promiscuous mode as needed. */
3090         bge_setpromisc(sc);
3091
3092         /* Program multicast filter. */
3093         bge_setmulti(sc);
3094
3095         /* Init RX ring. */
3096         if (bge_init_rx_ring_std(sc)) {
3097                 if_printf(ifp, "RX ring initialization failed\n");
3098                 bge_stop(sc);
3099                 return;
3100         }
3101
3102         /*
3103          * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
3104          * memory to ensure that the chip has in fact read the first
3105          * entry of the ring.
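              * The word at offset 8 of the first ring entry holds its
              * length/index fields; with index 0, it should read back as
              * the std buffer length, MCLBYTES - ETHER_ALIGN, once the
              * chip has fetched the descriptor.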
3106          */
3107         if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
3108                 uint32_t                v, i;
3109                 for (i = 0; i < 10; i++) {
3110                         DELAY(20);
3111                         v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
3112                         if (v == (MCLBYTES - ETHER_ALIGN))
3113                                 break;
3114                 }
3115                 if (i == 10)
3116                         if_printf(ifp, "5705 A0 chip failed to load RX ring\n");
3117         }
3118
3119         /* Init jumbo RX ring. */
3120         if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN)) {
3121                 if (bge_init_rx_ring_jumbo(sc)) {
3122                         if_printf(ifp, "Jumbo RX ring initialization failed\n");
3123                         bge_stop(sc);
3124                         return;
3125                 }
3126         }
3127
3128         /* Init our RX return ring index */
3129         sc->bge_rx_saved_considx = 0;
3130
3131         /* Init TX ring. */
3132         bge_init_tx_ring(sc);
3133
3134         /* Turn on transmitter */
3135         BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE);
3136
3137         /* Turn on receiver */
3138         BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
3139
3140         /*
3141          * Set the number of good frames to receive after RX MBUF
3142          * Low Watermark has been reached.  After the RX MAC receives
3143          * this number of frames, it will drop subsequent incoming
3144          * frames until the MBUF High Watermark is reached.
3145          */
3146         CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 2);
3147
3148         /* Tell firmware we're alive. */
3149         BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3150
3151         /* Enable host interrupts if polling(4) is not enabled. */
3152         BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
3153 #ifdef DEVICE_POLLING
3154         if (ifp->if_flags & IFF_POLLING)
3155                 bge_disable_intr(sc);
3156         else
3157 #endif
3158         bge_enable_intr(sc);
3159
3160         bge_ifmedia_upd(ifp);
3161
3162         ifp->if_flags |= IFF_RUNNING;
3163         ifp->if_flags &= ~IFF_OACTIVE;
3164
3165         callout_reset(&sc->bge_stat_timer, hz, bge_tick, sc);
3166 }
3167
3168 /*
3169  * Set media options.
3170  */
3171 static int
3172 bge_ifmedia_upd(struct ifnet *ifp)
3173 {
3174         struct bge_softc *sc = ifp->if_softc;
3175
3176         /* If this is a 1000baseX NIC, enable the TBI port. */
3177         if (sc->bge_flags & BGE_FLAG_TBI) {
3178                 struct ifmedia *ifm = &sc->bge_ifmedia;
3179
3180                 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
3181                         return(EINVAL);
3182
3183                 switch(IFM_SUBTYPE(ifm->ifm_media)) {
3184                 case IFM_AUTO:
3185                         /*
3186                          * The BCM5704 ASIC appears to have a special
3187                          * mechanism for programming the autoneg
3188                          * advertisement registers in TBI mode.
3189                          */
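                             /*
                              * Pulse BGE_SGDIGCFG_SEND while writing the new
                              * pause/autoneg advertisement, then restore the
                              * register; presumably this makes the SERDES
                              * latch the new bits.
                              */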
3190                         if (!bge_fake_autoneg &&
3191                             sc->bge_asicrev == BGE_ASICREV_BCM5704) {
3192                                 uint32_t sgdig;
3193
3194                                 CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
3195                                 sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
3196                                 sgdig |= BGE_SGDIGCFG_AUTO |
3197                                          BGE_SGDIGCFG_PAUSE_CAP |
3198                                          BGE_SGDIGCFG_ASYM_PAUSE;
3199                                 CSR_WRITE_4(sc, BGE_SGDIG_CFG,
3200                                             sgdig | BGE_SGDIGCFG_SEND);
3201                                 DELAY(5);
3202                                 CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig);
3203                         }
3204                         break;
3205                 case IFM_1000_SX:
3206                         if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
3207                                 BGE_CLRBIT(sc, BGE_MAC_MODE,
3208                                     BGE_MACMODE_HALF_DUPLEX);
3209                         } else {
3210                                 BGE_SETBIT(sc, BGE_MAC_MODE,
3211                                     BGE_MACMODE_HALF_DUPLEX);
3212                         }
3213                         break;
3214                 default:
3215                         return(EINVAL);
3216                 }
3217         } else {
3218                 struct mii_data *mii = device_get_softc(sc->bge_miibus);
3219
3220                 sc->bge_link_evt++;
3221                 sc->bge_link = 0;
3222                 if (mii->mii_instance) {
3223                         struct mii_softc *miisc;
3224
3225                         LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
3226                                 mii_phy_reset(miisc);
3227                 }
3228                 mii_mediachg(mii);
3229         }
3230         return(0);
3231 }
3232
3233 /*
3234  * Report current media status.
3235  */
3236 static void
3237 bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
3238 {
3239         struct bge_softc *sc = ifp->if_softc;
3240
3241         if (sc->bge_flags & BGE_FLAG_TBI) {
3242                 ifmr->ifm_status = IFM_AVALID;
3243                 ifmr->ifm_active = IFM_ETHER;
3244                 if (CSR_READ_4(sc, BGE_MAC_STS) &
3245                     BGE_MACSTAT_TBI_PCS_SYNCHED) {
3246                         ifmr->ifm_status |= IFM_ACTIVE;
3247                 } else {
3248                         ifmr->ifm_active |= IFM_NONE;
3249                         return;
3250                 }
3251
3252                 ifmr->ifm_active |= IFM_1000_SX;
3253                 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
3254                         ifmr->ifm_active |= IFM_HDX;
3255                 else
3256                         ifmr->ifm_active |= IFM_FDX;
3257         } else {
3258                 struct mii_data *mii = device_get_softc(sc->bge_miibus);
3259
3260                 mii_pollstat(mii);
3261                 ifmr->ifm_active = mii->mii_media_active;
3262                 ifmr->ifm_status = mii->mii_media_status;
3263         }
3264 }
3265
3266 static int
3267 bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
3268 {
3269         struct bge_softc *sc = ifp->if_softc;
3270         struct ifreq *ifr = (struct ifreq *)data;
3271         int mask, error = 0;
3272
3273         ASSERT_SERIALIZED(ifp->if_serializer);
3274
3275         switch (command) {
3276         case SIOCSIFMTU:
3277                 if ((!BGE_IS_JUMBO_CAPABLE(sc) && ifr->ifr_mtu > ETHERMTU) ||
3278                     (BGE_IS_JUMBO_CAPABLE(sc) &&
3279                      ifr->ifr_mtu > BGE_JUMBO_MTU)) {
3280                         error = EINVAL;
3281                 } else if (ifp->if_mtu != ifr->ifr_mtu) {
3282                         ifp->if_mtu = ifr->ifr_mtu;
3283                         if (ifp->if_flags & IFF_RUNNING)
3284                                 bge_init(sc);
3285                 }
3286                 break;
3287         case SIOCSIFFLAGS:
3288                 if (ifp->if_flags & IFF_UP) {
3289                         if (ifp->if_flags & IFF_RUNNING) {
3290                                 mask = ifp->if_flags ^ sc->bge_if_flags;
3291
3292                                 /*
3293                                  * If only the state of the PROMISC flag
3294                                  * changed, then just use the 'set promisc
3295                                  * mode' command instead of reinitializing
3296                                  * the entire NIC. Doing a full re-init
3297                                  * means reloading the firmware and waiting
3298                                  * for it to start up, which may take a
3299                                  * second or two.  Similarly for ALLMULTI.
3300                                  */
3301                                 if (mask & IFF_PROMISC)
3302                                         bge_setpromisc(sc);
3303                                 if (mask & IFF_ALLMULTI)
3304                                         bge_setmulti(sc);
3305                         } else {
3306                                 bge_init(sc);
3307                         }
3308                 } else if (ifp->if_flags & IFF_RUNNING) {
3309                         bge_stop(sc);
3310                 }
3311                 sc->bge_if_flags = ifp->if_flags;
3312                 break;
3313         case SIOCADDMULTI:
3314         case SIOCDELMULTI:
3315                 if (ifp->if_flags & IFF_RUNNING)
3316                         bge_setmulti(sc);
3317                 break;
3318         case SIOCSIFMEDIA:
3319         case SIOCGIFMEDIA:
3320                 if (sc->bge_flags & BGE_FLAG_TBI) {
3321                         error = ifmedia_ioctl(ifp, ifr,
3322                             &sc->bge_ifmedia, command);
3323                 } else {
3324                         struct mii_data *mii;
3325
3326                         mii = device_get_softc(sc->bge_miibus);
3327                         error = ifmedia_ioctl(ifp, ifr,
3328                                               &mii->mii_media, command);
3329                 }
3330                 break;
3331         case SIOCSIFCAP:
3332                 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
3333                 if (mask & IFCAP_HWCSUM) {
3334                         ifp->if_capenable ^= (mask & IFCAP_HWCSUM);
3335                         if (IFCAP_HWCSUM & ifp->if_capenable)
3336                                 ifp->if_hwassist = BGE_CSUM_FEATURES;
3337                         else
3338                                 ifp->if_hwassist = 0;
3339                 }
3340                 break;
3341         default:
3342                 error = ether_ioctl(ifp, command, data);
3343                 break;
3344         }
3345         return error;
3346 }
3347
3348 static void
3349 bge_watchdog(struct ifnet *ifp)
3350 {
3351         struct bge_softc *sc = ifp->if_softc;
3352
3353         if_printf(ifp, "watchdog timeout -- resetting\n");
3354
3355         bge_init(sc);
3356
3357         ifp->if_oerrors++;
3358
3359         if (!ifq_is_empty(&ifp->if_snd))
3360                 if_devstart(ifp);
3361 }
3362
3363 /*
3364  * Stop the adapter and free any mbufs allocated to the
3365  * RX and TX lists.
3366  */
3367 static void
3368 bge_stop(struct bge_softc *sc)
3369 {
3370         struct ifnet *ifp = &sc->arpcom.ac_if;
3371
3372         ASSERT_SERIALIZED(ifp->if_serializer);
3373
3374         callout_stop(&sc->bge_stat_timer);
3375
3376         /*
3377          * Disable all of the receiver blocks
3378          */
3379         BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
3380         BGE_CLRBIT(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
3381         BGE_CLRBIT(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
3382         if (!BGE_IS_5705_PLUS(sc))
3383                 BGE_CLRBIT(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
3384         BGE_CLRBIT(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
3385         BGE_CLRBIT(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
3386         BGE_CLRBIT(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
3387
3388         /*
3389          * Disable all of the transmit blocks
3390          */
3391         BGE_CLRBIT(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
3392         BGE_CLRBIT(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
3393         BGE_CLRBIT(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
3394         BGE_CLRBIT(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
3395         BGE_CLRBIT(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
3396         if (!BGE_IS_5705_PLUS(sc))
3397                 BGE_CLRBIT(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
3398         BGE_CLRBIT(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
3399
3400         /*
3401          * Shut down all of the memory managers and related
3402          * state machines.
3403          */
3404         BGE_CLRBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
3405         BGE_CLRBIT(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
3406         if (!BGE_IS_5705_PLUS(sc))
3407                 BGE_CLRBIT(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
3408         CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
3409         CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
3410         if (!BGE_IS_5705_PLUS(sc)) {
3411                 BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
3412                 BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
3413         }
3414
3415         /* Disable host interrupts. */
3416         bge_disable_intr(sc);
3417
3418         /*
3419          * Tell firmware we're shutting down.
3420          */
3421         BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3422
3423         /* Free the RX lists. */
3424         bge_free_rx_ring_std(sc);
3425
3426         /* Free jumbo RX list. */
3427         if (BGE_IS_JUMBO_CAPABLE(sc))
3428                 bge_free_rx_ring_jumbo(sc);
3429
3430         /* Free TX buffers. */
3431         bge_free_tx_ring(sc);
3432
3433         sc->bge_link = 0;
3434         sc->bge_coal_chg = 0;
3435
3436         sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
3437
3438         ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
3439         ifp->if_timer = 0;
3440 }
3441
3442 /*
3443  * Stop all chip I/O so that the kernel's probe routines don't
3444  * get confused by errant DMAs when rebooting.
3445  */
3446 static void
3447 bge_shutdown(device_t dev)
3448 {
3449         struct bge_softc *sc = device_get_softc(dev);
3450         struct ifnet *ifp = &sc->arpcom.ac_if;
3451
3452         lwkt_serialize_enter(ifp->if_serializer);
3453         bge_stop(sc);
3454         bge_reset(sc);
3455         lwkt_serialize_exit(ifp->if_serializer);
3456 }
3457
3458 static int
3459 bge_suspend(device_t dev)
3460 {
3461         struct bge_softc *sc = device_get_softc(dev);
3462         struct ifnet *ifp = &sc->arpcom.ac_if;
3463
3464         lwkt_serialize_enter(ifp->if_serializer);
3465         bge_stop(sc);
3466         lwkt_serialize_exit(ifp->if_serializer);
3467
3468         return 0;
3469 }
3470
3471 static int
3472 bge_resume(device_t dev)
3473 {
3474         struct bge_softc *sc = device_get_softc(dev);
3475         struct ifnet *ifp = &sc->arpcom.ac_if;
3476
3477         lwkt_serialize_enter(ifp->if_serializer);
3478
3479         if (ifp->if_flags & IFF_UP) {
3480                 bge_init(sc);
3481
3482                 if (!ifq_is_empty(&ifp->if_snd))
3483                         if_devstart(ifp);
3484         }
3485
3486         lwkt_serialize_exit(ifp->if_serializer);
3487
3488         return 0;
3489 }
3490
3491 static void
3492 bge_setpromisc(struct bge_softc *sc)
3493 {
3494         struct ifnet *ifp = &sc->arpcom.ac_if;
3495
3496         if (ifp->if_flags & IFF_PROMISC)
3497                 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
3498         else
3499                 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
3500 }
3501
3502 static void
3503 bge_dma_free(struct bge_softc *sc)
3504 {
3505         int i;
3506
3507         /* Destroy RX mbuf DMA resources. */
3508         if (sc->bge_cdata.bge_rx_mtag != NULL) {
3509                 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
3510                         bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag,
3511                             sc->bge_cdata.bge_rx_std_dmamap[i]);
3512                 }
3513                 bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag,
3514                                    sc->bge_cdata.bge_rx_tmpmap);
3515                 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_mtag);
3516         }
3517
3518         /* Destroy TX mbuf DMA resources. */
3519         if (sc->bge_cdata.bge_tx_mtag != NULL) {
3520                 for (i = 0; i < BGE_TX_RING_CNT; i++) {
3521                         bus_dmamap_destroy(sc->bge_cdata.bge_tx_mtag,
3522                             sc->bge_cdata.bge_tx_dmamap[i]);
3523                 }
3524                 bus_dma_tag_destroy(sc->bge_cdata.bge_tx_mtag);
3525         }
3526
3527         /* Destroy standard RX ring */
3528         bge_dma_block_free(sc->bge_cdata.bge_rx_std_ring_tag,
3529                            sc->bge_cdata.bge_rx_std_ring_map,
3530                            sc->bge_ldata.bge_rx_std_ring);
3531
3532         if (BGE_IS_JUMBO_CAPABLE(sc))
3533                 bge_free_jumbo_mem(sc);
3534
3535         /* Destroy RX return ring */
3536         bge_dma_block_free(sc->bge_cdata.bge_rx_return_ring_tag,
3537                            sc->bge_cdata.bge_rx_return_ring_map,
3538                            sc->bge_ldata.bge_rx_return_ring);
3539
3540         /* Destroy TX ring */
3541         bge_dma_block_free(sc->bge_cdata.bge_tx_ring_tag,
3542                            sc->bge_cdata.bge_tx_ring_map,
3543                            sc->bge_ldata.bge_tx_ring);
3544
3545         /* Destroy status block */
3546         bge_dma_block_free(sc->bge_cdata.bge_status_tag,
3547                            sc->bge_cdata.bge_status_map,
3548                            sc->bge_ldata.bge_status_block);
3549
3550         /* Destroy statistics block */
3551         bge_dma_block_free(sc->bge_cdata.bge_stats_tag,
3552                            sc->bge_cdata.bge_stats_map,
3553                            sc->bge_ldata.bge_stats);
3554
3555         /* Destroy the parent tag */
3556         if (sc->bge_cdata.bge_parent_tag != NULL)
3557                 bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag);
3558 }
3559
3560 static int
3561 bge_dma_alloc(struct bge_softc *sc)
3562 {
3563         struct ifnet *ifp = &sc->arpcom.ac_if;
3564         int i, error;
3565         bus_addr_t lowaddr;
3566
3567         lowaddr = BUS_SPACE_MAXADDR;
3568         if (sc->bge_flags & BGE_FLAG_MAXADDR_40BIT)
3569                 lowaddr = BGE_DMA_MAXADDR_40BIT;
3570
3571         /*
3572          * Allocate the parent bus DMA tag appropriate for PCI.
3573          *
3574          * All of the NetExtreme/NetLink controllers have the 4GB
3575          * boundary DMA bug.
3576          * Whenever an address crosses a multiple of the 4GB boundary
3577          * (4GB, 8GB, 12GB, etc.) and makes the transition from
3578          * 0xX_FFFF_FFFF to 0x(X+1)_0000_0000, an internal DMA state
3579          * machine will lock up and cause the device to hang.
3580          */
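             /*
              * Illustrative sketch (assuming BGE_DMA_BOUNDARY_4G is 4GB):
              * with a non-zero boundary, busdma never creates a segment
              * [addr, addr + len) that crosses it, i.e. one for which
              *
              *	(addr & ~(boundary - 1)) !=
              *	    ((addr + len - 1) & ~(boundary - 1))
              */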
3581         error = bus_dma_tag_create(NULL, 1, BGE_DMA_BOUNDARY_4G,
3582                                    lowaddr, BUS_SPACE_MAXADDR,
3583                                    NULL, NULL,
3584                                    BUS_SPACE_MAXSIZE_32BIT, 0,
3585                                    BUS_SPACE_MAXSIZE_32BIT,
3586                                    0, &sc->bge_cdata.bge_parent_tag);
3587         if (error) {
3588                 if_printf(ifp, "could not allocate parent dma tag\n");
3589                 return error;
3590         }
3591
3592         /*
3593          * Create DMA tag and maps for RX mbufs.
3594          */
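             /*
              * RX buffers are single mbuf clusters: at most one segment
              * of MCLBYTES per load.
              */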
3595         error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1, 0,
3596                                    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3597                                    NULL, NULL, MCLBYTES, 1, MCLBYTES,
3598                                    BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK,
3599                                    &sc->bge_cdata.bge_rx_mtag);
3600         if (error) {
3601                 if_printf(ifp, "could not allocate RX mbuf dma tag\n");
3602                 return error;
3603         }
3604
3605         error = bus_dmamap_create(sc->bge_cdata.bge_rx_mtag,
3606                                   BUS_DMA_WAITOK, &sc->bge_cdata.bge_rx_tmpmap);
3607         if (error) {
3608                 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_mtag);
3609                 sc->bge_cdata.bge_rx_mtag = NULL;
3610                 return error;
3611         }
3612
3613         for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
3614                 error = bus_dmamap_create(sc->bge_cdata.bge_rx_mtag,
3615                                           BUS_DMA_WAITOK,
3616                                           &sc->bge_cdata.bge_rx_std_dmamap[i]);
3617                 if (error) {
3618                         int j;
3619
3620                         for (j = 0; j < i; ++j) {
3621                                 bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag,
3622                                         sc->bge_cdata.bge_rx_std_dmamap[j]);
3623                         }
3624                         bus_dma_tag_destroy(sc->bge_cdata.bge_rx_mtag);
3625                         sc->bge_cdata.bge_rx_mtag = NULL;
3626
3627                         if_printf(ifp, "could not create DMA map for RX\n");
3628                         return error;
3629                 }
3630         }
3631
3632         /*
3633          * Create DMA tag and maps for TX mbufs.
3634          */
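             /*
              * A TX frame may be up to BGE_JUMBO_FRAMELEN bytes long,
              * spread across at most BGE_NSEG_NEW segments of MCLBYTES
              * or less each.
              */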
3635         error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1, 0,
3636                                    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3637                                    NULL, NULL,
3638                                    BGE_JUMBO_FRAMELEN, BGE_NSEG_NEW, MCLBYTES,
3639                                    BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK |
3640                                    BUS_DMA_ONEBPAGE,
3641                                    &sc->bge_cdata.bge_tx_mtag);
3642         if (error) {
3643                 if_printf(ifp, "could not allocate TX mbuf dma tag\n");
3644                 return error;
3645         }
3646
3647         for (i = 0; i < BGE_TX_RING_CNT; i++) {
3648                 error = bus_dmamap_create(sc->bge_cdata.bge_tx_mtag,
3649                                           BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
3650                                           &sc->bge_cdata.bge_tx_dmamap[i]);
3651                 if (error) {
3652                         int j;
3653
3654                         for (j = 0; j < i; ++j) {
3655                                 bus_dmamap_destroy(sc->bge_cdata.bge_tx_mtag,
3656                                         sc->bge_cdata.bge_tx_dmamap[j]);
3657                         }
3658                         bus_dma_tag_destroy(sc->bge_cdata.bge_tx_mtag);
3659                         sc->bge_cdata.bge_tx_mtag = NULL;
3660
3661                         if_printf(ifp, "could not create DMA map for TX\n");
3662                         return error;
3663                 }
3664         }
3665
3666         /*
3667          * Create DMA resources for the standard RX ring.
3668          */
3669         error = bge_dma_block_alloc(sc, BGE_STD_RX_RING_SZ,
3670                                     &sc->bge_cdata.bge_rx_std_ring_tag,
3671                                     &sc->bge_cdata.bge_rx_std_ring_map,
3672                                     (void *)&sc->bge_ldata.bge_rx_std_ring,
3673                                     &sc->bge_ldata.bge_rx_std_ring_paddr);
3674         if (error) {
3675                 if_printf(ifp, "could not create std RX ring\n");
3676                 return error;
3677         }
3678
3679         /*
3680          * Create jumbo buffer pool.
3681          */
3682         if (BGE_IS_JUMBO_CAPABLE(sc)) {
3683                 error = bge_alloc_jumbo_mem(sc);
3684                 if (error) {
3685                         if_printf(ifp, "could not create jumbo buffer pool\n");
3686                         return error;
3687                 }
3688         }
3689
3690         /*
3691          * Create DMA resources for the RX return ring.
3692          */
3693         error = bge_dma_block_alloc(sc, BGE_RX_RTN_RING_SZ(sc),
3694                                     &sc->bge_cdata.bge_rx_return_ring_tag,
3695                                     &sc->bge_cdata.bge_rx_return_ring_map,
3696                                     (void *)&sc->bge_ldata.bge_rx_return_ring,
3697                                     &sc->bge_ldata.bge_rx_return_ring_paddr);
3698         if (error) {
3699                 if_printf(ifp, "could not create RX ret ring\n");
3700                 return error;
3701         }
3702
3703         /*
3704          * Create DMA resources for the TX ring.
3705          */
3706         error = bge_dma_block_alloc(sc, BGE_TX_RING_SZ,
3707                                     &sc->bge_cdata.bge_tx_ring_tag,
3708                                     &sc->bge_cdata.bge_tx_ring_map,
3709                                     (void *)&sc->bge_ldata.bge_tx_ring,
3710                                     &sc->bge_ldata.bge_tx_ring_paddr);
3711         if (error) {
3712                 if_printf(ifp, "could not create TX ring\n");
3713                 return error;
3714         }
3715
3716         /*
3717          * Create DMA resources for the status block.
3718          */
3719         error = bge_dma_block_alloc(sc, BGE_STATUS_BLK_SZ,
3720                                     &sc->bge_cdata.bge_status_tag,
3721                                     &sc->bge_cdata.bge_status_map,
3722                                     (void *)&sc->bge_ldata.bge_status_block,
3723                                     &sc->bge_ldata.bge_status_block_paddr);
3724         if (error) {
3725                 if_printf(ifp, "could not create status block\n");
3726                 return error;
3727         }
3728
3729         /*
3730          * Create DMA resources for the statistics block.
3731          */
3732         error = bge_dma_block_alloc(sc, BGE_STATS_SZ,
3733                                     &sc->bge_cdata.bge_stats_tag,
3734                                     &sc->bge_cdata.bge_stats_map,
3735                                     (void *)&sc->bge_ldata.bge_stats,
3736                                     &sc->bge_ldata.bge_stats_paddr);
3737         if (error) {
3738                 if_printf(ifp, "could not create stats block\n");
3739                 return error;
3740         }
3741         return 0;
3742 }
3743
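     /*
      * Allocate one coherent, zeroed DMA block and hand back the tag,
      * map, KVA and bus address that the ring-setup code stores in
      * bge_cdata / bge_ldata.
      */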
3744 static int
3745 bge_dma_block_alloc(struct bge_softc *sc, bus_size_t size, bus_dma_tag_t *tag,
3746                     bus_dmamap_t *map, void **addr, bus_addr_t *paddr)
3747 {
3748         bus_dmamem_t dmem;
3749         int error;
3750
3751         error = bus_dmamem_coherent(sc->bge_cdata.bge_parent_tag, PAGE_SIZE, 0,
3752                                     BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3753                                     size, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
3754         if (error)
3755                 return error;
3756
3757         *tag = dmem.dmem_tag;
3758         *map = dmem.dmem_map;
3759         *addr = dmem.dmem_addr;
3760         *paddr = dmem.dmem_busaddr;
3761
3762         return 0;
3763 }
3764
3765 static void
3766 bge_dma_block_free(bus_dma_tag_t tag, bus_dmamap_t map, void *addr)
3767 {
3768         if (tag != NULL) {
3769                 bus_dmamap_unload(tag, map);
3770                 bus_dmamem_free(tag, addr, map);
3771                 bus_dma_tag_destroy(tag);
3772         }
3773 }
3774
3775 /*
3776  * Grrr. The link status word in the status block does
3777  * not work correctly on the BCM5700 rev AX and BX chips,
3778  * according to all available information. Hence, we have
3779  * to enable MII interrupts in order to properly obtain
3780  * async link changes. Unfortunately, this also means that
3781  * we have to read the MAC status register to detect link
3782  * changes, thereby adding an additional register access to
3783  * the interrupt handler.
3784  *
3785  * XXX: perhaps the link state detection procedure used for
3786  * BGE_CHIPID_BCM5700_B2 can be used for other BCM5700 revisions.
3787  */
3788 static void
3789 bge_bcm5700_link_upd(struct bge_softc *sc, uint32_t status __unused)
3790 {
3791         struct ifnet *ifp = &sc->arpcom.ac_if;
3792         struct mii_data *mii = device_get_softc(sc->bge_miibus);
3793
3794         mii_pollstat(mii);
3795
3796         if (!sc->bge_link &&
3797             (mii->mii_media_status & IFM_ACTIVE) &&
3798             IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
3799                 sc->bge_link++;
3800                 if (bootverbose)
3801                         if_printf(ifp, "link UP\n");
3802         } else if (sc->bge_link &&
3803             (!(mii->mii_media_status & IFM_ACTIVE) ||
3804             IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
3805                 sc->bge_link = 0;
3806                 if (bootverbose)
3807                         if_printf(ifp, "link DOWN\n");
3808         }
3809
3810         /* Clear the interrupt. */
3811         CSR_WRITE_4(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_MI_INTERRUPT);
3812         bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR);
3813         bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR, BRGPHY_INTRS);
3814 }
3815
3816 static void
3817 bge_tbi_link_upd(struct bge_softc *sc, uint32_t status)
3818 {
3819         struct ifnet *ifp = &sc->arpcom.ac_if;
3820
3821 #define PCS_ENCODE_ERR  (BGE_MACSTAT_PORT_DECODE_ERROR|BGE_MACSTAT_MI_COMPLETE)
3822
3823         /*
3824          * Sometimes PCS encoding errors are detected in
3825          * TBI mode (on fiber NICs), and for some reason
3826          * the chip will signal them as link changes.
3827          * If we get a link change event, but the 'PCS
3828          * encoding error' bit in the MAC status register
3829          * is set, don't bother doing a link check.
3830          * This avoids spurious "gigabit link up" messages
3831          * that sometimes appear on fiber NICs during
3832          * periods of heavy traffic.
3833          */
3834         if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
3835                 if (!sc->bge_link) {
3836                         sc->bge_link++;
3837                         if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
3838                                 BGE_CLRBIT(sc, BGE_MAC_MODE,
3839                                     BGE_MACMODE_TBI_SEND_CFGS);
3840                         }
3841                         CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
3842
3843                         if (bootverbose)
3844                                 if_printf(ifp, "link UP\n");
3845
3846                         ifp->if_link_state = LINK_STATE_UP;
3847                         if_link_state_change(ifp);
3848                 }
3849         } else if ((status & PCS_ENCODE_ERR) != PCS_ENCODE_ERR) {
3850                 if (sc->bge_link) {
3851                         sc->bge_link = 0;
3852
3853                         if (bootverbose)
3854                                 if_printf(ifp, "link DOWN\n");
3855
3856                         ifp->if_link_state = LINK_STATE_DOWN;
3857                         if_link_state_change(ifp);
3858                 }
3859         }
3860
3861 #undef PCS_ENCODE_ERR
3862
3863         /* Clear the attention. */
3864         CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
3865             BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
3866             BGE_MACSTAT_LINK_CHANGED);
3867 }
3868
3869 static void
3870 bge_copper_link_upd(struct bge_softc *sc, uint32_t status __unused)
3871 {
3872         /*
3873          * Check that the AUTOPOLL bit is set before
3874          * processing the event as a real link change.
3875          * Turning AUTOPOLL on and off in the MII read/write
3876          * functions will often trigger a link status
3877          * interrupt for no reason.
3878          */
3879         if (CSR_READ_4(sc, BGE_MI_MODE) & BGE_MIMODE_AUTOPOLL) {
3880                 struct ifnet *ifp = &sc->arpcom.ac_if;
3881                 struct mii_data *mii = device_get_softc(sc->bge_miibus);
3882
3883                 mii_pollstat(mii);
3884
3885                 if (!sc->bge_link &&
3886                     (mii->mii_media_status & IFM_ACTIVE) &&
3887                     IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
3888                         sc->bge_link++;
3889                         if (bootverbose)
3890                                 if_printf(ifp, "link UP\n");
3891                 } else if (sc->bge_link &&
3892                     (!(mii->mii_media_status & IFM_ACTIVE) ||
3893                     IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
3894                         sc->bge_link = 0;
3895                         if (bootverbose)
3896                                 if_printf(ifp, "link DOWN\n");
3897                 }
3898         }
3899
3900         /* Clear the attention. */
3901         CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
3902             BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
3903             BGE_MACSTAT_LINK_CHANGED);
3904 }
3905
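     /*
      * The four coalescing sysctl handlers below share bge_sysctl_coal_chg():
      * each passes a pointer to its softc field plus the BGE_*_CHG bit that
      * tells bge_coal_change() which parameter to push to the chip.
      * Hypothetical usage from userland (node names assumed):
      *
      *	sysctl hw.bge0.rx_coal_ticks=150
      */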
3906 static int
3907 bge_sysctl_rx_coal_ticks(SYSCTL_HANDLER_ARGS)
3908 {
3909         struct bge_softc *sc = arg1;
3910
3911         return bge_sysctl_coal_chg(oidp, arg1, arg2, req,
3912                                    &sc->bge_rx_coal_ticks,
3913                                    BGE_RX_COAL_TICKS_CHG);
3914 }
3915
3916 static int
3917 bge_sysctl_tx_coal_ticks(SYSCTL_HANDLER_ARGS)
3918 {
3919         struct bge_softc *sc = arg1;
3920
3921         return bge_sysctl_coal_chg(oidp, arg1, arg2, req,
3922                                    &sc->bge_tx_coal_ticks,
3923                                    BGE_TX_COAL_TICKS_CHG);
3924 }
3925
3926 static int
3927 bge_sysctl_rx_max_coal_bds(SYSCTL_HANDLER_ARGS)
3928 {
3929         struct bge_softc *sc = arg1;
3930
3931         return bge_sysctl_coal_chg(oidp, arg1, arg2, req,
3932                                    &sc->bge_rx_max_coal_bds,
3933                                    BGE_RX_MAX_COAL_BDS_CHG);
3934 }
3935
3936 static int
3937 bge_sysctl_tx_max_coal_bds(SYSCTL_HANDLER_ARGS)
3938 {
3939         struct bge_softc *sc = arg1;
3940
3941         return bge_sysctl_coal_chg(oidp, arg1, arg2, req,
3942                                    &sc->bge_tx_max_coal_bds,
3943                                    BGE_TX_MAX_COAL_BDS_CHG);
3944 }
3945
3946 static int
3947 bge_sysctl_coal_chg(SYSCTL_HANDLER_ARGS, uint32_t *coal,
3948                     uint32_t coal_chg_mask)
3949 {
3950         struct bge_softc *sc = arg1;
3951         struct ifnet *ifp = &sc->arpcom.ac_if;
3952         int error = 0, v;
3953
3954         lwkt_serialize_enter(ifp->if_serializer);
3955
3956         v = *coal;
3957         error = sysctl_handle_int(oidp, &v, 0, req);
3958         if (!error && req->newptr != NULL) {
3959                 if (v < 0) {
3960                         error = EINVAL;
3961                 } else {
3962                         *coal = v;
3963                         sc->bge_coal_chg |= coal_chg_mask;