/*
 * Copyright (c) 2001 Wind River Systems
 * Copyright (c) 1997, 1998, 1999, 2001
 *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/bge/if_bge.c,v 1.3.2.39 2005/07/03 03:41:18 silby Exp $
 * $DragonFly: src/sys/dev/netif/bge/if_bge.c,v 1.111 2008/10/22 14:24:24 sephe Exp $
 */
/*
 * Broadcom BCM570x family gigabit ethernet driver for FreeBSD.
 *
 * Written by Bill Paul <wpaul@windriver.com>
 * Senior Engineer, Wind River Systems
 *
 * The Broadcom BCM5700 is based on technology originally developed by
 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
 * MAC chips.  The BCM5700, sometimes referred to as the Tigon III, has
 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
 * SSRAM.  The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
 * frames, highly configurable RX filtering, and 16 RX and TX queues
 * (which, along with RX filter rules, can be used for QOS applications).
 * Other features, such as TCP segmentation, may be available as part
 * of value-added firmware updates.  Unlike the Tigon I and Tigon II,
 * firmware images can be stored in hardware and need not be compiled
 * into the driver.
 *
 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
 *
 * The BCM5701 is a single-chip solution incorporating both the BCM5700
 * MAC and a BCM5401 10/100/1000 PHY.  Unlike the BCM5700, the BCM5701
 * does not support external SSRAM.
 *
 * Broadcom also produces a variation of the BCM5700 under the "Altima"
 * brand name, which is functionally similar but lacks PCI-X support.
 *
 * Without external SSRAM, you can only have at most 4 TX rings,
 * and the use of the mini RX ring is disabled.  This seems to imply
 * that these features are simply not available on the BCM5701.  As a
 * result, this driver does not implement any support for the mini RX
 * ring.
 */
#include "opt_polling.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/interrupt.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/serialize.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/ethernet.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/ifq_var.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>

#include <dev/netif/mii_layer/mii.h>
#include <dev/netif/mii_layer/miivar.h>
#include <dev/netif/mii_layer/brgphyreg.h>

#include <bus/pci/pcidevs.h>
#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>

#include <dev/netif/bge/if_bgereg.h>

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

#define BGE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)
#define BGE_MIN_FRAME		60
119 static const struct bge_type bge_devs[] = {
120 { PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3C996,
121 "3COM 3C996 Gigabit Ethernet" },
123 { PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_BCM5700,
124 "Alteon BCM5700 Gigabit Ethernet" },
125 { PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_BCM5701,
126 "Alteon BCM5701 Gigabit Ethernet" },
128 { PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1000,
129 "Altima AC1000 Gigabit Ethernet" },
130 { PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1001,
131 "Altima AC1002 Gigabit Ethernet" },
132 { PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC9100,
133 "Altima AC9100 Gigabit Ethernet" },
135 { PCI_VENDOR_APPLE, PCI_PRODUCT_APPLE_BCM5701,
136 "Apple BCM5701 Gigabit Ethernet" },
138 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5700,
139 "Broadcom BCM5700 Gigabit Ethernet" },
140 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5701,
141 "Broadcom BCM5701 Gigabit Ethernet" },
142 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702,
143 "Broadcom BCM5702 Gigabit Ethernet" },
144 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702X,
145 "Broadcom BCM5702X Gigabit Ethernet" },
146 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702_ALT,
147 "Broadcom BCM5702 Gigabit Ethernet" },
148 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703,
149 "Broadcom BCM5703 Gigabit Ethernet" },
150 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703X,
151 "Broadcom BCM5703X Gigabit Ethernet" },
152 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703A3,
153 "Broadcom BCM5703 Gigabit Ethernet" },
154 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704C,
155 "Broadcom BCM5704C Dual Gigabit Ethernet" },
156 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704S,
157 "Broadcom BCM5704S Dual Gigabit Ethernet" },
158 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704S_ALT,
159 "Broadcom BCM5704S Dual Gigabit Ethernet" },
160 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705,
161 "Broadcom BCM5705 Gigabit Ethernet" },
162 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705F,
163 "Broadcom BCM5705F Gigabit Ethernet" },
164 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705K,
165 "Broadcom BCM5705K Gigabit Ethernet" },
166 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705M,
167 "Broadcom BCM5705M Gigabit Ethernet" },
168 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705M_ALT,
169 "Broadcom BCM5705M Gigabit Ethernet" },
170 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5714,
171 "Broadcom BCM5714C Gigabit Ethernet" },
172 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5714S,
173 "Broadcom BCM5714S Gigabit Ethernet" },
174 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5715,
175 "Broadcom BCM5715 Gigabit Ethernet" },
176 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5715S,
177 "Broadcom BCM5715S Gigabit Ethernet" },
178 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5720,
179 "Broadcom BCM5720 Gigabit Ethernet" },
180 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5721,
181 "Broadcom BCM5721 Gigabit Ethernet" },
182 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5722,
183 "Broadcom BCM5722 Gigabit Ethernet" },
184 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5750,
185 "Broadcom BCM5750 Gigabit Ethernet" },
186 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5750M,
187 "Broadcom BCM5750M Gigabit Ethernet" },
188 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751,
189 "Broadcom BCM5751 Gigabit Ethernet" },
190 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751F,
191 "Broadcom BCM5751F Gigabit Ethernet" },
192 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751M,
193 "Broadcom BCM5751M Gigabit Ethernet" },
194 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5752,
195 "Broadcom BCM5752 Gigabit Ethernet" },
196 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5752M,
197 "Broadcom BCM5752M Gigabit Ethernet" },
198 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5753,
199 "Broadcom BCM5753 Gigabit Ethernet" },
200 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5753F,
201 "Broadcom BCM5753F Gigabit Ethernet" },
202 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5753M,
203 "Broadcom BCM5753M Gigabit Ethernet" },
204 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5754,
205 "Broadcom BCM5754 Gigabit Ethernet" },
206 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5754M,
207 "Broadcom BCM5754M Gigabit Ethernet" },
208 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5755,
209 "Broadcom BCM5755 Gigabit Ethernet" },
210 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5755M,
211 "Broadcom BCM5755M Gigabit Ethernet" },
212 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5756,
213 "Broadcom BCM5756 Gigabit Ethernet" },
214 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5780,
215 "Broadcom BCM5780 Gigabit Ethernet" },
216 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5780S,
217 "Broadcom BCM5780S Gigabit Ethernet" },
218 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5781,
219 "Broadcom BCM5781 Gigabit Ethernet" },
220 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5782,
221 "Broadcom BCM5782 Gigabit Ethernet" },
222 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5786,
223 "Broadcom BCM5786 Gigabit Ethernet" },
224 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5787,
225 "Broadcom BCM5787 Gigabit Ethernet" },
226 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5787F,
227 "Broadcom BCM5787F Gigabit Ethernet" },
228 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5787M,
229 "Broadcom BCM5787M Gigabit Ethernet" },
230 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5788,
231 "Broadcom BCM5788 Gigabit Ethernet" },
232 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5789,
233 "Broadcom BCM5789 Gigabit Ethernet" },
234 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5901,
235 "Broadcom BCM5901 Fast Ethernet" },
236 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5901A2,
237 "Broadcom BCM5901A2 Fast Ethernet" },
238 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5903M,
239 "Broadcom BCM5903M Fast Ethernet" },
240 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5906,
241 "Broadcom BCM5906 Fast Ethernet"},
242 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5906M,
243 "Broadcom BCM5906M Fast Ethernet"},
245 { PCI_VENDOR_SCHNEIDERKOCH, PCI_PRODUCT_SCHNEIDERKOCH_SK_9DX1,
246 "SysKonnect Gigabit Ethernet" },
#define BGE_IS_JUMBO_CAPABLE(sc)	((sc)->bge_flags & BGE_FLAG_JUMBO)
#define BGE_IS_5700_FAMILY(sc)		((sc)->bge_flags & BGE_FLAG_5700_FAMILY)
#define BGE_IS_5705_PLUS(sc)		((sc)->bge_flags & BGE_FLAG_5705_PLUS)
#define BGE_IS_5714_FAMILY(sc)		((sc)->bge_flags & BGE_FLAG_5714_FAMILY)
#define BGE_IS_575X_PLUS(sc)		((sc)->bge_flags & BGE_FLAG_575X_PLUS)

typedef int	(*bge_eaddr_fcn_t)(struct bge_softc *, uint8_t[]);
static int	bge_probe(device_t);
static int	bge_attach(device_t);
static int	bge_detach(device_t);
static void	bge_txeof(struct bge_softc *);
static void	bge_rxeof(struct bge_softc *);

static void	bge_tick(void *);
static void	bge_stats_update(struct bge_softc *);
static void	bge_stats_update_regs(struct bge_softc *);
static int	bge_encap(struct bge_softc *, struct mbuf **, uint32_t *);

#ifdef DEVICE_POLLING
static void	bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count);
#endif
static void	bge_intr(void *);
static void	bge_enable_intr(struct bge_softc *);
static void	bge_disable_intr(struct bge_softc *);
static void	bge_start(struct ifnet *);
static int	bge_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	bge_init(void *);
static void	bge_stop(struct bge_softc *);
static void	bge_watchdog(struct ifnet *);
static void	bge_shutdown(device_t);
static int	bge_suspend(device_t);
static int	bge_resume(device_t);
static int	bge_ifmedia_upd(struct ifnet *);
static void	bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);

static uint8_t	bge_nvram_getbyte(struct bge_softc *, int, uint8_t *);
static int	bge_read_nvram(struct bge_softc *, caddr_t, int, int);

static uint8_t	bge_eeprom_getbyte(struct bge_softc *, uint32_t, uint8_t *);
static int	bge_read_eeprom(struct bge_softc *, caddr_t, uint32_t, size_t);

static void	bge_setmulti(struct bge_softc *);
static void	bge_setpromisc(struct bge_softc *);

static int	bge_alloc_jumbo_mem(struct bge_softc *);
static void	bge_free_jumbo_mem(struct bge_softc *);
static struct bge_jslot
		*bge_jalloc(struct bge_softc *);
static void	bge_jfree(void *);
static void	bge_jref(void *);
static int	bge_newbuf_std(struct bge_softc *, int, int);
static int	bge_newbuf_jumbo(struct bge_softc *, int, int);
static void	bge_setup_rxdesc_std(struct bge_softc *, int);
static void	bge_setup_rxdesc_jumbo(struct bge_softc *, int);
static int	bge_init_rx_ring_std(struct bge_softc *);
static void	bge_free_rx_ring_std(struct bge_softc *);
static int	bge_init_rx_ring_jumbo(struct bge_softc *);
static void	bge_free_rx_ring_jumbo(struct bge_softc *);
static void	bge_free_tx_ring(struct bge_softc *);
static int	bge_init_tx_ring(struct bge_softc *);

static int	bge_chipinit(struct bge_softc *);
static int	bge_blockinit(struct bge_softc *);

static uint32_t	bge_readmem_ind(struct bge_softc *, uint32_t);
static void	bge_writemem_ind(struct bge_softc *, uint32_t, uint32_t);
static uint32_t	bge_readreg_ind(struct bge_softc *, uint32_t);
static void	bge_writereg_ind(struct bge_softc *, uint32_t, uint32_t);
static void	bge_writemem_direct(struct bge_softc *, uint32_t, uint32_t);
static void	bge_writembx(struct bge_softc *, int, int);

static int	bge_miibus_readreg(device_t, int, int);
static int	bge_miibus_writereg(device_t, int, int, int);
static void	bge_miibus_statchg(device_t);
static void	bge_bcm5700_link_upd(struct bge_softc *, uint32_t);
static void	bge_tbi_link_upd(struct bge_softc *, uint32_t);
static void	bge_copper_link_upd(struct bge_softc *, uint32_t);

static void	bge_reset(struct bge_softc *);

static int	bge_dma_alloc(struct bge_softc *);
static void	bge_dma_free(struct bge_softc *);
static int	bge_dma_block_alloc(struct bge_softc *, bus_size_t,
				    bus_dma_tag_t *, bus_dmamap_t *,
				    void **, bus_addr_t *);
static void	bge_dma_block_free(bus_dma_tag_t, bus_dmamap_t, void *);

static int	bge_get_eaddr_mem(struct bge_softc *, uint8_t[]);
static int	bge_get_eaddr_nvram(struct bge_softc *, uint8_t[]);
static int	bge_get_eaddr_eeprom(struct bge_softc *, uint8_t[]);
static int	bge_get_eaddr(struct bge_softc *, uint8_t[]);
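/*
 * The bge_eaddr_fcn_t callbacks above lend themselves to a fallback
 * chain: try NIC memory first, then NVRAM, then the EEPROM, until one
 * succeeds.  A minimal sketch of that pattern (illustrative only; the
 * table name is hypothetical, not part of this driver's API):
 *
 *	static const bge_eaddr_fcn_t bge_eaddr_funcs[] = {
 *		bge_get_eaddr_mem, bge_get_eaddr_nvram, bge_get_eaddr_eeprom
 *	};
 */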
static void	bge_coal_change(struct bge_softc *);
static int	bge_sysctl_rx_coal_ticks(SYSCTL_HANDLER_ARGS);
static int	bge_sysctl_tx_coal_ticks(SYSCTL_HANDLER_ARGS);
static int	bge_sysctl_rx_max_coal_bds(SYSCTL_HANDLER_ARGS);
static int	bge_sysctl_tx_max_coal_bds(SYSCTL_HANDLER_ARGS);
static int	bge_sysctl_coal_chg(SYSCTL_HANDLER_ARGS, uint32_t *, uint32_t);

/*
 * Set the following tunable to 1 for some IBM blade servers with the DNLK
 * switch module.  Auto negotiation is broken for those configurations.
 */
static int	bge_fake_autoneg = 0;
TUNABLE_INT("hw.bge.fake_autoneg", &bge_fake_autoneg);
/* Interrupt moderation control variables. */
static int	bge_rx_coal_ticks = 100;	/* usec */
static int	bge_tx_coal_ticks = 1023;	/* usec */
static int	bge_rx_max_coal_bds = 80;
static int	bge_tx_max_coal_bds = 128;

TUNABLE_INT("hw.bge.rx_coal_ticks", &bge_rx_coal_ticks);
TUNABLE_INT("hw.bge.tx_coal_ticks", &bge_tx_coal_ticks);
TUNABLE_INT("hw.bge.rx_max_coal_bds", &bge_rx_max_coal_bds);
TUNABLE_INT("hw.bge.tx_max_coal_bds", &bge_tx_max_coal_bds);
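/*
 * TUNABLE_INT() binds each variable above to a loader tunable, so the
 * defaults can be overridden from /boot/loader.conf before the driver
 * attaches.  For example (the values shown are illustrative only):
 *
 *	hw.bge.rx_coal_ticks="150"
 *	hw.bge.rx_max_coal_bds="64"
 */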
#if !defined(KTR_IF_BGE)
#define KTR_IF_BGE	KTR_ALL
#endif
KTR_INFO_MASTER(if_bge);
KTR_INFO(KTR_IF_BGE, if_bge, intr, 0, "intr", 0);
KTR_INFO(KTR_IF_BGE, if_bge, rx_pkt, 1, "rx_pkt", 0);
KTR_INFO(KTR_IF_BGE, if_bge, tx_pkt, 2, "tx_pkt", 0);
#define logif(name)	KTR_LOG(if_bge_ ## name)
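/*
 * Usage sketch: with the events registered above, a call such as
 *
 *	logif(rx_pkt);
 *
 * expands to KTR_LOG(if_bge_rx_pkt) and records an "rx_pkt" entry in
 * the kernel trace buffer when KTR_IF_BGE tracing is enabled.
 */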
static device_method_t bge_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		bge_probe),
	DEVMETHOD(device_attach,	bge_attach),
	DEVMETHOD(device_detach,	bge_detach),
	DEVMETHOD(device_shutdown,	bge_shutdown),
	DEVMETHOD(device_suspend,	bge_suspend),
	DEVMETHOD(device_resume,	bge_resume),

	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	DEVMETHOD(miibus_readreg,	bge_miibus_readreg),
	DEVMETHOD(miibus_writereg,	bge_miibus_writereg),
	DEVMETHOD(miibus_statchg,	bge_miibus_statchg),

	{ 0, 0 }
};

static DEFINE_CLASS_0(bge, bge_driver, bge_methods, sizeof(struct bge_softc));
static devclass_t bge_devclass;

DECLARE_DUMMY_MODULE(if_bge);
DRIVER_MODULE(if_bge, pci, bge_driver, bge_devclass, 0, 0);
DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0);
static uint32_t
bge_readmem_ind(struct bge_softc *sc, uint32_t off)
{
	device_t dev = sc->bge_dev;
	uint32_t val;

	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
	val = pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4);
	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
	return(val);
}

static void
bge_writemem_ind(struct bge_softc *sc, uint32_t off, uint32_t val)
{
	device_t dev = sc->bge_dev;

	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
	pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
}
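/*
 * Usage sketch (illustrative): the BASEADDR/DATA register pair gives
 * indirect access to NIC-internal memory through PCI config space,
 * e.g. reading the firmware signature word that bge_attach() checks:
 *
 *	uint32_t sig = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG);
 */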
static uint32_t
bge_readreg_ind(struct bge_softc *sc, uint32_t off)
{
	device_t dev = sc->bge_dev;

	pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
	return(pci_read_config(dev, BGE_PCI_REG_DATA, 4));
}

static void
bge_writereg_ind(struct bge_softc *sc, uint32_t off, uint32_t val)
{
	device_t dev = sc->bge_dev;

	pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
	pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);
}

static void
bge_writemem_direct(struct bge_softc *sc, uint32_t off, uint32_t val)
{
	CSR_WRITE_4(sc, off, val);
}

static void
bge_writembx(struct bge_softc *sc, int off, int val)
{
	if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
		off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI;

	CSR_WRITE_4(sc, off, val);
}
static uint8_t
bge_nvram_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
{
	uint32_t access, byte = 0;
	int i;

	/* Lock. */
	CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
	for (i = 0; i < 8000; i++) {
		if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1)
			break;
	}

	/* Enable access. */
	access = CSR_READ_4(sc, BGE_NVRAM_ACCESS);
	CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access | BGE_NVRAMACC_ENABLE);

	CSR_WRITE_4(sc, BGE_NVRAM_ADDR, addr & 0xfffffffc);
	CSR_WRITE_4(sc, BGE_NVRAM_CMD, BGE_NVRAM_READCMD);
	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
		if (CSR_READ_4(sc, BGE_NVRAM_CMD) & BGE_NVRAMCMD_DONE) {
			break;
		}
	}

	if (i == BGE_TIMEOUT * 10) {
		if_printf(&sc->arpcom.ac_if, "nvram read timed out\n");
		return (1);
	}

	/* Get result. */
	byte = CSR_READ_4(sc, BGE_NVRAM_RDDATA);
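	/*
	 * The NVRAM interface returns a big-endian 32-bit word; after the
	 * bswap32() below, (addr % 4) selects the byte lane.  Worked
	 * example: addr = 6 reads the word at offset 4 and picks byte
	 * lane 2 (a shift of 16 bits).
	 */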
	*dest = (bswap32(byte) >> ((addr % 4) * 8)) & 0xFF;

	/* Disable access. */
	CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access);

	/* Unlock. */
	CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_CLR1);
	CSR_READ_4(sc, BGE_NVRAM_SWARB);

	return (0);
}
/*
 * Read a sequence of bytes from NVRAM.
 */
static int
bge_read_nvram(struct bge_softc *sc, caddr_t dest, int off, int cnt)
{
	int err = 0, i;
	uint8_t byte = 0;

	if (sc->bge_asicrev != BGE_ASICREV_BCM5906)
		return (1);

	for (i = 0; i < cnt; i++) {
		err = bge_nvram_getbyte(sc, off + i, &byte);
		if (err)
			break;
		*(dest + i) = byte;
	}

	return (err ? 1 : 0);
}
/*
 * Read a byte of data stored in the EEPROM at address 'addr.'  The
 * BCM570x supports both the traditional bitbang interface and an
 * auto access interface for reading the EEPROM.  We use the auto
 * access method.
 */
static uint8_t
bge_eeprom_getbyte(struct bge_softc *sc, uint32_t addr, uint8_t *dest)
{
	int i;
	uint32_t byte = 0;

	/*
	 * Enable use of auto EEPROM access so we can avoid
	 * having to use the bitbang method.
	 */
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);

	/* Reset the EEPROM, load the clock period. */
	CSR_WRITE_4(sc, BGE_EE_ADDR,
	    BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));

	/* Issue the read EEPROM command. */
	CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);

	/* Wait for completion */
	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
		if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
			break;
	}

	if (i == BGE_TIMEOUT * 10) {
		if_printf(&sc->arpcom.ac_if, "eeprom read timed out\n");
		return (1);
	}

	/* Get result. */
	byte = CSR_READ_4(sc, BGE_EE_DATA);

	*dest = (byte >> ((addr % 4) * 8)) & 0xFF;

	return (0);
}
/*
 * Read a sequence of bytes from the EEPROM.
 */
static int
bge_read_eeprom(struct bge_softc *sc, caddr_t dest, uint32_t off, size_t len)
{
	size_t i;
	int err;
	uint8_t byte;

	for (byte = 0, err = 0, i = 0; i < len; i++) {
		err = bge_eeprom_getbyte(sc, off + i, &byte);
		if (err)
			break;
		*(dest + i) = byte;
	}

	return (err ? 1 : 0);
}
static int
bge_miibus_readreg(device_t dev, int phy, int reg)
{
	struct bge_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t val, autopoll;
	int i;

	/*
	 * Broadcom's own driver always assumes the internal
	 * PHY is at GMII address 1.  On some chips, the PHY responds
	 * to accesses at all addresses, which could cause us to
	 * bogusly attach the PHY 32 times at probe time.  Always
	 * restricting the lookup to address 1 is simpler than
	 * trying to figure out which chip revisions should be
	 * special-cased.
	 */
	if (phy != 1)
		return (0);

	/* Reading with autopolling on may trigger PCI errors */
	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
	}

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY|
	    BGE_MIPHY(phy)|BGE_MIREG(reg));

	for (i = 0; i < BGE_TIMEOUT; i++) {
		val = CSR_READ_4(sc, BGE_MI_COMM);
		if (!(val & BGE_MICOMM_BUSY))
			break;
	}

	if (i == BGE_TIMEOUT) {
		if_printf(ifp, "PHY read timed out "
			  "(phy %d, reg %d, val 0x%08x)\n", phy, reg, val);
		val = 0;
	} else {
		val = CSR_READ_4(sc, BGE_MI_COMM);
	}

	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
	}

	if (val & BGE_MICOMM_READFAIL)
		return (0);

	return(val & 0xFFFF);
}
static int
bge_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct bge_softc *sc = device_get_softc(dev);
	uint32_t autopoll;
	int i;

	/*
	 * See the related comment in bge_miibus_readreg().
	 */
	if (phy != 1)
		return (0);

	if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
	    (reg == BRGPHY_MII_1000CTL || reg == BRGPHY_MII_AUXCTL))
		return (0);

	/* Writing with autopolling on may trigger PCI errors */
	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
	}

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY|
	    BGE_MIPHY(phy)|BGE_MIREG(reg)|val);

	for (i = 0; i < BGE_TIMEOUT; i++) {
		if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) {
			CSR_READ_4(sc, BGE_MI_COMM); /* dummy read */
			break;
		}
	}

	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
	}

	if (i == BGE_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if, "PHY write timed out "
			  "(phy %d, reg %d, val %d)\n", phy, reg, val);
	}

	return (0);
}
static void
bge_miibus_statchg(device_t dev)
{
	struct bge_softc *sc;
	struct mii_data *mii;

	sc = device_get_softc(dev);
	mii = device_get_softc(sc->bge_miibus);

	BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
	} else {
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
	}

	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
		BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
	} else {
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
	}
}
/*
 * Memory management for jumbo frames.
 */
static int
bge_alloc_jumbo_mem(struct bge_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct bge_jslot *entry;
	uint8_t *ptr;
	bus_addr_t paddr;
	int i, error;

	/*
	 * Create tag for jumbo mbufs.
	 * This is really a bit of a kludge. We allocate a special
	 * jumbo buffer pool which (thanks to the way our DMA
	 * memory allocation works) will consist of contiguous
	 * pages. This means that even though a jumbo buffer might
	 * be larger than a page size, we don't really need to
	 * map it into more than one DMA segment. However, the
	 * default mbuf tag will result in multi-segment mappings,
	 * so we have to create a special jumbo mbuf tag that
	 * lets us get away with mapping the jumbo buffers as
	 * a single segment. I think eventually the driver should
	 * be changed so that it uses ordinary mbufs and cluster
	 * buffers, i.e. jumbo frames can span multiple DMA
	 * descriptors. But that's a project for another day.
	 */

	/*
	 * Create DMA stuffs for jumbo RX ring.
	 */
	error = bge_dma_block_alloc(sc, BGE_JUMBO_RX_RING_SZ,
				    &sc->bge_cdata.bge_rx_jumbo_ring_tag,
				    &sc->bge_cdata.bge_rx_jumbo_ring_map,
				    (void *)&sc->bge_ldata.bge_rx_jumbo_ring,
				    &sc->bge_ldata.bge_rx_jumbo_ring_paddr);
	if (error) {
		if_printf(ifp, "could not create jumbo RX ring\n");
		return error;
	}

	/*
	 * Create DMA stuffs for jumbo buffer block.
	 */
	error = bge_dma_block_alloc(sc, BGE_JMEM,
				    &sc->bge_cdata.bge_jumbo_tag,
				    &sc->bge_cdata.bge_jumbo_map,
				    (void **)&sc->bge_ldata.bge_jumbo_buf,
				    &paddr);
	if (error) {
		if_printf(ifp, "could not create jumbo buffer\n");
		return error;
	}

	SLIST_INIT(&sc->bge_jfree_listhead);

	/*
	 * Now divide it up into 9K pieces and save the addresses
	 * in an array. Note that we play an evil trick here by using
	 * the first few bytes in the buffer to hold the address
	 * of the softc structure for this interface. This is because
	 * bge_jfree() needs it, but it is called by the mbuf management
	 * code which will not pass it to us explicitly.
	 */
	for (i = 0, ptr = sc->bge_ldata.bge_jumbo_buf; i < BGE_JSLOTS; i++) {
		entry = &sc->bge_cdata.bge_jslots[i];
		entry->bge_sc = sc;
		entry->bge_buf = ptr;
		entry->bge_paddr = paddr;
		entry->bge_inuse = 0;
		entry->bge_slot = i;
		SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, entry, jslot_link);
	}
	return 0;
}
static void
bge_free_jumbo_mem(struct bge_softc *sc)
{
	/* Destroy jumbo RX ring. */
	bge_dma_block_free(sc->bge_cdata.bge_rx_jumbo_ring_tag,
			   sc->bge_cdata.bge_rx_jumbo_ring_map,
			   sc->bge_ldata.bge_rx_jumbo_ring);

	/* Destroy jumbo buffer block. */
	bge_dma_block_free(sc->bge_cdata.bge_jumbo_tag,
			   sc->bge_cdata.bge_jumbo_map,
			   sc->bge_ldata.bge_jumbo_buf);
}
/*
 * Allocate a jumbo buffer.
 */
static struct bge_jslot *
bge_jalloc(struct bge_softc *sc)
{
	struct bge_jslot *entry;

	lwkt_serialize_enter(&sc->bge_jslot_serializer);
	entry = SLIST_FIRST(&sc->bge_jfree_listhead);
	if (entry) {
		SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jslot_link);
		entry->bge_inuse = 1;
	} else {
		if_printf(&sc->arpcom.ac_if, "no free jumbo buffers\n");
	}
	lwkt_serialize_exit(&sc->bge_jslot_serializer);
	return(entry);
}
/*
 * Adjust usage count on a jumbo buffer.
 */
static void
bge_jref(void *arg)
{
	struct bge_jslot *entry = (struct bge_jslot *)arg;
	struct bge_softc *sc = entry->bge_sc;

	if (sc == NULL)
		panic("bge_jref: can't find softc pointer!");

	if (&sc->bge_cdata.bge_jslots[entry->bge_slot] != entry) {
		panic("bge_jref: asked to reference buffer "
		      "that we don't manage!");
	} else if (entry->bge_inuse == 0) {
		panic("bge_jref: buffer already free!");
	} else {
		atomic_add_int(&entry->bge_inuse, 1);
	}
}
/*
 * Release a jumbo buffer.
 */
static void
bge_jfree(void *arg)
{
	struct bge_jslot *entry = (struct bge_jslot *)arg;
	struct bge_softc *sc = entry->bge_sc;

	if (sc == NULL)
		panic("bge_jfree: can't find softc pointer!");

	if (&sc->bge_cdata.bge_jslots[entry->bge_slot] != entry) {
		panic("bge_jfree: asked to free buffer that we don't manage!");
	} else if (entry->bge_inuse == 0) {
		panic("bge_jfree: buffer already free!");
	} else {
		/*
		 * Possible MP race to 0, use the serializer.  The atomic insn
		 * is still needed for races against bge_jref().
		 */
		lwkt_serialize_enter(&sc->bge_jslot_serializer);
		atomic_subtract_int(&entry->bge_inuse, 1);
		if (entry->bge_inuse == 0) {
			SLIST_INSERT_HEAD(&sc->bge_jfree_listhead,
					  entry, jslot_link);
		}
		lwkt_serialize_exit(&sc->bge_jslot_serializer);
	}
}
/*
 * Initialize a standard receive ring descriptor.
 */
static int
bge_newbuf_std(struct bge_softc *sc, int i, int init)
{
	struct mbuf *m_new = NULL;
	bus_dma_segment_t seg;
	bus_dmamap_t map;
	int error, nsegs;

	m_new = m_getcl(init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m_new == NULL)
		return ENOBUFS;
	m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;

	if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
		m_adj(m_new, ETHER_ALIGN);
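	/*
	 * Note: ETHER_ALIGN is 2, so offsetting the buffer by two bytes
	 * leaves the IP header that follows the 14-byte Ethernet header
	 * on a 32-bit boundary; chips with the RX alignment bug get an
	 * unshifted buffer instead.
	 */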
	error = bus_dmamap_load_mbuf_segment(sc->bge_cdata.bge_rx_mtag,
			sc->bge_cdata.bge_rx_tmpmap, m_new,
			&seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m_new);
		return error;
	}

	if (!init) {
		bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
				sc->bge_cdata.bge_rx_std_dmamap[i],
				BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag,
				  sc->bge_cdata.bge_rx_std_dmamap[i]);
	}

	map = sc->bge_cdata.bge_rx_tmpmap;
	sc->bge_cdata.bge_rx_tmpmap = sc->bge_cdata.bge_rx_std_dmamap[i];
	sc->bge_cdata.bge_rx_std_dmamap[i] = map;

	sc->bge_cdata.bge_rx_std_chain[i].bge_mbuf = m_new;
	sc->bge_cdata.bge_rx_std_chain[i].bge_paddr = seg.ds_addr;

	bge_setup_rxdesc_std(sc, i);
	return 0;
}
static void
bge_setup_rxdesc_std(struct bge_softc *sc, int i)
{
	struct bge_rxchain *rc;
	struct bge_rx_bd *r;

	rc = &sc->bge_cdata.bge_rx_std_chain[i];
	r = &sc->bge_ldata.bge_rx_std_ring[i];

	r->bge_addr.bge_addr_lo = BGE_ADDR_LO(rc->bge_paddr);
	r->bge_addr.bge_addr_hi = BGE_ADDR_HI(rc->bge_paddr);
	r->bge_len = rc->bge_mbuf->m_len;
	r->bge_idx = i;
	r->bge_flags = BGE_RXBDFLAG_END;
}
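/*
 * Illustration: BGE_ADDR_LO()/BGE_ADDR_HI() split a 64-bit bus address
 * into the two 32-bit halves the NIC's descriptor format expects, e.g.
 * a paddr of 0x123456789a yields bge_addr_hi 0x12 and bge_addr_lo
 * 0x3456789a.
 */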
/*
 * Initialize a jumbo receive ring descriptor.  This allocates
 * a jumbo buffer from the pool managed internally by the driver.
 */
static int
bge_newbuf_jumbo(struct bge_softc *sc, int i, int init)
{
	struct mbuf *m_new = NULL;
	struct bge_jslot *buf;
	bus_addr_t paddr;

	/* Allocate the mbuf. */
	MGETHDR(m_new, init ? MB_WAIT : MB_DONTWAIT, MT_DATA);
	if (m_new == NULL)
		return ENOBUFS;

	/* Allocate the jumbo buffer */
	buf = bge_jalloc(sc);
	if (buf == NULL) {
		m_freem(m_new);
		return ENOBUFS;
	}

	/* Attach the buffer to the mbuf. */
	m_new->m_ext.ext_arg = buf;
	m_new->m_ext.ext_buf = buf->bge_buf;
	m_new->m_ext.ext_free = bge_jfree;
	m_new->m_ext.ext_ref = bge_jref;
	m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN;

	m_new->m_flags |= M_EXT;

	m_new->m_data = m_new->m_ext.ext_buf;
	m_new->m_len = m_new->m_pkthdr.len = m_new->m_ext.ext_size;

	paddr = buf->bge_paddr;
	if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0) {
		m_adj(m_new, ETHER_ALIGN);
		paddr += ETHER_ALIGN;
	}

	/* Save necessary information */
	sc->bge_cdata.bge_rx_jumbo_chain[i].bge_mbuf = m_new;
	sc->bge_cdata.bge_rx_jumbo_chain[i].bge_paddr = paddr;

	/* Set up the descriptor. */
	bge_setup_rxdesc_jumbo(sc, i);
	return 0;
}
static void
bge_setup_rxdesc_jumbo(struct bge_softc *sc, int i)
{
	struct bge_rx_bd *r;
	struct bge_rxchain *rc;

	r = &sc->bge_ldata.bge_rx_jumbo_ring[i];
	rc = &sc->bge_cdata.bge_rx_jumbo_chain[i];

	r->bge_addr.bge_addr_lo = BGE_ADDR_LO(rc->bge_paddr);
	r->bge_addr.bge_addr_hi = BGE_ADDR_HI(rc->bge_paddr);
	r->bge_len = rc->bge_mbuf->m_len;
	r->bge_idx = i;
	r->bge_flags = BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING;
}
static int
bge_init_rx_ring_std(struct bge_softc *sc)
{
	int i, error;

	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
		error = bge_newbuf_std(sc, i, 1);
		if (error)
			return error;
	}

	sc->bge_std = BGE_STD_RX_RING_CNT - 1;
	bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
	return(0);
}
static void
bge_free_rx_ring_std(struct bge_softc *sc)
{
	int i;

	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
		struct bge_rxchain *rc = &sc->bge_cdata.bge_rx_std_chain[i];

		if (rc->bge_mbuf != NULL) {
			bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag,
					  sc->bge_cdata.bge_rx_std_dmamap[i]);
			m_freem(rc->bge_mbuf);
			rc->bge_mbuf = NULL;
		}
		bzero(&sc->bge_ldata.bge_rx_std_ring[i],
		      sizeof(struct bge_rx_bd));
	}
}
static int
bge_init_rx_ring_jumbo(struct bge_softc *sc)
{
	struct bge_rcb *rcb;
	int i, error;

	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
		error = bge_newbuf_jumbo(sc, i, 1);
		if (error)
			return error;
	}

	sc->bge_jumbo = BGE_JUMBO_RX_RING_CNT - 1;

	rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
	rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 0);
	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);

	bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
	return(0);
}
static void
bge_free_rx_ring_jumbo(struct bge_softc *sc)
{
	int i;

	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
		struct bge_rxchain *rc = &sc->bge_cdata.bge_rx_jumbo_chain[i];

		if (rc->bge_mbuf != NULL) {
			m_freem(rc->bge_mbuf);
			rc->bge_mbuf = NULL;
		}
		bzero(&sc->bge_ldata.bge_rx_jumbo_ring[i],
		      sizeof(struct bge_rx_bd));
	}
}
static void
bge_free_tx_ring(struct bge_softc *sc)
{
	int i;

	for (i = 0; i < BGE_TX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
			bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag,
					  sc->bge_cdata.bge_tx_dmamap[i]);
			m_freem(sc->bge_cdata.bge_tx_chain[i]);
			sc->bge_cdata.bge_tx_chain[i] = NULL;
		}
		bzero(&sc->bge_ldata.bge_tx_ring[i],
		      sizeof(struct bge_tx_bd));
	}
}
static int
bge_init_tx_ring(struct bge_softc *sc)
{
	sc->bge_txcnt = 0;
	sc->bge_tx_saved_considx = 0;
	sc->bge_tx_prodidx = 0;

	/* Initialize transmit producer index for host-memory send ring. */
	bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);

	/* 5700 b2 errata */
	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
		bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);

	bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
	/* 5700 b2 errata */
	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
		bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);

	return(0);
}
static void
bge_setmulti(struct bge_softc *sc)
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	uint32_t hashes[4] = { 0, 0, 0, 0 };
	int h, i;

	ifp = &sc->arpcom.ac_if;

	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		for (i = 0; i < 4; i++)
			CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
		return;
	}

	/* First, zot all the existing filters. */
	for (i = 0; i < 4; i++)
		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);

	/* Now program new ones. */
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		h = ether_crc32_le(
		    LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
		    ETHER_ADDR_LEN) & 0x7f;
		hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
	}

	for (i = 0; i < 4; i++)
		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
}
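/*
 * Worked example of the hash above: h is a 7-bit value, so bits 6-5
 * ((h & 0x60) >> 5) pick one of the four BGE_MAR registers and bits
 * 4-0 (h & 0x1f) pick the bit within it.  For h = 0x43 that is
 * register BGE_MAR0 + 8, bit 3.
 */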
/*
 * Do endian, PCI and DMA initialization.  Also check the on-board ROM
 * self-test results.
 */
static int
bge_chipinit(struct bge_softc *sc)
{
	int i;
	uint32_t dma_rw_ctl;

	/* Set endian type before we access any non-PCI registers. */
	pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, BGE_INIT, 4);

	/* Clear the MAC control register */
	CSR_WRITE_4(sc, BGE_MAC_MODE, 0);

	/*
	 * Clear the MAC statistics block in the NIC's
	 * internal memory.
	 */
	for (i = BGE_STATS_BLOCK;
	     i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t))
		BGE_MEMWIN_WRITE(sc, i, 0);

	for (i = BGE_STATUS_BLOCK;
	     i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t))
		BGE_MEMWIN_WRITE(sc, i, 0);

	/* Set up the PCI DMA control register. */
	if (sc->bge_flags & BGE_FLAG_PCIE) {
		/* PCI Express */
		dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
		    (0xf << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
		    (0x2 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
	} else if (sc->bge_flags & BGE_FLAG_PCIX) {
		/* PCI-X bus */
		if (BGE_IS_5714_FAMILY(sc)) {
			dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD;
			dma_rw_ctl &= ~BGE_PCIDMARWCTL_ONEDMA_ATONCE; /* XXX */
			/* XXX magic values, Broadcom-supplied Linux driver */
			if (sc->bge_asicrev == BGE_ASICREV_BCM5780) {
				dma_rw_ctl |= (1 << 20) | (1 << 18) |
				    BGE_PCIDMARWCTL_ONEDMA_ATONCE;
			} else {
				dma_rw_ctl |= (1 << 20) | (1 << 18) | (1 << 15);
			}
		} else if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
			/*
			 * The 5704 uses a different encoding of read/write
			 * watermarks.
			 */
			dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
			    (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
			    (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
		} else {
			dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
			    (0x3 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
			    (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
			    0x0F;
		}

		/*
		 * 5703 and 5704 need ONEDMA_AT_ONCE as a workaround
		 * for hardware bugs.
		 */
		if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
		    sc->bge_asicrev == BGE_ASICREV_BCM5704) {
			uint32_t tmp;

			tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1f;
			if (tmp == 0x6 || tmp == 0x7)
				dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE;
		}
	} else {
		/* Conventional PCI bus */
		dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
		    (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
		    (0x7 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
		    0x0F;
	}

	if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5704 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5705)
		dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
	pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);

	/*
	 * Set up general mode register.
	 */
	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS|
	    BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS|
	    BGE_MODECTL_TX_NO_PHDR_CSUM);

	/*
	 * Disable memory write invalidate.  Apparently it is not supported
	 * properly by these devices.
	 */
	PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD, PCIM_CMD_MWIEN, 4);

	/* Set the timer prescaler (always 66MHz) */
	CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/);
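	/*
	 * The value 65 (i.e. 66 - 1) written to the prescaler field
	 * apparently divides the 66MHz core clock down to a 1MHz timer
	 * tick, which is why the coalescing tick values elsewhere in
	 * this driver are expressed in microseconds.  (This is an
	 * inference from the constant, not a statement from the docs.)
	 */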
	if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
		DELAY(40);	/* XXX */

		/* Put PHY into ready state */
		BGE_CLRBIT(sc, BGE_MISC_CFG, BGE_MISCCFG_EPHY_IDDQ);
		CSR_READ_4(sc, BGE_MISC_CFG); /* Flush */
	}

	return(0);
}
static int
bge_blockinit(struct bge_softc *sc)
{
	struct bge_rcb *rcb;
	bge_hostaddr taddr;
	uint32_t val, vrcb;
	int i;

	/*
	 * Initialize the memory window pointer register so that
	 * we can access the first 32K of internal NIC RAM. This will
	 * allow us to set up the TX send ring RCBs and the RX return
	 * ring RCBs, plus other things which live in NIC memory.
	 */
	CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);

	/* Note: the BCM5704 has a smaller mbuf space than other chips. */

	if (!BGE_IS_5705_PLUS(sc)) {
		/* Configure mbuf memory pool */
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_BUFFPOOL_1);
		if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
		else
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);

		/* Configure DMA resource pool */
		CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
		    BGE_DMA_DESCRIPTORS);
		CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
	}

	/* Configure mbuf pool watermarks */
	if (!BGE_IS_5705_PLUS(sc)) {
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
	} else if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x04);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x10);
	} else {
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
	}

	/* Configure DMA resource watermarks */
	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);

	/* Enable buffer manager */
	if (!BGE_IS_5705_PLUS(sc)) {
		CSR_WRITE_4(sc, BGE_BMAN_MODE,
		    BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN);

		/* Poll for buffer manager start indication */
		for (i = 0; i < BGE_TIMEOUT; i++) {
			if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
				break;
		}

		if (i == BGE_TIMEOUT) {
			if_printf(&sc->arpcom.ac_if,
				  "buffer manager failed to start\n");
			return(ENXIO);
		}
	}

	/* Enable flow-through queues */
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);

	/* Wait until queue initialization is complete */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
			break;
	}

	if (i == BGE_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if,
			  "flow-through queue init failed\n");
		return(ENXIO);
	}

	/* Initialize the standard RX ring control block */
	rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb;
	rcb->bge_hostaddr.bge_addr_lo =
	    BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr);
	rcb->bge_hostaddr.bge_addr_hi =
	    BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr);
	if (BGE_IS_5705_PLUS(sc))
		rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
	else
		rcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
	rcb->bge_nicaddr = BGE_STD_RX_RINGS;
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);

	/*
	 * Initialize the jumbo RX ring control block.
	 * We set the 'ring disabled' bit in the flags
	 * field until we're actually ready to start
	 * using this ring (i.e. once we set the MTU
	 * high enough to require it).
	 */
	if (BGE_IS_JUMBO_CAPABLE(sc)) {
		rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;

		rcb->bge_hostaddr.bge_addr_lo =
		    BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
		rcb->bge_hostaddr.bge_addr_hi =
		    BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
		rcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN,
		    BGE_RCB_FLAG_RING_DISABLED);
		rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
		    rcb->bge_hostaddr.bge_addr_hi);
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
		    rcb->bge_hostaddr.bge_addr_lo);
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
		    rcb->bge_maxlen_flags);
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);

		/* Set up dummy disabled mini ring RCB */
		rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb;
		rcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
		CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
		    rcb->bge_maxlen_flags);
	}

	/*
	 * Set the BD ring replenish thresholds.  The recommended
	 * values are 1/8th the number of descriptors allocated to
	 * each ring.
	 */
	if (BGE_IS_5705_PLUS(sc))
		val = 8;
	else
		val = BGE_STD_RX_RING_CNT / 8;
	CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, val);
	CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8);
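	/*
	 * Arithmetic check (assuming the usual ring sizes of 512 standard
	 * and 256 jumbo descriptors): the thresholds above come to
	 * 512/8 = 64 and 256/8 = 32 descriptors respectively.
	 */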
	/*
	 * Disable all unused send rings by setting the 'ring disabled'
	 * bit in the flags field of all the TX send ring control blocks.
	 * These are located in NIC memory.
	 */
	vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
	for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) {
		RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
		RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
		vrcb += sizeof(struct bge_rcb);
	}

	/* Configure TX RCB 0 (we use only the first ring) */
	vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
	BGE_HOSTADDR(taddr, sc->bge_ldata.bge_tx_ring_paddr);
	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
	RCB_WRITE_4(sc, vrcb, bge_nicaddr,
	    BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
	if (!BGE_IS_5705_PLUS(sc)) {
		RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
		    BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
	}

	/* Disable all unused RX return rings */
	vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
	for (i = 0; i < BGE_RX_RINGS_MAX; i++) {
		RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0);
		RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0);
		RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
		    BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt,
		    BGE_RCB_FLAG_RING_DISABLED));
		RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
		bge_writembx(sc, BGE_MBX_RX_CONS0_LO +
		    (i * (sizeof(uint64_t))), 0);
		vrcb += sizeof(struct bge_rcb);
	}

	/* Initialize RX ring indexes */
	bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0);
	bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
	bge_writembx(sc, BGE_MBX_RX_MINI_PROD_LO, 0);

	/*
	 * Set up RX return ring 0.
	 * Note that the NIC address for RX return rings is 0x00000000.
	 * The return rings live entirely within the host, so the
	 * nicaddr field in the RCB isn't used.
	 */
	vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
	BGE_HOSTADDR(taddr, sc->bge_ldata.bge_rx_return_ring_paddr);
	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
	RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0x00000000);
	RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
	    BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));

	/* Set random backoff seed for TX */
	CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
	    sc->arpcom.ac_enaddr[0] + sc->arpcom.ac_enaddr[1] +
	    sc->arpcom.ac_enaddr[2] + sc->arpcom.ac_enaddr[3] +
	    sc->arpcom.ac_enaddr[4] + sc->arpcom.ac_enaddr[5] +
	    BGE_TX_BACKOFF_SEED_MASK);

	/* Set inter-packet gap */
	CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);

	/*
	 * Specify which ring to use for packets that don't match
	 * any RX rules.
	 */
	CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);

	/*
	 * Configure number of RX lists.  One interrupt distribution
	 * list, sixteen active lists, one bad frames class.
	 */
	CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);

	/* Initialize RX list placement stats mask. */
	CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
	CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
	/* Disable host coalescing until we get it set up */
	CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);

	/* Poll to make sure it's shut down. */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
			break;
	}

	if (i == BGE_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if,
			  "host coalescing engine failed to idle\n");
		return(ENXIO);
	}

	/* Set up host coalescing defaults */
	CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
	CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
	if (!BGE_IS_5705_PLUS(sc)) {
		CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
		CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
	}
	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 1);
	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 1);

	/* Set up address of statistics block */
	if (!BGE_IS_5705_PLUS(sc)) {
		CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI,
		    BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr));
		CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
		    BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr));

		CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
		CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
		CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
	}

	/* Set up address of status block */
	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI,
	    BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr));
	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
	    BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr));
	sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx = 0;
	sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx = 0;

	/* Turn on host coalescing state machine */
	CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);

	/* Turn on RX BD completion state machine and enable attentions */
	CSR_WRITE_4(sc, BGE_RBDC_MODE,
	    BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);

	/* Turn on RX list placement state machine */
	CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);

	/* Turn on RX list selector state machine. */
	if (!BGE_IS_5705_PLUS(sc))
		CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);

	/* Turn on DMA, clear stats */
	CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB|
	    BGE_MACMODE_RXDMA_ENB|BGE_MACMODE_RX_STATS_CLEAR|
	    BGE_MACMODE_TX_STATS_CLEAR|BGE_MACMODE_RX_STATS_ENB|
	    BGE_MACMODE_TX_STATS_ENB|BGE_MACMODE_FRMHDR_DMA_ENB|
	    ((sc->bge_flags & BGE_FLAG_TBI) ?
	     BGE_PORTMODE_TBI : BGE_PORTMODE_MII));
	/* Set misc. local control, enable interrupts on attentions */
	CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);

#ifdef notdef
	/* Assert GPIO pins for PHY reset */
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
	    BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
	    BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
#endif

	/* Turn on DMA completion state machine */
	if (!BGE_IS_5705_PLUS(sc))
		CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);

	/* Turn on write DMA state machine */
	val = BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS;
	if (sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5787)
		val |= (1 << 29);	/* Enable host coalescing bug fix. */
	CSR_WRITE_4(sc, BGE_WDMA_MODE, val);

	/* Turn on read DMA state machine */
	val = BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS;
	if (sc->bge_flags & BGE_FLAG_PCIE)
		val |= BGE_RDMAMODE_FIFO_LONG_BURST;
	CSR_WRITE_4(sc, BGE_RDMA_MODE, val);

	/* Turn on RX data completion state machine */
	CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);

	/* Turn on RX BD initiator state machine */
	CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);

	/* Turn on RX data and RX BD initiator state machine */
	CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);

	/* Turn on Mbuf cluster free state machine */
	if (!BGE_IS_5705_PLUS(sc))
		CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);

	/* Turn on send BD completion state machine */
	CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);

	/* Turn on send data completion state machine */
	CSR_WRITE_4(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);

	/* Turn on send data initiator state machine */
	CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);

	/* Turn on send BD initiator state machine */
	CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);

	/* Turn on send BD selector state machine */
	CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);

	CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
	CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
	    BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);

	/* ack/clear link change events */
	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
	    BGE_MACSTAT_LINK_CHANGED);
	CSR_WRITE_4(sc, BGE_MI_STS, 0);

	/* Enable PHY auto polling (for MII/GMII only) */
	if (sc->bge_flags & BGE_FLAG_TBI) {
		CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
	} else {
		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL|10<<16);
		if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
		    sc->bge_chipid != BGE_CHIPID_BCM5700_B2) {
			CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
			    BGE_EVTENB_MI_INTERRUPT);
		}
	}

	/*
	 * Clear any pending link state attention.
	 * Otherwise some link state change events may be lost until attention
	 * is cleared by bge_intr() -> bge_softc.bge_link_upd() sequence.
	 * It's not necessary on newer BCM chips - perhaps enabling link
	 * state change attentions implies clearing pending attention.
	 */
	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
	    BGE_MACSTAT_LINK_CHANGED);

	/* Enable link state change attentions. */
	BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);

	return(0);
}
/*
 * Probe for a Broadcom chip.  Check the PCI vendor and device IDs
 * against our list and return its name if we find a match.  Note
 * that since the Broadcom controller contains VPD support, we
 * can get the device name string from the controller itself instead
 * of the compiled-in string.  This is a little slow, but it guarantees
 * we'll always announce the right product name.
 */
static int
bge_probe(device_t dev)
{
	const struct bge_type *t;
	uint16_t product, vendor;

	product = pci_get_device(dev);
	vendor = pci_get_vendor(dev);

	for (t = bge_devs; t->bge_name != NULL; t++) {
		if (vendor == t->bge_vid && product == t->bge_did)
			break;
	}
	if (t->bge_name == NULL)
		return(ENXIO);

	device_set_desc(dev, t->bge_name);
	if (pci_get_subvendor(dev) == PCI_VENDOR_DELL) {
		struct bge_softc *sc = device_get_softc(dev);
		sc->bge_flags |= BGE_FLAG_NO_3LED;
	}
	return(0);
}
static int
bge_attach(device_t dev)
{
	struct ifnet *ifp;
	struct bge_softc *sc;
	uint32_t hwcfg = 0;
	int error = 0, rid;
	uint8_t ether_addr[ETHER_ADDR_LEN];

	sc = device_get_softc(dev);
	sc->bge_dev = dev;
	callout_init(&sc->bge_stat_timer);
	lwkt_serialize_init(&sc->bge_jslot_serializer);

#ifndef BURN_BRIDGES
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		uint32_t irq, mem;

		irq = pci_read_config(dev, PCIR_INTLINE, 4);
		mem = pci_read_config(dev, BGE_PCI_BAR0, 4);

		device_printf(dev, "chip is in D%d power mode "
			      "-- setting to D0\n", pci_get_powerstate(dev));

		pci_set_powerstate(dev, PCI_POWERSTATE_D0);

		pci_write_config(dev, PCIR_INTLINE, irq, 4);
		pci_write_config(dev, BGE_PCI_BAR0, mem, 4);
	}
#endif	/* !BURN_BRIDGES */
	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

	rid = BGE_PCI_BAR0;
	sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->bge_res == NULL) {
		device_printf(dev, "couldn't map memory\n");
		return ENXIO;
	}

	sc->bge_btag = rman_get_bustag(sc->bge_res);
	sc->bge_bhandle = rman_get_bushandle(sc->bge_res);

	/* Save various chip information */
	sc->bge_chipid =
	    pci_read_config(dev, BGE_PCI_MISC_CTL, 4) &
	    BGE_PCIMISCCTL_ASICREV;
	sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid);
	sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid);
	/* Save chipset family. */
	switch (sc->bge_asicrev) {
	case BGE_ASICREV_BCM5700:
	case BGE_ASICREV_BCM5701:
	case BGE_ASICREV_BCM5703:
	case BGE_ASICREV_BCM5704:
		sc->bge_flags |= BGE_FLAG_5700_FAMILY | BGE_FLAG_JUMBO;
		break;

	case BGE_ASICREV_BCM5714_A0:
	case BGE_ASICREV_BCM5780:
	case BGE_ASICREV_BCM5714:
		sc->bge_flags |= BGE_FLAG_5714_FAMILY;
		/* Fall through */

	case BGE_ASICREV_BCM5750:
	case BGE_ASICREV_BCM5752:
	case BGE_ASICREV_BCM5755:
	case BGE_ASICREV_BCM5787:
	case BGE_ASICREV_BCM5906:
		sc->bge_flags |= BGE_FLAG_575X_PLUS;
		/* Fall through */

	case BGE_ASICREV_BCM5705:
		sc->bge_flags |= BGE_FLAG_5705_PLUS;
		break;
	}

	if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
		sc->bge_flags |= BGE_FLAG_NO_EEPROM;
	/*
	 * Set various quirk flags.
	 */
	sc->bge_flags |= BGE_FLAG_ETH_WIRESPEED;
	if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
	    (sc->bge_asicrev == BGE_ASICREV_BCM5705 &&
	     (sc->bge_chipid != BGE_CHIPID_BCM5705_A0 &&
	      sc->bge_chipid != BGE_CHIPID_BCM5705_A1)) ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5906)
		sc->bge_flags &= ~BGE_FLAG_ETH_WIRESPEED;

	if (sc->bge_chipid == BGE_CHIPID_BCM5701_A0 ||
	    sc->bge_chipid == BGE_CHIPID_BCM5701_B0)
		sc->bge_flags |= BGE_FLAG_CRC_BUG;

	if (sc->bge_chiprev == BGE_CHIPREV_5703_AX ||
	    sc->bge_chiprev == BGE_CHIPREV_5704_AX)
		sc->bge_flags |= BGE_FLAG_ADC_BUG;

	if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0)
		sc->bge_flags |= BGE_FLAG_5704_A0_BUG;

	if (BGE_IS_5705_PLUS(sc)) {
		if (sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
		    sc->bge_asicrev == BGE_ASICREV_BCM5787) {
			uint32_t product = pci_get_device(dev);

			if (product != PCI_PRODUCT_BROADCOM_BCM5722 &&
			    product != PCI_PRODUCT_BROADCOM_BCM5756)
				sc->bge_flags |= BGE_FLAG_JITTER_BUG;
			if (product == PCI_PRODUCT_BROADCOM_BCM5755M)
				sc->bge_flags |= BGE_FLAG_ADJUST_TRIM;
		} else if (sc->bge_asicrev != BGE_ASICREV_BCM5906) {
			sc->bge_flags |= BGE_FLAG_BER_BUG;
		}
	}
	/* Allocate interrupt */
	rid = 0;
	sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);
	if (sc->bge_irq == NULL) {
		device_printf(dev, "couldn't map interrupt\n");
		error = ENXIO;
		goto fail;
	}

	/*
	 * Check if this is a PCI-X or PCI Express device.
	 */
	if (BGE_IS_5705_PLUS(sc)) {
		if (pci_is_pcie(dev)) {
			sc->bge_flags |= BGE_FLAG_PCIE;
			pcie_set_max_readrq(dev, PCIEM_DEVCTL_MAX_READRQ_4096);
		}
	} else {
		/*
		 * Check if the device is in PCI-X Mode.
		 * (This bit is not valid on PCI Express controllers.)
		 */
		if ((pci_read_config(sc->bge_dev, BGE_PCI_PCISTATE, 4) &
		     BGE_PCISTATE_PCI_BUSMODE) == 0)
			sc->bge_flags |= BGE_FLAG_PCIX;
	}

	device_printf(dev, "CHIP ID 0x%08x; "
		      "ASIC REV 0x%02x; CHIP REV 0x%02x; %s\n",
		      sc->bge_chipid, sc->bge_asicrev, sc->bge_chiprev,
		      (sc->bge_flags & BGE_FLAG_PCIX) ? "PCI-X"
		      : ((sc->bge_flags & BGE_FLAG_PCIE) ?
			 "PCI-E" : "PCI"));
	ifp = &sc->arpcom.ac_if;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));

	/* Try to reset the chip. */
	bge_reset(sc);

	if (bge_chipinit(sc)) {
		device_printf(dev, "chip initialization failed\n");
		error = ENXIO;
		goto fail;
	}

	/*
	 * Get station address
	 */
	error = bge_get_eaddr(sc, ether_addr);
	if (error) {
		device_printf(dev, "failed to read station address\n");
		goto fail;
	}

	/* 5705/5750 limits RX return ring to 512 entries. */
	if (BGE_IS_5705_PLUS(sc))
		sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
	else
		sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;

	error = bge_dma_alloc(sc);
	if (error)
		goto fail;

	/* Set default tuneable values. */
	sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
	sc->bge_rx_coal_ticks = bge_rx_coal_ticks;
	sc->bge_tx_coal_ticks = bge_tx_coal_ticks;
	sc->bge_rx_max_coal_bds = bge_rx_max_coal_bds;
	sc->bge_tx_max_coal_bds = bge_tx_max_coal_bds;
1928 /* Set up ifnet structure */
1930 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1931 ifp->if_ioctl = bge_ioctl;
1932 ifp->if_start = bge_start;
1933 #ifdef DEVICE_POLLING
1934 ifp->if_poll = bge_poll;
1936 ifp->if_watchdog = bge_watchdog;
1937 ifp->if_init = bge_init;
1938 ifp->if_mtu = ETHERMTU;
1939 ifp->if_capabilities = IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
1940 ifq_set_maxlen(&ifp->if_snd, BGE_TX_RING_CNT - 1);
1941 ifq_set_ready(&ifp->if_snd);
1944 * 5700 B0 chips do not support checksumming correctly due
1947 if (sc->bge_chipid != BGE_CHIPID_BCM5700_B0) {
1948 ifp->if_capabilities |= IFCAP_HWCSUM;
1949 ifp->if_hwassist = BGE_CSUM_FEATURES;
1951 ifp->if_capenable = ifp->if_capabilities;
1954 * Figure out what sort of media we have by checking the
1955 * hardware config word in the first 32k of NIC internal memory,
1956 * or fall back to examining the EEPROM if necessary.
1957 * Note: on some BCM5700 cards, this value appears to be unset.
1958 * If that's the case, we have to rely on identifying the NIC
1959 * by its PCI subsystem ID, as we do below for the SysKonnect SK-9D41.
1962 if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER)
1963 hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
1965 if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
1967 device_printf(dev, "failed to read EEPROM\n");
1971 hwcfg = ntohl(hwcfg);
1974 if ((hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER)
1975 sc->bge_flags |= BGE_FLAG_TBI;
1977 /* The SysKonnect SK-9D41 is a 1000baseSX card. */
1978 if (pci_get_subvendor(dev) == PCI_PRODUCT_SCHNEIDERKOCH_SK_9D41)
1979 sc->bge_flags |= BGE_FLAG_TBI;
1981 if (sc->bge_flags & BGE_FLAG_TBI) {
1982 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK,
1983 bge_ifmedia_upd, bge_ifmedia_sts);
1984 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
1985 ifmedia_add(&sc->bge_ifmedia,
1986 IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL);
1987 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
1988 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO);
1989 sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
1992 * Do transceiver setup.
1994 if (mii_phy_probe(dev, &sc->bge_miibus,
1995 bge_ifmedia_upd, bge_ifmedia_sts)) {
1996 device_printf(dev, "MII without any PHY!\n");
2003 * When using the BCM5701 in PCI-X mode, data corruption has
2004 * been observed in the first few bytes of some received packets.
2005 * Aligning the packet buffer in memory eliminates the corruption.
2006 * Unfortunately, this misaligns the packet payloads. On platforms
2007 * which do not support unaligned accesses, we will realign the
2008 * payloads by copying the received packets.
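/*
 * The realignment copy itself is done in bge_rxeof(), under the
 * BGE_FLAG_RX_ALIGNBUG test set here.
 */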
2010 if (sc->bge_asicrev == BGE_ASICREV_BCM5701 &&
2011 (sc->bge_flags & BGE_FLAG_PCIX))
2012 sc->bge_flags |= BGE_FLAG_RX_ALIGNBUG;
2014 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
2015 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) {
2016 sc->bge_link_upd = bge_bcm5700_link_upd;
2017 sc->bge_link_chg = BGE_MACSTAT_MI_INTERRUPT;
2018 } else if (sc->bge_flags & BGE_FLAG_TBI) {
2019 sc->bge_link_upd = bge_tbi_link_upd;
2020 sc->bge_link_chg = BGE_MACSTAT_LINK_CHANGED;
2022 sc->bge_link_upd = bge_copper_link_upd;
2023 sc->bge_link_chg = BGE_MACSTAT_LINK_CHANGED;
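/*
 * In short: BCM5700 chips other than rev B2 track link state via MII
 * interrupts, TBI (fiber) cards via the PCS sync bits, and everything
 * else via the copper auto-poll handler; the three implementations
 * are near the bottom of this file.
 */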
2027 * Create sysctl nodes.
2029 sysctl_ctx_init(&sc->bge_sysctl_ctx);
2030 sc->bge_sysctl_tree = SYSCTL_ADD_NODE(&sc->bge_sysctl_ctx,
2031 SYSCTL_STATIC_CHILDREN(_hw),
2033 device_get_nameunit(dev),
2035 if (sc->bge_sysctl_tree == NULL) {
2036 device_printf(dev, "can't add sysctl node\n");
2041 SYSCTL_ADD_PROC(&sc->bge_sysctl_ctx,
2042 SYSCTL_CHILDREN(sc->bge_sysctl_tree),
2043 OID_AUTO, "rx_coal_ticks",
2044 CTLTYPE_INT | CTLFLAG_RW,
2045 sc, 0, bge_sysctl_rx_coal_ticks, "I",
2046 "Receive coalescing ticks (usec).");
2047 SYSCTL_ADD_PROC(&sc->bge_sysctl_ctx,
2048 SYSCTL_CHILDREN(sc->bge_sysctl_tree),
2049 OID_AUTO, "tx_coal_ticks",
2050 CTLTYPE_INT | CTLFLAG_RW,
2051 sc, 0, bge_sysctl_tx_coal_ticks, "I",
2052 "Transmit coalescing ticks (usec).");
2053 SYSCTL_ADD_PROC(&sc->bge_sysctl_ctx,
2054 SYSCTL_CHILDREN(sc->bge_sysctl_tree),
2055 OID_AUTO, "rx_max_coal_bds",
2056 CTLTYPE_INT | CTLFLAG_RW,
2057 sc, 0, bge_sysctl_rx_max_coal_bds, "I",
2058 "Receive max coalesced BD count.");
2059 SYSCTL_ADD_PROC(&sc->bge_sysctl_ctx,
2060 SYSCTL_CHILDREN(sc->bge_sysctl_tree),
2061 OID_AUTO, "tx_max_coal_bds",
2062 CTLTYPE_INT | CTLFLAG_RW,
2063 sc, 0, bge_sysctl_tx_max_coal_bds, "I",
2064 "Transmit max coalesced BD count.");
2067 * Call MI attach routine.
2069 ether_ifattach(ifp, ether_addr, NULL);
2071 error = bus_setup_intr(dev, sc->bge_irq, INTR_MPSAFE,
2072 bge_intr, sc, &sc->bge_intrhand,
2073 ifp->if_serializer);
2075 ether_ifdetach(ifp);
2076 device_printf(dev, "couldn't set up irq\n");
2080 ifp->if_cpuid = ithread_cpuid(rman_get_start(sc->bge_irq));
2081 KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus);
2090 bge_detach(device_t dev)
2092 struct bge_softc *sc = device_get_softc(dev);
2094 if (device_is_attached(dev)) {
2095 struct ifnet *ifp = &sc->arpcom.ac_if;
2097 lwkt_serialize_enter(ifp->if_serializer);
2100 bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand);
2101 lwkt_serialize_exit(ifp->if_serializer);
2103 ether_ifdetach(ifp);
2106 if (sc->bge_flags & BGE_FLAG_TBI)
2107 ifmedia_removeall(&sc->bge_ifmedia);
2109 device_delete_child(dev, sc->bge_miibus);
2110 bus_generic_detach(dev);
2112 if (sc->bge_irq != NULL)
2113 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->bge_irq);
2115 if (sc->bge_res != NULL)
2116 bus_release_resource(dev, SYS_RES_MEMORY,
2117 BGE_PCI_BAR0, sc->bge_res);
2119 if (sc->bge_sysctl_tree != NULL)
2120 sysctl_ctx_free(&sc->bge_sysctl_ctx);
2128 bge_reset(struct bge_softc *sc)
2131 uint32_t cachesize, command, pcistate, reset;
2132 void (*write_op)(struct bge_softc *, uint32_t, uint32_t);
2137 if (BGE_IS_575X_PLUS(sc) && !BGE_IS_5714_FAMILY(sc) &&
2138 sc->bge_asicrev != BGE_ASICREV_BCM5906) {
2139 if (sc->bge_flags & BGE_FLAG_PCIE)
2140 write_op = bge_writemem_direct;
2142 write_op = bge_writemem_ind;
2144 write_op = bge_writereg_ind;
2147 /* Save some important PCI state. */
2148 cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
2149 command = pci_read_config(dev, BGE_PCI_CMD, 4);
2150 pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);
2152 pci_write_config(dev, BGE_PCI_MISC_CTL,
2153 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2154 BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW, 4);
2156 /* Disable fastboot on controllers that support it. */
2157 if (sc->bge_asicrev == BGE_ASICREV_BCM5752 ||
2158 sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
2159 sc->bge_asicrev == BGE_ASICREV_BCM5787) {
2161 if_printf(&sc->arpcom.ac_if, "Disabling fastboot\n");
2162 CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0x0);
2166 * Write the magic number to SRAM at offset 0xB50.
2167 * When firmware finishes its initialization it will
2168 * write ~BGE_MAGIC_NUMBER to the same location.
2170 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
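/*
 * The matching poll for ~BGE_MAGIC_NUMBER is done further down,
 * once the global reset has been issued.
 */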
2172 reset = BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1);
2174 /* XXX: Broadcom Linux driver. */
2175 if (sc->bge_flags & BGE_FLAG_PCIE) {
2176 if (CSR_READ_4(sc, 0x7e2c) == 0x60) /* PCIE 1.0 */
2177 CSR_WRITE_4(sc, 0x7e2c, 0x20);
2178 if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
2179 /* Prevent PCIE link training during global reset */
2180 CSR_WRITE_4(sc, BGE_MISC_CFG, (1<<29));
2186 * Set GPHY Power Down Override to leave GPHY
2187 * powered up in D0 uninitialized.
2189 if (BGE_IS_5705_PLUS(sc))
2190 reset |= 0x04000000;
2192 /* Issue global reset */
2193 write_op(sc, BGE_MISC_CFG, reset);
2195 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
2196 uint32_t status, ctrl;
2198 status = CSR_READ_4(sc, BGE_VCPU_STATUS);
2199 CSR_WRITE_4(sc, BGE_VCPU_STATUS,
2200 status | BGE_VCPU_STATUS_DRV_RESET);
2201 ctrl = CSR_READ_4(sc, BGE_VCPU_EXT_CTRL);
2202 CSR_WRITE_4(sc, BGE_VCPU_EXT_CTRL,
2203 ctrl & ~BGE_VCPU_EXT_CTRL_HALT_CPU);
2208 /* XXX: Broadcom Linux driver. */
2209 if (sc->bge_flags & BGE_FLAG_PCIE) {
2210 if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
2213 DELAY(500000); /* wait for link training to complete */
2214 v = pci_read_config(dev, 0xc4, 4);
2215 pci_write_config(dev, 0xc4, v | (1<<15), 4);
2218 * Set PCIE max payload size to 128 bytes and
2219 * clear error status.
2221 pci_write_config(dev, 0xd8, 0xf5000, 4);
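/*
 * Offset 0xd8 is presumably the PCIe Device Control/Status register
 * pair in the chip's PCIe capability block; the value written selects
 * the 128-byte payload size and clears the latched error bits.
 */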
2224 /* Reset some of the PCI state that got zapped by reset */
2225 pci_write_config(dev, BGE_PCI_MISC_CTL,
2226 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2227 BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW, 4);
2228 pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
2229 pci_write_config(dev, BGE_PCI_CMD, command, 4);
2230 write_op(sc, BGE_MISC_CFG, (65 << 1));
2232 /* Enable memory arbiter. */
2233 if (BGE_IS_5714_FAMILY(sc)) {
2236 val = CSR_READ_4(sc, BGE_MARB_MODE);
2237 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | val);
2239 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
2242 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
2243 for (i = 0; i < BGE_TIMEOUT; i++) {
2244 val = CSR_READ_4(sc, BGE_VCPU_STATUS);
2245 if (val & BGE_VCPU_STATUS_INIT_DONE)
2249 if (i == BGE_TIMEOUT) {
2250 if_printf(&sc->arpcom.ac_if, "reset timed out\n");
2255 * Poll until we see the 1's complement of the magic number.
2256 * This indicates that the firmware initialization is complete.
2259 for (i = 0; i < BGE_FIRMWARE_TIMEOUT; i++) {
2260 val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
2261 if (val == ~BGE_MAGIC_NUMBER)
2265 if (i == BGE_FIRMWARE_TIMEOUT) {
2266 if_printf(&sc->arpcom.ac_if, "firmware handshake "
2267 "timed out, found 0x%08x\n", val);
2273 * XXX Wait for the value of the PCISTATE register to
2274 * return to its original pre-reset state. This is a
2275 * fairly good indicator of reset completion. If we don't
2276 * wait for the reset to fully complete, trying to read
2277 * from the device's non-PCI registers may yield garbage results.
2280 for (i = 0; i < BGE_TIMEOUT; i++) {
2281 if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate)
2286 if (sc->bge_flags & BGE_FLAG_PCIE) {
2287 reset = bge_readmem_ind(sc, 0x7c00);
2288 bge_writemem_ind(sc, 0x7c00, reset | (1 << 25));
2291 /* Fix up byte swapping */
2292 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS |
2293 BGE_MODECTL_BYTESWAP_DATA);
2295 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
2298 * The 5704 in TBI mode apparently needs some special
2299 * adjustment to ensure the SERDES drive level is set to 1.2V.
2302 if (sc->bge_asicrev == BGE_ASICREV_BCM5704 &&
2303 (sc->bge_flags & BGE_FLAG_TBI)) {
2306 serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG);
2307 serdescfg = (serdescfg & ~0xFFF) | 0x880;
2308 CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg);
2311 /* XXX: Broadcom Linux driver. */
2312 if ((sc->bge_flags & BGE_FLAG_PCIE) &&
2313 sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
2316 v = CSR_READ_4(sc, 0x7c00);
2317 CSR_WRITE_4(sc, 0x7c00, v | (1<<25));
2324 * Frame reception handling. This is called if there's a frame
2325 * on the receive return list.
2327 * Note: we have to be able to handle two possibilities here:
2328 * 1) the frame is from the jumbo receive ring
2329 * 2) the frame is from the standard receive ring
2333 bge_rxeof(struct bge_softc *sc)
2336 int stdcnt = 0, jumbocnt = 0;
2337 struct mbuf_chain chain[MAXCPU];
2339 if (sc->bge_rx_saved_considx ==
2340 sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx)
2343 ether_input_chain_init(chain);
2345 ifp = &sc->arpcom.ac_if;
2347 while (sc->bge_rx_saved_considx !=
2348 sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx) {
2349 struct bge_rx_bd *cur_rx;
2351 struct mbuf *m = NULL;
2352 uint16_t vlan_tag = 0;
2356 &sc->bge_ldata.bge_rx_return_ring[sc->bge_rx_saved_considx];
2358 rxidx = cur_rx->bge_idx;
2359 BGE_INC(sc->bge_rx_saved_considx, sc->bge_return_ring_cnt);
2362 if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
2364 vlan_tag = cur_rx->bge_vlan_tag;
2367 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
2368 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
2371 if (rxidx != sc->bge_jumbo) {
2373 if_printf(ifp, "sw jumbo index(%d) "
2374 "and hw jumbo index(%d) mismatch, drop!\n",
2375 sc->bge_jumbo, rxidx);
2376 bge_setup_rxdesc_jumbo(sc, rxidx);
2380 m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx].bge_mbuf;
2381 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2383 bge_setup_rxdesc_jumbo(sc, sc->bge_jumbo);
2386 if (bge_newbuf_jumbo(sc, sc->bge_jumbo, 0)) {
2388 bge_setup_rxdesc_jumbo(sc, sc->bge_jumbo);
2392 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
2395 if (rxidx != sc->bge_std) {
2397 if_printf(ifp, "sw std index(%d) "
2398 "and hw std index(%d) mismatch, drop!\n",
2399 sc->bge_std, rxidx);
2400 bge_setup_rxdesc_std(sc, rxidx);
2404 m = sc->bge_cdata.bge_rx_std_chain[rxidx].bge_mbuf;
2405 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2407 bge_setup_rxdesc_std(sc, sc->bge_std);
2410 if (bge_newbuf_std(sc, sc->bge_std, 0)) {
2412 bge_setup_rxdesc_std(sc, sc->bge_std);
2420 * The i386 allows unaligned accesses, but for other
2421 * platforms we must make sure the payload is aligned.
2423 if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) {
2424 bcopy(m->m_data, m->m_data + ETHER_ALIGN,
2426 m->m_data += ETHER_ALIGN;
2429 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
2430 m->m_pkthdr.rcvif = ifp;
2432 if (ifp->if_capenable & IFCAP_RXCSUM) {
2433 if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
2434 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2435 if ((cur_rx->bge_ip_csum ^ 0xffff) == 0)
2436 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2438 if ((cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) &&
2439 m->m_pkthdr.len >= BGE_MIN_FRAME) {
2440 m->m_pkthdr.csum_data =
2441 cur_rx->bge_tcp_udp_csum;
2442 m->m_pkthdr.csum_flags |=
2443 CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2448 * If we received a packet with a vlan tag, pass it
2449 * to vlan_input() instead of ether_input().
2452 m->m_flags |= M_VLANTAG;
2453 m->m_pkthdr.ether_vlantag = vlan_tag;
2454 have_tag = vlan_tag = 0;
2456 ether_input_chain(ifp, m, NULL, chain);
2459 ether_input_dispatch(chain);
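/*
 * Sync our RX return ring consumer index and the standard/jumbo
 * producer indices back to the chip via its mailbox registers.
 */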
2461 bge_writembx(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
2463 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
2465 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
2469 bge_txeof(struct bge_softc *sc)
2471 struct bge_tx_bd *cur_tx = NULL;
2474 if (sc->bge_tx_saved_considx ==
2475 sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx)
2478 ifp = &sc->arpcom.ac_if;
2481 * Go through our tx ring and free mbufs for those
2482 * frames that have been sent.
2484 while (sc->bge_tx_saved_considx !=
2485 sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx) {
2488 idx = sc->bge_tx_saved_considx;
2489 cur_tx = &sc->bge_ldata.bge_tx_ring[idx];
2490 if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
2492 if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
2493 bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag,
2494 sc->bge_cdata.bge_tx_dmamap[idx]);
2495 m_freem(sc->bge_cdata.bge_tx_chain[idx]);
2496 sc->bge_cdata.bge_tx_chain[idx] = NULL;
2499 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
2503 if (cur_tx != NULL &&
2504 (BGE_TX_RING_CNT - sc->bge_txcnt) >=
2505 (BGE_NSEG_RSVD + BGE_NSEG_SPARE))
2506 ifp->if_flags &= ~IFF_OACTIVE;
2508 if (sc->bge_txcnt == 0)
2511 if (!ifq_is_empty(&ifp->if_snd))
2515 #ifdef DEVICE_POLLING
2518 bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
2520 struct bge_softc *sc = ifp->if_softc;
2525 bge_disable_intr(sc);
2527 case POLL_DEREGISTER:
2528 bge_enable_intr(sc);
2530 case POLL_AND_CHECK_STATUS:
2532 * Process link state changes.
2534 status = CSR_READ_4(sc, BGE_MAC_STS);
2535 if ((status & sc->bge_link_chg) || sc->bge_link_evt) {
2536 sc->bge_link_evt = 0;
2537 sc->bge_link_upd(sc, status);
2541 if (ifp->if_flags & IFF_RUNNING) {
2554 struct bge_softc *sc = xsc;
2555 struct ifnet *ifp = &sc->arpcom.ac_if;
2561 * Ack the interrupt by writing something to BGE_MBX_IRQ0_LO. Don't
2562 * disable interrupts by writing nonzero like we used to, since with
2563 * our current organization this just gives complications and
2564 * pessimizations for re-enabling interrupts. We used to have races
2565 * instead of the necessary complications. Disabling interrupts
2566 * would just reduce the chance of a status update while we are
2567 * running (by switching to the interrupt-mode coalescence
2568 * parameters), but this chance is already very low so it is more
2569 * efficient to get another interrupt than prevent it.
2571 * We do the ack first to ensure another interrupt if there is a
2572 * status update after the ack. We don't check for the status
2573 * changing later because it is more efficient to get another
2574 * interrupt than prevent it, not quite as above (not checking is
2575 * a smaller optimization than not toggling the interrupt enable,
2576 * since checking doesn't involve PCI accesses and toggling requires
2577 * the status check). So toggling would probably be a pessimization
2578 * even with MSI. It would only be needed for using a task queue.
2580 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
2583 * Process link state changes.
2585 status = CSR_READ_4(sc, BGE_MAC_STS);
2586 if ((status & sc->bge_link_chg) || sc->bge_link_evt) {
2587 sc->bge_link_evt = 0;
2588 sc->bge_link_upd(sc, status);
2591 if (ifp->if_flags & IFF_RUNNING) {
2592 /* Check RX return ring producer/consumer */
2595 /* Check TX ring producer/consumer */
2599 if (sc->bge_coal_chg)
2600 bge_coal_change(sc);
2606 struct bge_softc *sc = xsc;
2607 struct ifnet *ifp = &sc->arpcom.ac_if;
2609 lwkt_serialize_enter(ifp->if_serializer);
2611 if (BGE_IS_5705_PLUS(sc))
2612 bge_stats_update_regs(sc);
2614 bge_stats_update(sc);
2616 if (sc->bge_flags & BGE_FLAG_TBI) {
2618 * Since auto-polling can't be used in TBI mode, we have to poll
2619 * the link status manually. Here we register a pending link event
2620 * and trigger an interrupt.
2623 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
2624 } else if (!sc->bge_link) {
2625 mii_tick(device_get_softc(sc->bge_miibus));
2628 callout_reset(&sc->bge_stat_timer, hz, bge_tick, sc);
2630 lwkt_serialize_exit(ifp->if_serializer);
2634 bge_stats_update_regs(struct bge_softc *sc)
2636 struct ifnet *ifp = &sc->arpcom.ac_if;
2637 struct bge_mac_stats_regs stats;
2641 s = (uint32_t *)&stats;
2642 for (i = 0; i < sizeof(struct bge_mac_stats_regs); i += 4) {
2643 *s = CSR_READ_4(sc, BGE_RX_STATS + i);
2647 ifp->if_collisions +=
2648 (stats.dot3StatsSingleCollisionFrames +
2649 stats.dot3StatsMultipleCollisionFrames +
2650 stats.dot3StatsExcessiveCollisions +
2651 stats.dot3StatsLateCollisions) -
2656 bge_stats_update(struct bge_softc *sc)
2658 struct ifnet *ifp = &sc->arpcom.ac_if;
2661 stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;
2663 #define READ_STAT(sc, stats, stat) \
2664 CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))
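/*
 * Each READ_STAT() pulls one 32-bit counter out of the statistics
 * block that the chip maintains in its shared memory window.
 */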
2666 ifp->if_collisions +=
2667 (READ_STAT(sc, stats,
2668 txstats.dot3StatsSingleCollisionFrames.bge_addr_lo) +
2669 READ_STAT(sc, stats,
2670 txstats.dot3StatsMultipleCollisionFrames.bge_addr_lo) +
2671 READ_STAT(sc, stats,
2672 txstats.dot3StatsExcessiveCollisions.bge_addr_lo) +
2673 READ_STAT(sc, stats,
2674 txstats.dot3StatsLateCollisions.bge_addr_lo)) -
2680 ifp->if_collisions +=
2681 (sc->bge_rdata->bge_info.bge_stats.dot3StatsSingleCollisionFrames +
2682 sc->bge_rdata->bge_info.bge_stats.dot3StatsMultipleCollisionFrames +
2683 sc->bge_rdata->bge_info.bge_stats.dot3StatsExcessiveCollisions +
2684 sc->bge_rdata->bge_info.bge_stats.dot3StatsLateCollisions) -
2690 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
2691 * pointers to descriptors.
2694 bge_encap(struct bge_softc *sc, struct mbuf **m_head0, uint32_t *txidx)
2696 struct bge_tx_bd *d = NULL;
2697 uint16_t csum_flags = 0;
2698 bus_dma_segment_t segs[BGE_NSEG_NEW];
2700 int error, maxsegs, nsegs, idx, i;
2701 struct mbuf *m_head = *m_head0;
2703 if (m_head->m_pkthdr.csum_flags) {
2704 if (m_head->m_pkthdr.csum_flags & CSUM_IP)
2705 csum_flags |= BGE_TXBDFLAG_IP_CSUM;
2706 if (m_head->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
2707 csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
2708 if (m_head->m_flags & M_LASTFRAG)
2709 csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
2710 else if (m_head->m_flags & M_FRAG)
2711 csum_flags |= BGE_TXBDFLAG_IP_FRAG;
2715 map = sc->bge_cdata.bge_tx_dmamap[idx];
2717 maxsegs = (BGE_TX_RING_CNT - sc->bge_txcnt) - BGE_NSEG_RSVD;
2718 KASSERT(maxsegs >= BGE_NSEG_SPARE,
2719 ("not enough segments %d\n", maxsegs));
2721 if (maxsegs > BGE_NSEG_NEW)
2722 maxsegs = BGE_NSEG_NEW;
2725 * Pad outbound frame to BGE_MIN_FRAME for an unusual reason.
2726 * The bge hardware will pad out Tx runts to BGE_MIN_FRAME,
2727 * but when such padded frames employ the bge IP/TCP checksum
2728 * offload, the hardware checksum assist gives incorrect results
2729 * (possibly from incorporating its own padding into the UDP/TCP
2730 * checksum; who knows). If we pad such runts with zeros, the
2731 * onboard checksum comes out correct.
2733 if ((csum_flags & BGE_TXBDFLAG_TCP_UDP_CSUM) &&
2734 m_head->m_pkthdr.len < BGE_MIN_FRAME) {
2735 error = m_devpad(m_head, BGE_MIN_FRAME);
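/*
 * m_devpad() is assumed to behave roughly like the hypothetical
 * helper sketched below, i.e. append zero bytes until the chain
 * reaches the requested length:
 *
 *	static int
 *	pad_frame(struct mbuf *m, int len)
 *	{
 *		static const char zeros[BGE_MIN_FRAME];
 *		int pad = len - m->m_pkthdr.len;
 *
 *		if (pad <= 0)
 *			return (0);
 *		return (m_append(m, pad, zeros) ? 0 : ENOBUFS);
 *	}
 */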
2740 error = bus_dmamap_load_mbuf_defrag(sc->bge_cdata.bge_tx_mtag, map,
2741 m_head0, segs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
2746 bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag, map, BUS_DMASYNC_PREWRITE);
2748 for (i = 0; ; i++) {
2749 d = &sc->bge_ldata.bge_tx_ring[idx];
2751 d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr);
2752 d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr);
2753 d->bge_len = segs[i].ds_len;
2754 d->bge_flags = csum_flags;
2758 BGE_INC(idx, BGE_TX_RING_CNT);
2760 /* Mark the last segment as end of packet... */
2761 d->bge_flags |= BGE_TXBDFLAG_END;
2763 /* Set vlan tag to the first segment of the packet. */
2764 d = &sc->bge_ldata.bge_tx_ring[*txidx];
2765 if (m_head->m_flags & M_VLANTAG) {
2766 d->bge_flags |= BGE_TXBDFLAG_VLAN_TAG;
2767 d->bge_vlan_tag = m_head->m_pkthdr.ether_vlantag;
2769 d->bge_vlan_tag = 0;
2773 * Ensure that the map for this transmission is placed at
2774 * the array index of the last descriptor in this chain.
2776 sc->bge_cdata.bge_tx_dmamap[*txidx] = sc->bge_cdata.bge_tx_dmamap[idx];
2777 sc->bge_cdata.bge_tx_dmamap[idx] = map;
2778 sc->bge_cdata.bge_tx_chain[idx] = m_head;
2779 sc->bge_txcnt += nsegs;
2781 BGE_INC(idx, BGE_TX_RING_CNT);
2792 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
2793 * to the mbuf data regions directly in the transmit descriptors.
2796 bge_start(struct ifnet *ifp)
2798 struct bge_softc *sc = ifp->if_softc;
2799 struct mbuf *m_head = NULL;
2803 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
2806 prodidx = sc->bge_tx_prodidx;
2809 while (sc->bge_cdata.bge_tx_chain[prodidx] == NULL) {
2810 m_head = ifq_dequeue(&ifp->if_snd, NULL);
2816 * The code inside the if() block is never reached since we
2817 * must mark CSUM_IP_FRAGS in our if_hwassist to start getting
2818 * requests to checksum TCP/UDP in a fragmented packet.
2821 * safety overkill. If this is a fragmented packet chain
2822 * with delayed TCP/UDP checksums, then only encapsulate
2823 * it if we have enough descriptors to handle the entire ring at once.
2825 * (paranoia -- may not actually be needed)
2827 if ((m_head->m_flags & M_FIRSTFRAG) &&
2828 (m_head->m_pkthdr.csum_flags & CSUM_DELAY_DATA)) {
2829 if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
2830 m_head->m_pkthdr.csum_data + BGE_NSEG_RSVD) {
2831 ifp->if_flags |= IFF_OACTIVE;
2832 ifq_prepend(&ifp->if_snd, m_head);
2838 * Sanity check: avoid coming within BGE_NSEG_RSVD
2839 * descriptors of the end of the ring. Also make
2840 * sure there are BGE_NSEG_SPARE descriptors for
2841 * jumbo buffers' defragmentation.
2843 if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
2844 (BGE_NSEG_RSVD + BGE_NSEG_SPARE)) {
2845 ifp->if_flags |= IFF_OACTIVE;
2846 ifq_prepend(&ifp->if_snd, m_head);
2851 * Pack the data into the transmit ring. If we
2852 * don't have room, set the OACTIVE flag and wait
2853 * for the NIC to drain the ring.
2855 if (bge_encap(sc, &m_head, &prodidx)) {
2856 ifp->if_flags |= IFF_OACTIVE;
2862 ETHER_BPF_MTAP(ifp, m_head);
2869 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
2870 /* 5700 b2 errata */
2871 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
2872 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
2874 sc->bge_tx_prodidx = prodidx;
2877 * Set a timeout in case the chip goes out to lunch.
2885 struct bge_softc *sc = xsc;
2886 struct ifnet *ifp = &sc->arpcom.ac_if;
2889 ASSERT_SERIALIZED(ifp->if_serializer);
2891 if (ifp->if_flags & IFF_RUNNING)
2894 /* Cancel pending I/O and flush buffers. */
2900 * Init the various state machines, ring
2901 * control blocks and firmware.
2903 if (bge_blockinit(sc)) {
2904 if_printf(ifp, "initialization failure\n");
2910 CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
2911 ETHER_HDR_LEN + ETHER_CRC_LEN + EVL_ENCAPLEN);
2913 /* Load our MAC address. */
2914 m = (uint16_t *)&sc->arpcom.ac_enaddr[0];
2915 CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
2916 CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
2918 /* Enable or disable promiscuous mode as needed. */
2921 /* Program multicast filter. */
2925 if (bge_init_rx_ring_std(sc)) {
2926 if_printf(ifp, "RX ring initialization failed\n");
2932 * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
2933 * memory to ensure that the chip has in fact read the first
2934 * entry of the ring.
2936 if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
2938 for (i = 0; i < 10; i++) {
2940 v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
2941 if (v == (MCLBYTES - ETHER_ALIGN))
2945 if_printf(ifp, "5705 A0 chip failed to load RX ring\n");
2948 /* Init jumbo RX ring. */
2949 if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN)) {
2950 if (bge_init_rx_ring_jumbo(sc)) {
2951 if_printf(ifp, "Jumbo RX ring initialization failed\n");
2957 /* Init our RX return ring index */
2958 sc->bge_rx_saved_considx = 0;
2961 bge_init_tx_ring(sc);
2963 /* Turn on transmitter */
2964 BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE);
2966 /* Turn on receiver */
2967 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
2969 /* Tell firmware we're alive. */
2970 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
2972 /* Enable host interrupts if polling(4) is not enabled. */
2973 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
2974 #ifdef DEVICE_POLLING
2975 if (ifp->if_flags & IFF_POLLING)
2976 bge_disable_intr(sc);
2979 bge_enable_intr(sc);
2981 bge_ifmedia_upd(ifp);
2983 ifp->if_flags |= IFF_RUNNING;
2984 ifp->if_flags &= ~IFF_OACTIVE;
2986 callout_reset(&sc->bge_stat_timer, hz, bge_tick, sc);
2990 * Set media options.
2993 bge_ifmedia_upd(struct ifnet *ifp)
2995 struct bge_softc *sc = ifp->if_softc;
2997 /* If this is a 1000baseX NIC, enable the TBI port. */
2998 if (sc->bge_flags & BGE_FLAG_TBI) {
2999 struct ifmedia *ifm = &sc->bge_ifmedia;
3001 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
3004 switch(IFM_SUBTYPE(ifm->ifm_media)) {
3007 * The BCM5704 ASIC appears to have a special
3008 * mechanism for programming the autoneg
3009 * advertisement registers in TBI mode.
3011 if (!bge_fake_autoneg &&
3012 sc->bge_asicrev == BGE_ASICREV_BCM5704) {
3015 CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
3016 sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
3017 sgdig |= BGE_SGDIGCFG_AUTO |
3018 BGE_SGDIGCFG_PAUSE_CAP |
3019 BGE_SGDIGCFG_ASYM_PAUSE;
3020 CSR_WRITE_4(sc, BGE_SGDIG_CFG,
3021 sgdig | BGE_SGDIGCFG_SEND);
3023 CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig);
3027 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
3028 BGE_CLRBIT(sc, BGE_MAC_MODE,
3029 BGE_MACMODE_HALF_DUPLEX);
3031 BGE_SETBIT(sc, BGE_MAC_MODE,
3032 BGE_MACMODE_HALF_DUPLEX);
3039 struct mii_data *mii = device_get_softc(sc->bge_miibus);
3043 if (mii->mii_instance) {
3044 struct mii_softc *miisc;
3046 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
3047 mii_phy_reset(miisc);
3055 * Report current media status.
3058 bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
3060 struct bge_softc *sc = ifp->if_softc;
3062 if (sc->bge_flags & BGE_FLAG_TBI) {
3063 ifmr->ifm_status = IFM_AVALID;
3064 ifmr->ifm_active = IFM_ETHER;
3065 if (CSR_READ_4(sc, BGE_MAC_STS) &
3066 BGE_MACSTAT_TBI_PCS_SYNCHED) {
3067 ifmr->ifm_status |= IFM_ACTIVE;
3069 ifmr->ifm_active |= IFM_NONE;
3073 ifmr->ifm_active |= IFM_1000_SX;
3074 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
3075 ifmr->ifm_active |= IFM_HDX;
3077 ifmr->ifm_active |= IFM_FDX;
3079 struct mii_data *mii = device_get_softc(sc->bge_miibus);
3082 ifmr->ifm_active = mii->mii_media_active;
3083 ifmr->ifm_status = mii->mii_media_status;
3088 bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
3090 struct bge_softc *sc = ifp->if_softc;
3091 struct ifreq *ifr = (struct ifreq *)data;
3092 int mask, error = 0;
3094 ASSERT_SERIALIZED(ifp->if_serializer);
3098 if ((!BGE_IS_JUMBO_CAPABLE(sc) && ifr->ifr_mtu > ETHERMTU) ||
3099 (BGE_IS_JUMBO_CAPABLE(sc) &&
3100 ifr->ifr_mtu > BGE_JUMBO_MTU)) {
3102 } else if (ifp->if_mtu != ifr->ifr_mtu) {
3103 ifp->if_mtu = ifr->ifr_mtu;
3104 ifp->if_flags &= ~IFF_RUNNING;
3109 if (ifp->if_flags & IFF_UP) {
3110 if (ifp->if_flags & IFF_RUNNING) {
3111 mask = ifp->if_flags ^ sc->bge_if_flags;
3114 * If only the state of the PROMISC flag
3115 * changed, then just use the 'set promisc
3116 * mode' command instead of reinitializing
3117 * the entire NIC. Doing a full re-init
3118 * means reloading the firmware and waiting
3119 * for it to start up, which may take a
3120 * second or two. Similarly for ALLMULTI.
3122 if (mask & IFF_PROMISC)
3124 if (mask & IFF_ALLMULTI)
3130 if (ifp->if_flags & IFF_RUNNING)
3133 sc->bge_if_flags = ifp->if_flags;
3137 if (ifp->if_flags & IFF_RUNNING)
3142 if (sc->bge_flags & BGE_FLAG_TBI) {
3143 error = ifmedia_ioctl(ifp, ifr,
3144 &sc->bge_ifmedia, command);
3146 struct mii_data *mii;
3148 mii = device_get_softc(sc->bge_miibus);
3149 error = ifmedia_ioctl(ifp, ifr,
3150 &mii->mii_media, command);
3154 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
3155 if (mask & IFCAP_HWCSUM) {
3156 ifp->if_capenable ^= (mask & IFCAP_HWCSUM);
3157 if (IFCAP_HWCSUM & ifp->if_capenable)
3158 ifp->if_hwassist = BGE_CSUM_FEATURES;
3160 ifp->if_hwassist = 0;
3164 error = ether_ioctl(ifp, command, data);
3171 bge_watchdog(struct ifnet *ifp)
3173 struct bge_softc *sc = ifp->if_softc;
3175 if_printf(ifp, "watchdog timeout -- resetting\n");
3177 ifp->if_flags &= ~IFF_RUNNING;
3182 if (!ifq_is_empty(&ifp->if_snd))
3187 * Stop the adapter and free any mbufs allocated to the
3191 bge_stop(struct bge_softc *sc)
3193 struct ifnet *ifp = &sc->arpcom.ac_if;
3194 struct ifmedia_entry *ifm;
3195 struct mii_data *mii = NULL;
3198 ASSERT_SERIALIZED(ifp->if_serializer);
3200 if ((sc->bge_flags & BGE_FLAG_TBI) == 0)
3201 mii = device_get_softc(sc->bge_miibus);
3203 callout_stop(&sc->bge_stat_timer);
3206 * Disable all of the receiver blocks
3208 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
3209 BGE_CLRBIT(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
3210 BGE_CLRBIT(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
3211 if (!BGE_IS_5705_PLUS(sc))
3212 BGE_CLRBIT(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
3213 BGE_CLRBIT(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
3214 BGE_CLRBIT(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
3215 BGE_CLRBIT(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
3218 * Disable all of the transmit blocks
3220 BGE_CLRBIT(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
3221 BGE_CLRBIT(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
3222 BGE_CLRBIT(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
3223 BGE_CLRBIT(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
3224 BGE_CLRBIT(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
3225 if (!BGE_IS_5705_PLUS(sc))
3226 BGE_CLRBIT(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
3227 BGE_CLRBIT(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
3230 * Shut down all of the memory managers and related
3233 BGE_CLRBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
3234 BGE_CLRBIT(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
3235 if (!BGE_IS_5705_PLUS(sc))
3236 BGE_CLRBIT(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
3237 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
3238 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
3239 if (!BGE_IS_5705_PLUS(sc)) {
3240 BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
3241 BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
3244 /* Disable host interrupts. */
3245 bge_disable_intr(sc);
3248 * Tell firmware we're shutting down.
3250 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3252 /* Free the RX lists. */
3253 bge_free_rx_ring_std(sc);
3255 /* Free jumbo RX list. */
3256 if (BGE_IS_JUMBO_CAPABLE(sc))
3257 bge_free_rx_ring_jumbo(sc);
3259 /* Free TX buffers. */
3260 bge_free_tx_ring(sc);
3263 * Isolate/power down the PHY, but leave the media selection
3264 * unchanged so that things will be put back to normal when
3265 * we bring the interface back up.
3267 * 'mii' may be NULL in the following cases:
3268 * - The device uses TBI.
3269 * - bge_stop() is called by bge_detach().
3272 itmp = ifp->if_flags;
3273 ifp->if_flags |= IFF_UP;
3274 ifm = mii->mii_media.ifm_cur;
3275 mtmp = ifm->ifm_media;
3276 ifm->ifm_media = IFM_ETHER|IFM_NONE;
3278 ifm->ifm_media = mtmp;
3279 ifp->if_flags = itmp;
3283 sc->bge_coal_chg = 0;
3285 sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
3287 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
3292 * Stop all chip I/O so that the kernel's probe routines don't
3293 * get confused by errant DMAs when rebooting.
3296 bge_shutdown(device_t dev)
3298 struct bge_softc *sc = device_get_softc(dev);
3299 struct ifnet *ifp = &sc->arpcom.ac_if;
3301 lwkt_serialize_enter(ifp->if_serializer);
3304 lwkt_serialize_exit(ifp->if_serializer);
3308 bge_suspend(device_t dev)
3310 struct bge_softc *sc = device_get_softc(dev);
3311 struct ifnet *ifp = &sc->arpcom.ac_if;
3313 lwkt_serialize_enter(ifp->if_serializer);
3315 lwkt_serialize_exit(ifp->if_serializer);
3321 bge_resume(device_t dev)
3323 struct bge_softc *sc = device_get_softc(dev);
3324 struct ifnet *ifp = &sc->arpcom.ac_if;
3326 lwkt_serialize_enter(ifp->if_serializer);
3328 if (ifp->if_flags & IFF_UP) {
3331 if (!ifq_is_empty(&ifp->if_snd))
3335 lwkt_serialize_exit(ifp->if_serializer);
3341 bge_setpromisc(struct bge_softc *sc)
3343 struct ifnet *ifp = &sc->arpcom.ac_if;
3345 if (ifp->if_flags & IFF_PROMISC)
3346 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
3348 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
3352 bge_dma_free(struct bge_softc *sc)
3356 /* Destroy RX mbuf DMA resources. */
3357 if (sc->bge_cdata.bge_rx_mtag != NULL) {
3358 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
3359 bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag,
3360 sc->bge_cdata.bge_rx_std_dmamap[i]);
3362 bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag,
3363 sc->bge_cdata.bge_rx_tmpmap);
3364 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_mtag);
3367 /* Destroy TX mbuf DMA resources. */
3368 if (sc->bge_cdata.bge_tx_mtag != NULL) {
3369 for (i = 0; i < BGE_TX_RING_CNT; i++) {
3370 bus_dmamap_destroy(sc->bge_cdata.bge_tx_mtag,
3371 sc->bge_cdata.bge_tx_dmamap[i]);
3373 bus_dma_tag_destroy(sc->bge_cdata.bge_tx_mtag);
3376 /* Destroy standard RX ring */
3377 bge_dma_block_free(sc->bge_cdata.bge_rx_std_ring_tag,
3378 sc->bge_cdata.bge_rx_std_ring_map,
3379 sc->bge_ldata.bge_rx_std_ring);
3381 if (BGE_IS_JUMBO_CAPABLE(sc))
3382 bge_free_jumbo_mem(sc);
3384 /* Destroy RX return ring */
3385 bge_dma_block_free(sc->bge_cdata.bge_rx_return_ring_tag,
3386 sc->bge_cdata.bge_rx_return_ring_map,
3387 sc->bge_ldata.bge_rx_return_ring);
3389 /* Destroy TX ring */
3390 bge_dma_block_free(sc->bge_cdata.bge_tx_ring_tag,
3391 sc->bge_cdata.bge_tx_ring_map,
3392 sc->bge_ldata.bge_tx_ring);
3394 /* Destroy status block */
3395 bge_dma_block_free(sc->bge_cdata.bge_status_tag,
3396 sc->bge_cdata.bge_status_map,
3397 sc->bge_ldata.bge_status_block);
3399 /* Destroy statistics block */
3400 bge_dma_block_free(sc->bge_cdata.bge_stats_tag,
3401 sc->bge_cdata.bge_stats_map,
3402 sc->bge_ldata.bge_stats);
3404 /* Destroy the parent tag */
3405 if (sc->bge_cdata.bge_parent_tag != NULL)
3406 bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag);
3410 bge_dma_alloc(struct bge_softc *sc)
3412 struct ifnet *ifp = &sc->arpcom.ac_if;
3416 * Allocate the parent bus DMA tag appropriate for PCI.
3418 error = bus_dma_tag_create(NULL, 1, 0,
3419 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3421 BUS_SPACE_MAXSIZE_32BIT, 0,
3422 BUS_SPACE_MAXSIZE_32BIT,
3423 0, &sc->bge_cdata.bge_parent_tag);
3425 if_printf(ifp, "could not allocate parent dma tag\n");
3430 * Create DMA tag and maps for RX mbufs.
3432 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1, 0,
3433 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3434 NULL, NULL, MCLBYTES, 1, MCLBYTES,
3435 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK,
3436 &sc->bge_cdata.bge_rx_mtag);
3438 if_printf(ifp, "could not allocate RX mbuf dma tag\n");
3442 error = bus_dmamap_create(sc->bge_cdata.bge_rx_mtag,
3443 BUS_DMA_WAITOK, &sc->bge_cdata.bge_rx_tmpmap);
3445 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_mtag);
3446 sc->bge_cdata.bge_rx_mtag = NULL;
3450 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
3451 error = bus_dmamap_create(sc->bge_cdata.bge_rx_mtag,
3453 &sc->bge_cdata.bge_rx_std_dmamap[i]);
3457 for (j = 0; j < i; ++j) {
3458 bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag,
3459 sc->bge_cdata.bge_rx_std_dmamap[j]);
3461 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_mtag);
3462 sc->bge_cdata.bge_rx_mtag = NULL;
3464 if_printf(ifp, "could not create DMA map for RX\n");
3470 * Create DMA tag and maps for TX mbufs.
3472 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1, 0,
3473 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3475 BGE_JUMBO_FRAMELEN, BGE_NSEG_NEW, MCLBYTES,
3476 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK |
3478 &sc->bge_cdata.bge_tx_mtag);
3480 if_printf(ifp, "could not allocate TX mbuf dma tag\n");
3484 for (i = 0; i < BGE_TX_RING_CNT; i++) {
3485 error = bus_dmamap_create(sc->bge_cdata.bge_tx_mtag,
3486 BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
3487 &sc->bge_cdata.bge_tx_dmamap[i]);
3491 for (j = 0; j < i; ++j) {
3492 bus_dmamap_destroy(sc->bge_cdata.bge_tx_mtag,
3493 sc->bge_cdata.bge_tx_dmamap[j]);
3495 bus_dma_tag_destroy(sc->bge_cdata.bge_tx_mtag);
3496 sc->bge_cdata.bge_tx_mtag = NULL;
3498 if_printf(ifp, "could not create DMA map for TX\n");
3504 * Create DMA resources for the standard RX ring.
3506 error = bge_dma_block_alloc(sc, BGE_STD_RX_RING_SZ,
3507 &sc->bge_cdata.bge_rx_std_ring_tag,
3508 &sc->bge_cdata.bge_rx_std_ring_map,
3509 (void *)&sc->bge_ldata.bge_rx_std_ring,
3510 &sc->bge_ldata.bge_rx_std_ring_paddr);
3512 if_printf(ifp, "could not create std RX ring\n");
3517 * Create jumbo buffer pool.
3519 if (BGE_IS_JUMBO_CAPABLE(sc)) {
3520 error = bge_alloc_jumbo_mem(sc);
3522 if_printf(ifp, "could not create jumbo buffer pool\n");
3528 * Create DMA resources for the RX return ring.
3530 error = bge_dma_block_alloc(sc, BGE_RX_RTN_RING_SZ(sc),
3531 &sc->bge_cdata.bge_rx_return_ring_tag,
3532 &sc->bge_cdata.bge_rx_return_ring_map,
3533 (void *)&sc->bge_ldata.bge_rx_return_ring,
3534 &sc->bge_ldata.bge_rx_return_ring_paddr);
3536 if_printf(ifp, "could not create RX ret ring\n");
3541 * Create DMA resources for the TX ring.
3543 error = bge_dma_block_alloc(sc, BGE_TX_RING_SZ,
3544 &sc->bge_cdata.bge_tx_ring_tag,
3545 &sc->bge_cdata.bge_tx_ring_map,
3546 (void *)&sc->bge_ldata.bge_tx_ring,
3547 &sc->bge_ldata.bge_tx_ring_paddr);
3549 if_printf(ifp, "could not create TX ring\n");
3554 * Create DMA resources for the status block.
3556 error = bge_dma_block_alloc(sc, BGE_STATUS_BLK_SZ,
3557 &sc->bge_cdata.bge_status_tag,
3558 &sc->bge_cdata.bge_status_map,
3559 (void *)&sc->bge_ldata.bge_status_block,
3560 &sc->bge_ldata.bge_status_block_paddr);
3562 if_printf(ifp, "could not create status block\n");
3567 * Create DMA resources for the statistics block.
3569 error = bge_dma_block_alloc(sc, BGE_STATS_SZ,
3570 &sc->bge_cdata.bge_stats_tag,
3571 &sc->bge_cdata.bge_stats_map,
3572 (void *)&sc->bge_ldata.bge_stats,
3573 &sc->bge_ldata.bge_stats_paddr);
3575 if_printf(ifp, "could not create stats block\n");
3582 bge_dma_block_alloc(struct bge_softc *sc, bus_size_t size, bus_dma_tag_t *tag,
3583 bus_dmamap_t *map, void **addr, bus_addr_t *paddr)
3588 error = bus_dmamem_coherent(sc->bge_cdata.bge_parent_tag, PAGE_SIZE, 0,
3589 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3590 size, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
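/*
 * bus_dmamem_coherent() hands back the tag, map, kernel virtual
 * address and bus address of one DMA-coherent, zero-filled block;
 * unpack them for the caller below.
 */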
3594 *tag = dmem.dmem_tag;
3595 *map = dmem.dmem_map;
3596 *addr = dmem.dmem_addr;
3597 *paddr = dmem.dmem_busaddr;
3603 bge_dma_block_free(bus_dma_tag_t tag, bus_dmamap_t map, void *addr)
3606 bus_dmamap_unload(tag, map);
3607 bus_dmamem_free(tag, addr, map);
3608 bus_dma_tag_destroy(tag);
3613 * Grrr. The link status word in the status block does
3614 * not work correctly on the BCM5700 rev AX and BX chips,
3615 * according to all available information. Hence, we have
3616 * to enable MII interrupts in order to properly obtain
3617 * async link changes. Unfortunately, this also means that
3618 * we have to read the MAC status register to detect link
3619 * changes, thereby adding an additional register access to
3620 * the interrupt handler.
3622 * XXX: perhaps the link state detection procedure used for
3623 * BGE_CHIPID_BCM5700_B2 could be used for other BCM5700 revisions.
3626 bge_bcm5700_link_upd(struct bge_softc *sc, uint32_t status __unused)
3628 struct ifnet *ifp = &sc->arpcom.ac_if;
3629 struct mii_data *mii = device_get_softc(sc->bge_miibus);
3633 if (!sc->bge_link &&
3634 (mii->mii_media_status & IFM_ACTIVE) &&
3635 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
3638 if_printf(ifp, "link UP\n");
3639 } else if (sc->bge_link &&
3640 (!(mii->mii_media_status & IFM_ACTIVE) ||
3641 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
3644 if_printf(ifp, "link DOWN\n");
3647 /* Clear the interrupt. */
3648 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_MI_INTERRUPT);
3649 bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR);
3650 bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR, BRGPHY_INTRS);
3654 bge_tbi_link_upd(struct bge_softc *sc, uint32_t status)
3656 struct ifnet *ifp = &sc->arpcom.ac_if;
3658 #define PCS_ENCODE_ERR (BGE_MACSTAT_PORT_DECODE_ERROR|BGE_MACSTAT_MI_COMPLETE)
3661 * Sometimes PCS encoding errors are detected in
3662 * TBI mode (on fiber NICs), and for some reason
3663 * the chip will signal them as link changes.
3664 * If we get a link change event, but the 'PCS
3665 * encoding error' bit in the MAC status register
3666 * is set, don't bother doing a link check.
3667 * This avoids spurious "gigabit link up" messages
3668 * that sometimes appear on fiber NICs during
3669 * periods of heavy traffic.
3671 if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
3672 if (!sc->bge_link) {
3674 if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
3675 BGE_CLRBIT(sc, BGE_MAC_MODE,
3676 BGE_MACMODE_TBI_SEND_CFGS);
3678 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
3681 if_printf(ifp, "link UP\n");
3683 ifp->if_link_state = LINK_STATE_UP;
3684 if_link_state_change(ifp);
3686 } else if ((status & PCS_ENCODE_ERR) != PCS_ENCODE_ERR) {
3691 if_printf(ifp, "link DOWN\n");
3693 ifp->if_link_state = LINK_STATE_DOWN;
3694 if_link_state_change(ifp);
3698 #undef PCS_ENCODE_ERR
3700 /* Clear the attention. */
3701 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
3702 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
3703 BGE_MACSTAT_LINK_CHANGED);
3707 bge_copper_link_upd(struct bge_softc *sc, uint32_t status __unused)
3710 * Check that the AUTOPOLL bit is set before
3711 * processing the event as a real link change.
3712 * Turning AUTOPOLL on and off in the MII read/write
3713 * functions will often trigger a link status
3714 * interrupt for no reason.
3716 if (CSR_READ_4(sc, BGE_MI_MODE) & BGE_MIMODE_AUTOPOLL) {
3717 struct ifnet *ifp = &sc->arpcom.ac_if;
3718 struct mii_data *mii = device_get_softc(sc->bge_miibus);
3722 if (!sc->bge_link &&
3723 (mii->mii_media_status & IFM_ACTIVE) &&
3724 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
3727 if_printf(ifp, "link UP\n");
3728 } else if (sc->bge_link &&
3729 (!(mii->mii_media_status & IFM_ACTIVE) ||
3730 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
3733 if_printf(ifp, "link DOWN\n");
3737 /* Clear the attention. */
3738 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
3739 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
3740 BGE_MACSTAT_LINK_CHANGED);
3744 bge_sysctl_rx_coal_ticks(SYSCTL_HANDLER_ARGS)
3746 struct bge_softc *sc = arg1;
3748 return bge_sysctl_coal_chg(oidp, arg1, arg2, req,
3749 &sc->bge_rx_coal_ticks,
3750 BGE_RX_COAL_TICKS_CHG);
3754 bge_sysctl_tx_coal_ticks(SYSCTL_HANDLER_ARGS)
3756 struct bge_softc *sc = arg1;
3758 return bge_sysctl_coal_chg(oidp, arg1, arg2, req,
3759 &sc->bge_tx_coal_ticks,
3760 BGE_TX_COAL_TICKS_CHG);
3764 bge_sysctl_rx_max_coal_bds(SYSCTL_HANDLER_ARGS)
3766 struct bge_softc *sc = arg1;
3768 return bge_sysctl_coal_chg(oidp, arg1, arg2, req,
3769 &sc->bge_rx_max_coal_bds,
3770 BGE_RX_MAX_COAL_BDS_CHG);
3774 bge_sysctl_tx_max_coal_bds(SYSCTL_HANDLER_ARGS)
3776 struct bge_softc *sc = arg1;
3778 return bge_sysctl_coal_chg(oidp, arg1, arg2, req,
3779 &sc->bge_tx_max_coal_bds,
3780 BGE_TX_MAX_COAL_BDS_CHG);
3784 bge_sysctl_coal_chg(SYSCTL_HANDLER_ARGS, uint32_t *coal,
3785 uint32_t coal_chg_mask)
3787 struct bge_softc *sc = arg1;
3788 struct ifnet *ifp = &sc->arpcom.ac_if;
3791 lwkt_serialize_enter(ifp->if_serializer);
3794 error = sysctl_handle_int(oidp, &v, 0, req);
3795 if (!error && req->newptr != NULL) {
3800 sc->bge_coal_chg |= coal_chg_mask;
3804 lwkt_serialize_exit(ifp->if_serializer);
3809 bge_coal_change(struct bge_softc *sc)
3811 struct ifnet *ifp = &sc->arpcom.ac_if;
3814 ASSERT_SERIALIZED(ifp->if_serializer);
3816 if (sc->bge_coal_chg & BGE_RX_COAL_TICKS_CHG) {
3817 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS,
3818 sc->bge_rx_coal_ticks);
3820 val = CSR_READ_4(sc, BGE_HCC_RX_COAL_TICKS);
3823 if_printf(ifp, "rx_coal_ticks -> %u\n",
3824 sc->bge_rx_coal_ticks);
3828 if (sc->bge_coal_chg & BGE_TX_COAL_TICKS_CHG) {
3829 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS,
3830 sc->bge_tx_coal_ticks);
3832 val = CSR_READ_4(sc, BGE_HCC_TX_COAL_TICKS);
3835 if_printf(ifp, "tx_coal_ticks -> %u\n",
3836 sc->bge_tx_coal_ticks);
3840 if (sc->bge_coal_chg & BGE_RX_MAX_COAL_BDS_CHG) {
3841 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS,
3842 sc->bge_rx_max_coal_bds);
3844 val = CSR_READ_4(sc, BGE_HCC_RX_MAX_COAL_BDS);
3847 if_printf(ifp, "rx_max_coal_bds -> %u\n",
3848 sc->bge_rx_max_coal_bds);
3852 if (sc->bge_coal_chg & BGE_TX_MAX_COAL_BDS_CHG) {
3853 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS,
3854 sc->bge_tx_max_coal_bds);
3856 val = CSR_READ_4(sc, BGE_HCC_TX_MAX_COAL_BDS);
3859 if_printf(ifp, "tx_max_coal_bds -> %u\n",
3860 sc->bge_tx_max_coal_bds);
3864 sc->bge_coal_chg = 0;
3868 bge_enable_intr(struct bge_softc *sc)
3870 struct ifnet *ifp = &sc->arpcom.ac_if;
3872 lwkt_serialize_handler_enable(ifp->if_serializer);
3877 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
3880 * Unmask the interrupt when we stop polling.
3882 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
3885 * Trigger another interrupt, since the above write to
3886 * interrupt mailbox0 may acknowledge a pending interrupt.
3889 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
3893 bge_disable_intr(struct bge_softc *sc)
3895 struct ifnet *ifp = &sc->arpcom.ac_if;
3898 * Mask the interrupt when we start polling.
3900 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
3903 * Acknowledge possible asserted interrupt.
3905 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
3907 lwkt_serialize_handler_disable(ifp->if_serializer);
3911 bge_get_eaddr_mem(struct bge_softc *sc, uint8_t ether_addr[])
3916 mac_addr = bge_readmem_ind(sc, 0x0c14);
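/*
 * If the upper 16 bits of the word at offset 0x0c14 hold the
 * signature 0x484b (ASCII "HK"), the MAC address is taken from
 * the bytes that follow.
 */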
3917 if ((mac_addr >> 16) == 0x484b) {
3918 ether_addr[0] = (uint8_t)(mac_addr >> 8);
3919 ether_addr[1] = (uint8_t)mac_addr;
3920 mac_addr = bge_readmem_ind(sc, 0x0c18);
3921 ether_addr[2] = (uint8_t)(mac_addr >> 24);
3922 ether_addr[3] = (uint8_t)(mac_addr >> 16);
3923 ether_addr[4] = (uint8_t)(mac_addr >> 8);
3924 ether_addr[5] = (uint8_t)mac_addr;
3931 bge_get_eaddr_nvram(struct bge_softc *sc, uint8_t ether_addr[])
3933 int mac_offset = BGE_EE_MAC_OFFSET;
3935 if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
3936 mac_offset = BGE_EE_MAC_OFFSET_5906;
3938 return bge_read_nvram(sc, ether_addr, mac_offset + 2, ETHER_ADDR_LEN);
3942 bge_get_eaddr_eeprom(struct bge_softc *sc, uint8_t ether_addr[])
3944 if (sc->bge_flags & BGE_FLAG_NO_EEPROM)
3947 return bge_read_eeprom(sc, ether_addr, BGE_EE_MAC_OFFSET + 2,
3952 bge_get_eaddr(struct bge_softc *sc, uint8_t eaddr[])
3954 static const bge_eaddr_fcn_t bge_eaddr_funcs[] = {
3955 /* NOTE: Order is critical */
3957 bge_get_eaddr_nvram,
3958 bge_get_eaddr_eeprom,
3961 const bge_eaddr_fcn_t *func;
3963 for (func = bge_eaddr_funcs; *func != NULL; ++func) {
3964 if ((*func)(sc, eaddr) == 0)
3967 return (*func == NULL ? ENXIO : 0);