/*
 * Copyright (c) 2001 Wind River Systems
 * Copyright (c) 1997, 1998, 1999, 2001
 *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/bge/if_bge.c,v 1.3.2.39 2005/07/03 03:41:18 silby Exp $
 */

/*
 * Broadcom BCM570x family gigabit ethernet driver for FreeBSD.
 *
 * Written by Bill Paul <wpaul@windriver.com>
 * Senior Engineer, Wind River Systems
 */

/*
 * The Broadcom BCM5700 is based on technology originally developed by
 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
 * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
 * frames, highly configurable RX filtering, and 16 RX and TX queues
 * (which, along with RX filter rules, can be used for QOS applications).
 * Other features, such as TCP segmentation, may be available as part
 * of value-added firmware updates. Unlike the Tigon I and Tigon II,
 * firmware images can be stored in hardware and need not be compiled
 * into the driver.
 *
 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
 *
 * The BCM5701 is a single-chip solution incorporating both the BCM5700
 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
 * does not support external SSRAM.
 *
 * Broadcom also produces a variation of the BCM5700 under the "Altima"
 * brand name, which is functionally similar but lacks PCI-X support.
 *
 * Without external SSRAM, you can have at most 4 TX rings, and the use
 * of the mini RX ring is disabled. This seems to imply that these
 * features are simply not available on the BCM5701. As a result, this
 * driver does not implement any support for the mini RX ring.
 */

#include "opt_polling.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/interrupt.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/rman.h>
#include <sys/serialize.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/ifq_var.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>

#include <dev/netif/mii_layer/mii.h>
#include <dev/netif/mii_layer/miivar.h>
#include <dev/netif/mii_layer/brgphyreg.h>

#include <bus/pci/pcidevs.h>
#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>

#include <dev/netif/bge/if_bgereg.h>

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

#define BGE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)
#define BGE_MIN_FRAME		60

static const struct bge_type bge_devs[] = {
	{ PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3C996,
		"3COM 3C996 Gigabit Ethernet" },

	{ PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_BCM5700,
		"Alteon BCM5700 Gigabit Ethernet" },
	{ PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_BCM5701,
		"Alteon BCM5701 Gigabit Ethernet" },

	{ PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1000,
		"Altima AC1000 Gigabit Ethernet" },
	{ PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1001,
		"Altima AC1002 Gigabit Ethernet" },
	{ PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC9100,
		"Altima AC9100 Gigabit Ethernet" },

	{ PCI_VENDOR_APPLE, PCI_PRODUCT_APPLE_BCM5701,
		"Apple BCM5701 Gigabit Ethernet" },

	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5700,
		"Broadcom BCM5700 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5701,
		"Broadcom BCM5701 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702,
		"Broadcom BCM5702 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702X,
		"Broadcom BCM5702X Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702_ALT,
		"Broadcom BCM5702 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703,
		"Broadcom BCM5703 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703X,
		"Broadcom BCM5703X Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703A3,
		"Broadcom BCM5703 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704C,
		"Broadcom BCM5704C Dual Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704S,
		"Broadcom BCM5704S Dual Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704S_ALT,
		"Broadcom BCM5704S Dual Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705,
		"Broadcom BCM5705 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705F,
		"Broadcom BCM5705F Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705K,
		"Broadcom BCM5705K Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705M,
		"Broadcom BCM5705M Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705M_ALT,
		"Broadcom BCM5705M Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5714,
		"Broadcom BCM5714C Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5714S,
		"Broadcom BCM5714S Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5715,
		"Broadcom BCM5715 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5715S,
		"Broadcom BCM5715S Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5720,
		"Broadcom BCM5720 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5721,
		"Broadcom BCM5721 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5722,
		"Broadcom BCM5722 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5750,
		"Broadcom BCM5750 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5750M,
		"Broadcom BCM5750M Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751,
		"Broadcom BCM5751 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751F,
		"Broadcom BCM5751F Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751M,
		"Broadcom BCM5751M Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5752,
		"Broadcom BCM5752 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5752M,
		"Broadcom BCM5752M Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5753,
		"Broadcom BCM5753 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5753F,
		"Broadcom BCM5753F Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5753M,
		"Broadcom BCM5753M Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5754,
		"Broadcom BCM5754 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5754M,
		"Broadcom BCM5754M Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5755,
		"Broadcom BCM5755 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5755M,
		"Broadcom BCM5755M Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5756,
		"Broadcom BCM5756 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5780,
		"Broadcom BCM5780 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5780S,
		"Broadcom BCM5780S Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5781,
		"Broadcom BCM5781 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5782,
		"Broadcom BCM5782 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5786,
		"Broadcom BCM5786 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5787,
		"Broadcom BCM5787 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5787F,
		"Broadcom BCM5787F Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5787M,
		"Broadcom BCM5787M Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5788,
		"Broadcom BCM5788 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5789,
		"Broadcom BCM5789 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5901,
		"Broadcom BCM5901 Fast Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5901A2,
		"Broadcom BCM5901A2 Fast Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5903M,
		"Broadcom BCM5903M Fast Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5906,
		"Broadcom BCM5906 Fast Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5906M,
		"Broadcom BCM5906M Fast Ethernet" },

	{ PCI_VENDOR_SCHNEIDERKOCH, PCI_PRODUCT_SCHNEIDERKOCH_SK_9DX1,
		"SysKonnect Gigabit Ethernet" },

	{ 0, 0, NULL }
};

#define BGE_IS_JUMBO_CAPABLE(sc)	((sc)->bge_flags & BGE_FLAG_JUMBO)
#define BGE_IS_5700_FAMILY(sc)		((sc)->bge_flags & BGE_FLAG_5700_FAMILY)
#define BGE_IS_5705_PLUS(sc)		((sc)->bge_flags & BGE_FLAG_5705_PLUS)
#define BGE_IS_5714_FAMILY(sc)		((sc)->bge_flags & BGE_FLAG_5714_FAMILY)
#define BGE_IS_575X_PLUS(sc)		((sc)->bge_flags & BGE_FLAG_575X_PLUS)

typedef int	(*bge_eaddr_fcn_t)(struct bge_softc *, uint8_t[]);

static int	bge_probe(device_t);
static int	bge_attach(device_t);
static int	bge_detach(device_t);
static void	bge_txeof(struct bge_softc *);
static void	bge_rxeof(struct bge_softc *);

static void	bge_tick(void *);
static void	bge_stats_update(struct bge_softc *);
static void	bge_stats_update_regs(struct bge_softc *);
static int	bge_encap(struct bge_softc *, struct mbuf **, uint32_t *);

#ifdef DEVICE_POLLING
static void	bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count);
#endif
static void	bge_intr(void *);
static void	bge_enable_intr(struct bge_softc *);
static void	bge_disable_intr(struct bge_softc *);
static void	bge_start(struct ifnet *);
static int	bge_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	bge_init(void *);
static void	bge_stop(struct bge_softc *);
static void	bge_watchdog(struct ifnet *);
static void	bge_shutdown(device_t);
static int	bge_suspend(device_t);
static int	bge_resume(device_t);
static int	bge_ifmedia_upd(struct ifnet *);
static void	bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);

static uint8_t	bge_nvram_getbyte(struct bge_softc *, int, uint8_t *);
static int	bge_read_nvram(struct bge_softc *, caddr_t, int, int);

static uint8_t	bge_eeprom_getbyte(struct bge_softc *, uint32_t, uint8_t *);
static int	bge_read_eeprom(struct bge_softc *, caddr_t, uint32_t, size_t);

static void	bge_setmulti(struct bge_softc *);
static void	bge_setpromisc(struct bge_softc *);

static int	bge_alloc_jumbo_mem(struct bge_softc *);
static void	bge_free_jumbo_mem(struct bge_softc *);
static struct bge_jslot
		*bge_jalloc(struct bge_softc *);
static void	bge_jfree(void *);
static void	bge_jref(void *);
static int	bge_newbuf_std(struct bge_softc *, int, int);
static int	bge_newbuf_jumbo(struct bge_softc *, int, int);
static void	bge_setup_rxdesc_std(struct bge_softc *, int);
static void	bge_setup_rxdesc_jumbo(struct bge_softc *, int);
static int	bge_init_rx_ring_std(struct bge_softc *);
static void	bge_free_rx_ring_std(struct bge_softc *);
static int	bge_init_rx_ring_jumbo(struct bge_softc *);
static void	bge_free_rx_ring_jumbo(struct bge_softc *);
static void	bge_free_tx_ring(struct bge_softc *);
static int	bge_init_tx_ring(struct bge_softc *);

static int	bge_chipinit(struct bge_softc *);
static int	bge_blockinit(struct bge_softc *);

static uint32_t	bge_readmem_ind(struct bge_softc *, uint32_t);
static void	bge_writemem_ind(struct bge_softc *, uint32_t, uint32_t);
static uint32_t	bge_readreg_ind(struct bge_softc *, uint32_t);
static void	bge_writereg_ind(struct bge_softc *, uint32_t, uint32_t);
static void	bge_writemem_direct(struct bge_softc *, uint32_t, uint32_t);
static void	bge_writembx(struct bge_softc *, int, int);

static int	bge_miibus_readreg(device_t, int, int);
static int	bge_miibus_writereg(device_t, int, int, int);
static void	bge_miibus_statchg(device_t);
static void	bge_bcm5700_link_upd(struct bge_softc *, uint32_t);
static void	bge_tbi_link_upd(struct bge_softc *, uint32_t);
static void	bge_copper_link_upd(struct bge_softc *, uint32_t);

static void	bge_reset(struct bge_softc *);

static int	bge_dma_alloc(struct bge_softc *);
static void	bge_dma_free(struct bge_softc *);
static int	bge_dma_block_alloc(struct bge_softc *, bus_size_t,
				    bus_dma_tag_t *, bus_dmamap_t *,
				    void **, bus_addr_t *);
static void	bge_dma_block_free(bus_dma_tag_t, bus_dmamap_t, void *);

static int	bge_get_eaddr_mem(struct bge_softc *, uint8_t[]);
static int	bge_get_eaddr_nvram(struct bge_softc *, uint8_t[]);
static int	bge_get_eaddr_eeprom(struct bge_softc *, uint8_t[]);
static int	bge_get_eaddr(struct bge_softc *, uint8_t[]);

static void	bge_coal_change(struct bge_softc *);
static int	bge_sysctl_rx_coal_ticks(SYSCTL_HANDLER_ARGS);
static int	bge_sysctl_tx_coal_ticks(SYSCTL_HANDLER_ARGS);
static int	bge_sysctl_rx_max_coal_bds(SYSCTL_HANDLER_ARGS);
static int	bge_sysctl_tx_max_coal_bds(SYSCTL_HANDLER_ARGS);
static int	bge_sysctl_coal_chg(SYSCTL_HANDLER_ARGS, uint32_t *, uint32_t);

/*
 * Set the following tunable to 1 for some IBM blade servers with the DNLK
 * switch module.  Auto negotiation is broken for those configurations.
 */
static int	bge_fake_autoneg = 0;
TUNABLE_INT("hw.bge.fake_autoneg", &bge_fake_autoneg);

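/* Usage sketch (not in the original source): tunables are read from the
 * boot-time environment, so e.g. /boot/loader.conf can carry
 *
 *	hw.bge.fake_autoneg="1"
 *
 * for the affected IBM blades. */
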
/* Interrupt moderation control variables. */
static int	bge_rx_coal_ticks = 100;	/* usec */
static int	bge_tx_coal_ticks = 1023;	/* usec */
static int	bge_rx_max_coal_bds = 80;
static int	bge_tx_max_coal_bds = 128;

TUNABLE_INT("hw.bge.rx_coal_ticks", &bge_rx_coal_ticks);
TUNABLE_INT("hw.bge.tx_coal_ticks", &bge_tx_coal_ticks);
TUNABLE_INT("hw.bge.rx_max_coal_bds", &bge_rx_max_coal_bds);
TUNABLE_INT("hw.bge.tx_max_coal_bds", &bge_tx_max_coal_bds);

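/*
 * These defaults can likewise be overridden from loader.conf, e.g.
 *
 *	hw.bge.rx_coal_ticks="150"
 *	hw.bge.rx_max_coal_bds="64"
 *
 * The host coalescing engine raises an interrupt once either limit is
 * hit: when *_coal_ticks microseconds elapse or *_max_coal_bds buffer
 * descriptors pile up, whichever comes first, trading interrupt rate
 * against latency.
 */
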
#if !defined(KTR_IF_BGE)
#define KTR_IF_BGE	KTR_ALL
#endif
KTR_INFO_MASTER(if_bge);
KTR_INFO(KTR_IF_BGE, if_bge, intr, 0, "intr", 0);
KTR_INFO(KTR_IF_BGE, if_bge, rx_pkt, 1, "rx_pkt", 0);
KTR_INFO(KTR_IF_BGE, if_bge, tx_pkt, 2, "tx_pkt", 0);
#define logif(name)	KTR_LOG(if_bge_ ## name)

static device_method_t bge_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		bge_probe),
	DEVMETHOD(device_attach,	bge_attach),
	DEVMETHOD(device_detach,	bge_detach),
	DEVMETHOD(device_shutdown,	bge_shutdown),
	DEVMETHOD(device_suspend,	bge_suspend),
	DEVMETHOD(device_resume,	bge_resume),

	/* Bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	bge_miibus_readreg),
	DEVMETHOD(miibus_writereg,	bge_miibus_writereg),
	DEVMETHOD(miibus_statchg,	bge_miibus_statchg),

	{ 0, 0 }
};

static DEFINE_CLASS_0(bge, bge_driver, bge_methods, sizeof(struct bge_softc));
static devclass_t bge_devclass;

DECLARE_DUMMY_MODULE(if_bge);
DRIVER_MODULE(if_bge, pci, bge_driver, bge_devclass, NULL, NULL);
DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, NULL, NULL);

static uint32_t
bge_readmem_ind(struct bge_softc *sc, uint32_t off)
{
	device_t dev = sc->bge_dev;
	uint32_t val;

	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
	val = pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4);
	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
	return(val);
}

static void
bge_writemem_ind(struct bge_softc *sc, uint32_t off, uint32_t val)
{
	device_t dev = sc->bge_dev;

	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
	pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
}

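/*
 * Usage sketch (illustrative): the two config-space registers form a
 * movable window into NIC-internal RAM, so probing the gencomm signature
 * is just
 *
 *	uint32_t sig = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG);
 *
 * i.e. point BGE_PCI_MEMWIN_BASEADDR at the offset, transfer through
 * BGE_PCI_MEMWIN_DATA, then park the window back at offset 0.
 */
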
static uint32_t
bge_readreg_ind(struct bge_softc *sc, uint32_t off)
{
	device_t dev = sc->bge_dev;

	pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
	return(pci_read_config(dev, BGE_PCI_REG_DATA, 4));
}

static void
bge_writereg_ind(struct bge_softc *sc, uint32_t off, uint32_t val)
{
	device_t dev = sc->bge_dev;

	pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
	pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);
}

static void
bge_writemem_direct(struct bge_softc *sc, uint32_t off, uint32_t val)
{
	CSR_WRITE_4(sc, off, val);
}

static void
bge_writembx(struct bge_softc *sc, int off, int val)
{
	if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
		off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI;

	CSR_WRITE_4(sc, off, val);
}

static uint8_t
bge_nvram_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
{
	uint32_t access, byte = 0;
	int i;

	/* Lock. */
	CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
	for (i = 0; i < 8000; i++) {
		if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1)
			break;
		DELAY(20);
	}
	if (i == 8000)
		return (1);

	/* Enable access. */
	access = CSR_READ_4(sc, BGE_NVRAM_ACCESS);
	CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access | BGE_NVRAMACC_ENABLE);

	CSR_WRITE_4(sc, BGE_NVRAM_ADDR, addr & 0xfffffffc);
	CSR_WRITE_4(sc, BGE_NVRAM_CMD, BGE_NVRAM_READCMD);
	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
		DELAY(10);
		if (CSR_READ_4(sc, BGE_NVRAM_CMD) & BGE_NVRAMCMD_DONE) {
			DELAY(10);
			break;
		}
	}

	if (i == BGE_TIMEOUT * 10) {
		if_printf(&sc->arpcom.ac_if, "nvram read timed out\n");
		return (1);
	}

	/* Get result. */
	byte = CSR_READ_4(sc, BGE_NVRAM_RDDATA);

	*dest = (bswap32(byte) >> ((addr % 4) * 8)) & 0xFF;

	/* Disable access. */
	CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access);

	/* Unlock. */
	CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_CLR1);
	CSR_READ_4(sc, BGE_NVRAM_SWARB);

	return (0);
}

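/*
 * Worked example (illustrative): the controller returns the big-endian
 * 32-bit word containing the requested byte (addr & ~3).  For addr = 6,
 * bswap32() reorders the word so the byte at offset addr % 4 = 2 sits
 * at bits 16-23, the (addr % 4) * 8 = 16 shift brings it down, and the
 * 0xFF mask isolates it.
 */
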
/*
 * Read a sequence of bytes from NVRAM.
 */
static int
bge_read_nvram(struct bge_softc *sc, caddr_t dest, int off, int cnt)
{
	int err = 0, i;
	uint8_t byte = 0;

	if (sc->bge_asicrev != BGE_ASICREV_BCM5906)
		return (1);

	for (i = 0; i < cnt; i++) {
		err = bge_nvram_getbyte(sc, off + i, &byte);
		if (err)
			break;
		*(dest + i) = byte;
	}

	return (err ? 1 : 0);
}

/*
 * Read a byte of data stored in the EEPROM at address 'addr.' The
 * BCM570x supports both the traditional bitbang interface and an
 * auto access interface for reading the EEPROM. We use the auto
 * access method.
 */
static uint8_t
bge_eeprom_getbyte(struct bge_softc *sc, uint32_t addr, uint8_t *dest)
{
	int i;
	uint32_t byte = 0;

	/*
	 * Enable use of auto EEPROM access so we can avoid
	 * having to use the bitbang method.
	 */
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);

	/* Reset the EEPROM, load the clock period. */
	CSR_WRITE_4(sc, BGE_EE_ADDR,
	    BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
	DELAY(20);

	/* Issue the read EEPROM command. */
	CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);

	/* Wait for completion */
	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
		DELAY(10);
		if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
			break;
	}

	if (i == BGE_TIMEOUT * 10) {
		if_printf(&sc->arpcom.ac_if, "eeprom read timed out\n");
		return(1);
	}

	/* Get result. */
	byte = CSR_READ_4(sc, BGE_EE_DATA);

	*dest = (byte >> ((addr % 4) * 8)) & 0xFF;

	return(0);
}

/*
 * Read a sequence of bytes from the EEPROM.
 */
static int
bge_read_eeprom(struct bge_softc *sc, caddr_t dest, uint32_t off, size_t len)
{
	size_t i;
	int err;
	uint8_t byte;

	for (byte = 0, err = 0, i = 0; i < len; i++) {
		err = bge_eeprom_getbyte(sc, off + i, &byte);
		if (err)
			break;
		*(dest + i) = byte;
	}

	return(err ? 1 : 0);
}

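/*
 * Usage sketch (mirrors what bge_get_eaddr_eeprom() does later in the
 * driver): the station address, for instance, is pulled from the EEPROM
 * with
 *
 *	uint8_t ea[ETHER_ADDR_LEN];
 *	if (bge_read_eeprom(sc, (caddr_t)ea, BGE_EE_MAC_OFFSET + 2,
 *	    ETHER_ADDR_LEN) == 0)
 *		... ea[] now holds the station address ...
 */
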
static int
bge_miibus_readreg(device_t dev, int phy, int reg)
{
	struct bge_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t val, autopoll;
	int i;

	/*
	 * Broadcom's own driver always assumes the internal
	 * PHY is at GMII address 1. On some chips, the PHY responds
	 * to accesses at all addresses, which could cause us to
	 * bogusly attach the PHY 32 times at probe time. Always
	 * restricting the lookup to address 1 is simpler than
	 * trying to figure out which chip revisions should be
	 * special-cased.
	 */
	if (phy != 1)
		return(0);

	/* Reading with autopolling on may trigger PCI errors */
	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY|
	    BGE_MIPHY(phy)|BGE_MIREG(reg));

	for (i = 0; i < BGE_TIMEOUT; i++) {
		DELAY(10);
		val = CSR_READ_4(sc, BGE_MI_COMM);
		if (!(val & BGE_MICOMM_BUSY))
			break;
	}

	if (i == BGE_TIMEOUT) {
		if_printf(ifp, "PHY read timed out "
			  "(phy %d, reg %d, val 0x%08x)\n", phy, reg, val);
		val = 0;
		goto done;
	}

	DELAY(10);
	val = CSR_READ_4(sc, BGE_MI_COMM);

done:
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	if (val & BGE_MICOMM_READFAIL)
		return(0);

	return(val & 0xFFFF);
}

static int
bge_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct bge_softc *sc = device_get_softc(dev);
	uint32_t autopoll;
	int i;

	/*
	 * See the related comment in bge_miibus_readreg()
	 */
	if (phy != 1)
		return(0);

	if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
	    (reg == BRGPHY_MII_1000CTL || reg == BRGPHY_MII_AUXCTL))
		return(0);

	/* Reading with autopolling on may trigger PCI errors */
	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY|
	    BGE_MIPHY(phy)|BGE_MIREG(reg)|val);

	for (i = 0; i < BGE_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) {
			DELAY(5);
			CSR_READ_4(sc, BGE_MI_COMM); /* dummy read */
			break;
		}
	}

	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	if (i == BGE_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if, "PHY write timed out "
			  "(phy %d, reg %d, val %d)\n", phy, reg, val);
		return(0);
	}

	return(0);
}

static void
bge_miibus_statchg(device_t dev)
{
	struct bge_softc *sc;
	struct mii_data *mii;

	sc = device_get_softc(dev);
	mii = device_get_softc(sc->bge_miibus);

	BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
	} else {
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
	}

	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
		BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
	} else {
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
	}
}

/*
 * Memory management for jumbo frames.
 */
static int
bge_alloc_jumbo_mem(struct bge_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct bge_jslot *entry;
	uint8_t *ptr;
	bus_addr_t paddr;
	int i, error;

	/*
	 * Create tag for jumbo mbufs.
	 * This is really a bit of a kludge. We allocate a special
	 * jumbo buffer pool which (thanks to the way our DMA
	 * memory allocation works) will consist of contiguous
	 * pages. This means that even though a jumbo buffer might
	 * be larger than a page size, we don't really need to
	 * map it into more than one DMA segment. However, the
	 * default mbuf tag will result in multi-segment mappings,
	 * so we have to create a special jumbo mbuf tag that
	 * lets us get away with mapping the jumbo buffers as
	 * a single segment. I think eventually the driver should
	 * be changed so that it uses ordinary mbufs and cluster
	 * buffers, i.e. jumbo frames can span multiple DMA
	 * descriptors. But that's a project for another day.
	 */

	/*
	 * Create DMA stuffs for jumbo RX ring.
	 */
	error = bge_dma_block_alloc(sc, BGE_JUMBO_RX_RING_SZ,
				    &sc->bge_cdata.bge_rx_jumbo_ring_tag,
				    &sc->bge_cdata.bge_rx_jumbo_ring_map,
				    (void *)&sc->bge_ldata.bge_rx_jumbo_ring,
				    &sc->bge_ldata.bge_rx_jumbo_ring_paddr);
	if (error) {
		if_printf(ifp, "could not create jumbo RX ring\n");
		return error;
	}

	/*
	 * Create DMA stuffs for jumbo buffer block.
	 */
	error = bge_dma_block_alloc(sc, BGE_JMEM,
				    &sc->bge_cdata.bge_jumbo_tag,
				    &sc->bge_cdata.bge_jumbo_map,
				    (void **)&sc->bge_ldata.bge_jumbo_buf,
				    &paddr);
	if (error) {
		if_printf(ifp, "could not create jumbo buffer\n");
		return error;
	}

	SLIST_INIT(&sc->bge_jfree_listhead);

	/*
	 * Now divide it up into 9K pieces and save the addresses
	 * in an array. Note that we play an evil trick here by using
	 * the first few bytes in the buffer to hold the address
	 * of the softc structure for this interface. This is because
	 * bge_jfree() needs it, but it is called by the mbuf management
	 * code which will not pass it to us explicitly.
	 */
	for (i = 0, ptr = sc->bge_ldata.bge_jumbo_buf; i < BGE_JSLOTS; i++) {
		entry = &sc->bge_cdata.bge_jslots[i];
		entry->bge_sc = sc;
		entry->bge_buf = ptr;
		entry->bge_paddr = paddr;
		entry->bge_inuse = 0;
		entry->bge_slot = i;
		SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, entry, jslot_link);
		ptr += BGE_JLEN;
		paddr += BGE_JLEN;
	}
	return 0;
}

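/*
 * Layout sketch (illustrative): BGE_JMEM is one physically contiguous
 * block carved into BGE_JSLOTS slots of BGE_JLEN bytes.  bge_jalloc()
 * hands slots out from the free list, and bge_jref()/bge_jfree() below
 * maintain a per-slot reference count so a slot is recycled only after
 * the last mbuf pointing at it goes away.
 */
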
static void
bge_free_jumbo_mem(struct bge_softc *sc)
{
	/* Destroy jumbo RX ring. */
	bge_dma_block_free(sc->bge_cdata.bge_rx_jumbo_ring_tag,
			   sc->bge_cdata.bge_rx_jumbo_ring_map,
			   sc->bge_ldata.bge_rx_jumbo_ring);

	/* Destroy jumbo buffer block. */
	bge_dma_block_free(sc->bge_cdata.bge_jumbo_tag,
			   sc->bge_cdata.bge_jumbo_map,
			   sc->bge_ldata.bge_jumbo_buf);
}

828 * Allocate a jumbo buffer.
830 static struct bge_jslot *
831 bge_jalloc(struct bge_softc *sc)
833 struct bge_jslot *entry;
835 lwkt_serialize_enter(&sc->bge_jslot_serializer);
836 entry = SLIST_FIRST(&sc->bge_jfree_listhead);
838 SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jslot_link);
839 entry->bge_inuse = 1;
841 if_printf(&sc->arpcom.ac_if, "no free jumbo buffers\n");
843 lwkt_serialize_exit(&sc->bge_jslot_serializer);
/*
 * Adjust usage count on a jumbo buffer.
 */
static void
bge_jref(void *arg)
{
	struct bge_jslot *entry = (struct bge_jslot *)arg;
	struct bge_softc *sc = entry->bge_sc;

	if (sc == NULL)
		panic("bge_jref: can't find softc pointer!");

	if (&sc->bge_cdata.bge_jslots[entry->bge_slot] != entry) {
		panic("bge_jref: asked to reference buffer "
		      "that we don't manage!");
	} else if (entry->bge_inuse == 0) {
		panic("bge_jref: buffer already free!");
	} else {
		atomic_add_int(&entry->bge_inuse, 1);
	}
}

/*
 * Release a jumbo buffer.
 */
static void
bge_jfree(void *arg)
{
	struct bge_jslot *entry = (struct bge_jslot *)arg;
	struct bge_softc *sc = entry->bge_sc;

	if (sc == NULL)
		panic("bge_jfree: can't find softc pointer!");

	if (&sc->bge_cdata.bge_jslots[entry->bge_slot] != entry) {
		panic("bge_jfree: asked to free buffer that we don't manage!");
	} else if (entry->bge_inuse == 0) {
		panic("bge_jfree: buffer already free!");
	} else {
		/*
		 * Possible MP race to 0, use the serializer.  The atomic insn
		 * is still needed for races against bge_jref().
		 */
		lwkt_serialize_enter(&sc->bge_jslot_serializer);
		atomic_subtract_int(&entry->bge_inuse, 1);
		if (entry->bge_inuse == 0) {
			SLIST_INSERT_HEAD(&sc->bge_jfree_listhead,
					  entry, jslot_link);
		}
		lwkt_serialize_exit(&sc->bge_jslot_serializer);
	}
}

/*
 * Initialize a standard receive ring descriptor.
 */
static int
bge_newbuf_std(struct bge_softc *sc, int i, int init)
{
	struct mbuf *m_new = NULL;
	bus_dma_segment_t seg;
	bus_dmamap_t map;
	int error, nsegs;

	m_new = m_getcl(init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m_new == NULL)
		return ENOBUFS;
	m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;

	if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
		m_adj(m_new, ETHER_ALIGN);

	error = bus_dmamap_load_mbuf_segment(sc->bge_cdata.bge_rx_mtag,
			sc->bge_cdata.bge_rx_tmpmap, m_new,
			&seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m_new);
		return error;
	}

	if (!init) {
		bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
				sc->bge_cdata.bge_rx_std_dmamap[i],
				BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag,
				  sc->bge_cdata.bge_rx_std_dmamap[i]);
	}

	map = sc->bge_cdata.bge_rx_tmpmap;
	sc->bge_cdata.bge_rx_tmpmap = sc->bge_cdata.bge_rx_std_dmamap[i];
	sc->bge_cdata.bge_rx_std_dmamap[i] = map;

	sc->bge_cdata.bge_rx_std_chain[i].bge_mbuf = m_new;
	sc->bge_cdata.bge_rx_std_chain[i].bge_paddr = seg.ds_addr;

	bge_setup_rxdesc_std(sc, i);
	return 0;
}

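/*
 * Design note: the new mbuf is loaded through bge_rx_tmpmap and the
 * maps are swapped only after the load succeeds, so on a live ring a
 * failed allocation leaves the previous mbuf and its DMA mapping
 * intact and the slot can simply be re-armed.
 */
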
static void
bge_setup_rxdesc_std(struct bge_softc *sc, int i)
{
	struct bge_rxchain *rc;
	struct bge_rx_bd *r;

	rc = &sc->bge_cdata.bge_rx_std_chain[i];
	r = &sc->bge_ldata.bge_rx_std_ring[i];

	r->bge_addr.bge_addr_lo = BGE_ADDR_LO(rc->bge_paddr);
	r->bge_addr.bge_addr_hi = BGE_ADDR_HI(rc->bge_paddr);
	r->bge_len = rc->bge_mbuf->m_len;
	r->bge_idx = i;
	r->bge_flags = BGE_RXBDFLAG_END;
}

/*
 * Initialize a jumbo receive ring descriptor. This allocates
 * a jumbo buffer from the pool managed internally by the driver.
 */
static int
bge_newbuf_jumbo(struct bge_softc *sc, int i, int init)
{
	struct mbuf *m_new = NULL;
	struct bge_jslot *buf;
	bus_addr_t paddr;

	/* Allocate the mbuf. */
	MGETHDR(m_new, init ? MB_WAIT : MB_DONTWAIT, MT_DATA);
	if (m_new == NULL)
		return ENOBUFS;

	/* Allocate the jumbo buffer */
	buf = bge_jalloc(sc);
	if (buf == NULL) {
		m_freem(m_new);
		return ENOBUFS;
	}

	/* Attach the buffer to the mbuf. */
	m_new->m_ext.ext_arg = buf;
	m_new->m_ext.ext_buf = buf->bge_buf;
	m_new->m_ext.ext_free = bge_jfree;
	m_new->m_ext.ext_ref = bge_jref;
	m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN;

	m_new->m_flags |= M_EXT;

	m_new->m_data = m_new->m_ext.ext_buf;
	m_new->m_len = m_new->m_pkthdr.len = m_new->m_ext.ext_size;

	paddr = buf->bge_paddr;
	if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0) {
		m_adj(m_new, ETHER_ALIGN);
		paddr += ETHER_ALIGN;
	}

	/* Save necessary information */
	sc->bge_cdata.bge_rx_jumbo_chain[i].bge_mbuf = m_new;
	sc->bge_cdata.bge_rx_jumbo_chain[i].bge_paddr = paddr;

	/* Set up the descriptor. */
	bge_setup_rxdesc_jumbo(sc, i);
	return 0;
}

static void
bge_setup_rxdesc_jumbo(struct bge_softc *sc, int i)
{
	struct bge_rx_bd *r;
	struct bge_rxchain *rc;

	r = &sc->bge_ldata.bge_rx_jumbo_ring[i];
	rc = &sc->bge_cdata.bge_rx_jumbo_chain[i];

	r->bge_addr.bge_addr_lo = BGE_ADDR_LO(rc->bge_paddr);
	r->bge_addr.bge_addr_hi = BGE_ADDR_HI(rc->bge_paddr);
	r->bge_len = rc->bge_mbuf->m_len;
	r->bge_idx = i;
	r->bge_flags = BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING;
}

static int
bge_init_rx_ring_std(struct bge_softc *sc)
{
	int i, error;

	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
		error = bge_newbuf_std(sc, i, 1);
		if (error)
			return error;
	}

	sc->bge_std = BGE_STD_RX_RING_CNT - 1;
	bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
	return(0);
}

static void
bge_free_rx_ring_std(struct bge_softc *sc)
{
	int i;

	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
		struct bge_rxchain *rc = &sc->bge_cdata.bge_rx_std_chain[i];

		if (rc->bge_mbuf != NULL) {
			bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag,
					  sc->bge_cdata.bge_rx_std_dmamap[i]);
			m_freem(rc->bge_mbuf);
			rc->bge_mbuf = NULL;
		}
		bzero(&sc->bge_ldata.bge_rx_std_ring[i],
		      sizeof(struct bge_rx_bd));
	}
}

static int
bge_init_rx_ring_jumbo(struct bge_softc *sc)
{
	struct bge_rcb *rcb;
	int i, error;

	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
		error = bge_newbuf_jumbo(sc, i, 1);
		if (error)
			return error;
	}

	sc->bge_jumbo = BGE_JUMBO_RX_RING_CNT - 1;

	rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
	rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 0);
	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);

	bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
	return(0);
}

static void
bge_free_rx_ring_jumbo(struct bge_softc *sc)
{
	int i;

	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
		struct bge_rxchain *rc = &sc->bge_cdata.bge_rx_jumbo_chain[i];

		if (rc->bge_mbuf != NULL) {
			m_freem(rc->bge_mbuf);
			rc->bge_mbuf = NULL;
		}
		bzero(&sc->bge_ldata.bge_rx_jumbo_ring[i],
		      sizeof(struct bge_rx_bd));
	}
}

static void
bge_free_tx_ring(struct bge_softc *sc)
{
	int i;

	for (i = 0; i < BGE_TX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
			bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag,
					  sc->bge_cdata.bge_tx_dmamap[i]);
			m_freem(sc->bge_cdata.bge_tx_chain[i]);
			sc->bge_cdata.bge_tx_chain[i] = NULL;
		}
		bzero(&sc->bge_ldata.bge_tx_ring[i],
		      sizeof(struct bge_tx_bd));
	}
}

static int
bge_init_tx_ring(struct bge_softc *sc)
{
	sc->bge_txcnt = 0;
	sc->bge_tx_saved_considx = 0;
	sc->bge_tx_prodidx = 0;

	/* Initialize transmit producer index for host-memory send ring. */
	bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);

	/* 5700 b2 errata */
	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
		bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);

	bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
	/* 5700 b2 errata */
	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
		bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);

	return(0);
}

static void
bge_setmulti(struct bge_softc *sc)
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	uint32_t hashes[4] = { 0, 0, 0, 0 };
	int h, i;

	ifp = &sc->arpcom.ac_if;

	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		for (i = 0; i < 4; i++)
			CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
		return;
	}

	/* First, zot all the existing filters. */
	for (i = 0; i < 4; i++)
		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);

	/* Now program new ones. */
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		h = ether_crc32_le(
		    LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
		    ETHER_ADDR_LEN) & 0x7f;
		hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
	}

	for (i = 0; i < 4; i++)
		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
}

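/*
 * Worked example (illustrative): the low 7 CRC bits select one of 128
 * filter bits spread over the four 32-bit BGE_MAR registers.  For
 * h = 0x4a (binary 1001010), (h & 0x60) >> 5 = 2 picks hashes[2], and
 * 1 << (h & 0x1f) sets bit 10 of that register.
 */
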
/*
 * Do endian, PCI and DMA initialization. Also check the on-board ROM
 * self-test results.
 */
static int
bge_chipinit(struct bge_softc *sc)
{
	int i;
	uint32_t dma_rw_ctl;

	/* Set endian type before we access any non-PCI registers. */
	pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, BGE_INIT, 4);

	/* Clear the MAC control register */
	CSR_WRITE_4(sc, BGE_MAC_MODE, 0);

	/*
	 * Clear the MAC statistics block in the NIC's
	 * internal memory.
	 */
	for (i = BGE_STATS_BLOCK;
	     i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t))
		BGE_MEMWIN_WRITE(sc, i, 0);

	for (i = BGE_STATUS_BLOCK;
	     i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t))
		BGE_MEMWIN_WRITE(sc, i, 0);

	/* Set up the PCI DMA control register. */
	if (sc->bge_flags & BGE_FLAG_PCIE) {
		/* PCI Express */
		dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
		    (0xf << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
		    (0x2 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
	} else if (sc->bge_flags & BGE_FLAG_PCIX) {
		/* PCI-X bus */
		if (BGE_IS_5714_FAMILY(sc)) {
			dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD;
			dma_rw_ctl &= ~BGE_PCIDMARWCTL_ONEDMA_ATONCE; /* XXX */
			/* XXX magic values, Broadcom-supplied Linux driver */
			if (sc->bge_asicrev == BGE_ASICREV_BCM5780) {
				dma_rw_ctl |= (1 << 20) | (1 << 18) |
				    BGE_PCIDMARWCTL_ONEDMA_ATONCE;
			} else {
				dma_rw_ctl |= (1 << 20) | (1 << 18) | (1 << 15);
			}
		} else if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
			/*
			 * The 5704 uses a different encoding of read/write
			 * watermarks.
			 */
			dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
			    (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
			    (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
		} else {
			dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
			    (0x3 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
			    (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
			    (0x0F);
		}

		/*
		 * 5703 and 5704 need ONEDMA_AT_ONCE as a workaround
		 * for hardware bugs.
		 */
		if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
		    sc->bge_asicrev == BGE_ASICREV_BCM5704) {
			uint32_t tmp;

			tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1f;
			if (tmp == 0x6 || tmp == 0x7)
				dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE;
		}
	} else {
		/* Conventional PCI bus */
		dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
		    (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
		    (0x7 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
		    (0x0F);
	}

	if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5704 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5705)
		dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
	pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);

	/*
	 * Set up general mode register.
	 */
	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS|
	    BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS|
	    BGE_MODECTL_TX_NO_PHDR_CSUM);

	/*
	 * Disable memory write invalidate.  Apparently it is not supported
	 * properly by these devices.
	 */
	PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD, PCIM_CMD_MWIEN, 4);

	/* Set the timer prescaler (always 66MHz) */
	CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/);

	if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
		DELAY(40);	/* XXX */

		/* Put PHY into ready state */
		BGE_CLRBIT(sc, BGE_MISC_CFG, BGE_MISCCFG_EPHY_IDDQ);
		CSR_READ_4(sc, BGE_MISC_CFG); /* Flush */
		DELAY(40);
	}

	return(0);
}

static int
bge_blockinit(struct bge_softc *sc)
{
	struct bge_rcb *rcb;
	bus_size_t vrcb;
	bge_hostaddr taddr;
	uint32_t val;
	int i;

	/*
	 * Initialize the memory window pointer register so that
	 * we can access the first 32K of internal NIC RAM. This will
	 * allow us to set up the TX send ring RCBs and the RX return
	 * ring RCBs, plus other things which live in NIC memory.
	 */
	CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);

	/* Note: the BCM5704 has a smaller mbuf space than other chips. */

	if (!BGE_IS_5705_PLUS(sc)) {
		/* Configure mbuf memory pool */
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_BUFFPOOL_1);
		if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
		else
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);

		/* Configure DMA resource pool */
		CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
		    BGE_DMA_DESCRIPTORS);
		CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
	}

	/* Configure mbuf pool watermarks */
	if (!BGE_IS_5705_PLUS(sc)) {
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
	} else if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x04);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x10);
	} else {
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
	}

	/* Configure DMA resource watermarks */
	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);

	/* Enable buffer manager */
	if (!BGE_IS_5705_PLUS(sc)) {
		CSR_WRITE_4(sc, BGE_BMAN_MODE,
		    BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN);

		/* Poll for buffer manager start indication */
		for (i = 0; i < BGE_TIMEOUT; i++) {
			if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
				break;
			DELAY(10);
		}

		if (i == BGE_TIMEOUT) {
			if_printf(&sc->arpcom.ac_if,
				  "buffer manager failed to start\n");
			return(ENXIO);
		}
	}

	/* Enable flow-through queues */
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);

	/* Wait until queue initialization is complete */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
			break;
		DELAY(10);
	}

	if (i == BGE_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if,
			  "flow-through queue init failed\n");
		return(ENXIO);
	}

	/* Initialize the standard RX ring control block */
	rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb;
	rcb->bge_hostaddr.bge_addr_lo =
	    BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr);
	rcb->bge_hostaddr.bge_addr_hi =
	    BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr);
	if (BGE_IS_5705_PLUS(sc))
		rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
	else
		rcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
	rcb->bge_nicaddr = BGE_STD_RX_RINGS;
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);

	/*
	 * Initialize the jumbo RX ring control block.
	 * We set the 'ring disabled' bit in the flags
	 * field until we're actually ready to start
	 * using this ring (i.e. once we set the MTU
	 * high enough to require it).
	 */
	if (BGE_IS_JUMBO_CAPABLE(sc)) {
		rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;

		rcb->bge_hostaddr.bge_addr_lo =
		    BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
		rcb->bge_hostaddr.bge_addr_hi =
		    BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
		rcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN,
		    BGE_RCB_FLAG_RING_DISABLED);
		rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
		    rcb->bge_hostaddr.bge_addr_hi);
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
		    rcb->bge_hostaddr.bge_addr_lo);
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
		    rcb->bge_maxlen_flags);
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);

		/* Set up dummy disabled mini ring RCB */
		rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb;
		rcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
		CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
		    rcb->bge_maxlen_flags);
	}

	/*
	 * Set the BD ring replenish thresholds. The recommended
	 * values are 1/8th the number of descriptors allocated to
	 * each ring.
	 */
	if (BGE_IS_5705_PLUS(sc))
		val = 8;
	else
		val = BGE_STD_RX_RING_CNT / 8;
	CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, val);
	CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8);

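	/*
	 * For example, with BGE_STD_RX_RING_CNT = 512 the standard ring
	 * threshold above becomes 512 / 8 = 64: the chip batches its DMA
	 * fetches of replenished descriptors until roughly that many
	 * standard ring entries have been consumed.
	 */
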
	/*
	 * Disable all unused send rings by setting the 'ring disabled'
	 * bit in the flags field of all the TX send ring control blocks.
	 * These are located in NIC memory.
	 */
	vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
	for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) {
		RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
		RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
		vrcb += sizeof(struct bge_rcb);
	}

	/* Configure TX RCB 0 (we use only the first ring) */
	vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
	BGE_HOSTADDR(taddr, sc->bge_ldata.bge_tx_ring_paddr);
	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
	RCB_WRITE_4(sc, vrcb, bge_nicaddr,
	    BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
	if (!BGE_IS_5705_PLUS(sc)) {
		RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
		    BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
	}

	/* Disable all unused RX return rings */
	vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
	for (i = 0; i < BGE_RX_RINGS_MAX; i++) {
		RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0);
		RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0);
		RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
		    BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt,
		    BGE_RCB_FLAG_RING_DISABLED));
		RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
		bge_writembx(sc, BGE_MBX_RX_CONS0_LO +
		    (i * (sizeof(uint64_t))), 0);
		vrcb += sizeof(struct bge_rcb);
	}

	/* Initialize RX ring indexes */
	bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0);
	bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
	bge_writembx(sc, BGE_MBX_RX_MINI_PROD_LO, 0);

	/*
	 * Set up RX return ring 0.
	 * Note that the NIC address for RX return rings is 0x00000000.
	 * The return rings live entirely within the host, so the
	 * nicaddr field in the RCB isn't used.
	 */
	vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
	BGE_HOSTADDR(taddr, sc->bge_ldata.bge_rx_return_ring_paddr);
	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
	RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0x00000000);
	RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
	    BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));

	/* Set random backoff seed for TX */
	CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
	    sc->arpcom.ac_enaddr[0] + sc->arpcom.ac_enaddr[1] +
	    sc->arpcom.ac_enaddr[2] + sc->arpcom.ac_enaddr[3] +
	    sc->arpcom.ac_enaddr[4] + sc->arpcom.ac_enaddr[5] +
	    BGE_TX_BACKOFF_SEED_MASK);

	/* Set inter-packet gap */
	CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);

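	/*
	 * 0x2620 is a magic default carried over from Broadcom's own
	 * drivers; per the register layout used by the Linux tg3 driver
	 * it appears to decode as an IPG-CRS of 2, an IPG of 6 and a
	 * slot time of 0x20 (32) bit times.
	 */
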
	/*
	 * Specify which ring to use for packets that don't match
	 * any RX rules.
	 */
	CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);

	/*
	 * Configure number of RX lists. One interrupt distribution
	 * list, sixteen active lists, one bad frames class.
	 */
	CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);

	/* Initialize RX list placement stats mask. */
	CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
	CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);

	/* Disable host coalescing until we get it set up */
	CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);

	/* Poll to make sure it's shut down. */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
			break;
		DELAY(10);
	}

	if (i == BGE_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if,
			  "host coalescing engine failed to idle\n");
		return(ENXIO);
	}

	/* Set up host coalescing defaults */
	CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
	CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
	if (!BGE_IS_5705_PLUS(sc)) {
		CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
		CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
	}
	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 1);
	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 1);

	/* Set up address of statistics block */
	if (!BGE_IS_5705_PLUS(sc)) {
		CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI,
		    BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr));
		CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
		    BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr));

		CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
		CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
		CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
	}

	/* Set up address of status block */
	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI,
	    BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr));
	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
	    BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr));
	sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx = 0;
	sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx = 0;

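	/*
	 * Design note: the status block is DMAed by the NIC into host
	 * memory, so the interrupt path reads bge_rx_prod_idx and
	 * bge_tx_cons_idx from RAM (see bge_rxeof()/bge_txeof()) instead
	 * of doing expensive register reads; the indices are zeroed here
	 * so no stale work is processed before the first update.
	 */
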
	/* Turn on host coalescing state machine */
	CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);

	/* Turn on RX BD completion state machine and enable attentions */
	CSR_WRITE_4(sc, BGE_RBDC_MODE,
	    BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);

	/* Turn on RX list placement state machine */
	CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);

	/* Turn on RX list selector state machine. */
	if (!BGE_IS_5705_PLUS(sc))
		CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);

	/* Turn on DMA, clear stats */
	CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB|
	    BGE_MACMODE_RXDMA_ENB|BGE_MACMODE_RX_STATS_CLEAR|
	    BGE_MACMODE_TX_STATS_CLEAR|BGE_MACMODE_RX_STATS_ENB|
	    BGE_MACMODE_TX_STATS_ENB|BGE_MACMODE_FRMHDR_DMA_ENB|
	    ((sc->bge_flags & BGE_FLAG_TBI) ?
	     BGE_PORTMODE_TBI : BGE_PORTMODE_MII));

	/* Set misc. local control, enable interrupts on attentions */
	CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);

#ifdef notdef
	/* Assert GPIO pins for PHY reset */
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
	    BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
	    BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
#endif

	/* Turn on DMA completion state machine */
	if (!BGE_IS_5705_PLUS(sc))
		CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);

	/* Turn on write DMA state machine */
	val = BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS;
	if (sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5787)
		val |= (1 << 29);	/* Enable host coalescing bug fix. */
	CSR_WRITE_4(sc, BGE_WDMA_MODE, val);
	DELAY(40);

	/* Turn on read DMA state machine */
	val = BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS;
	if (sc->bge_flags & BGE_FLAG_PCIE)
		val |= BGE_RDMAMODE_FIFO_LONG_BURST;
	CSR_WRITE_4(sc, BGE_RDMA_MODE, val);
	DELAY(40);

	/* Turn on RX data completion state machine */
	CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);

	/* Turn on RX BD initiator state machine */
	CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);

	/* Turn on RX data and RX BD initiator state machine */
	CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);

	/* Turn on Mbuf cluster free state machine */
	if (!BGE_IS_5705_PLUS(sc))
		CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);

	/* Turn on send BD completion state machine */
	CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);

	/* Turn on send data completion state machine */
	CSR_WRITE_4(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);

	/* Turn on send data initiator state machine */
	CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);

	/* Turn on send BD initiator state machine */
	CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);

	/* Turn on send BD selector state machine */
	CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);

	CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
	CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
	    BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);

	/* ack/clear link change events */
	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
	    BGE_MACSTAT_LINK_CHANGED);
	CSR_WRITE_4(sc, BGE_MI_STS, 0);

	/* Enable PHY auto polling (for MII/GMII only) */
	if (sc->bge_flags & BGE_FLAG_TBI) {
		CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
	} else {
		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL|10<<16);
		if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
		    sc->bge_chipid != BGE_CHIPID_BCM5700_B2) {
			CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
			    BGE_EVTENB_MI_INTERRUPT);
		}
	}

	/*
	 * Clear any pending link state attention.
	 * Otherwise some link state change events may be lost until attention
	 * is cleared by bge_intr() -> bge_softc.bge_link_upd() sequence.
	 * It's not necessary on newer BCM chips - perhaps enabling link
	 * state change attentions implies clearing pending attention.
	 */
	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
	    BGE_MACSTAT_LINK_CHANGED);

	/* Enable link state change attentions. */
	BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);

	return(0);
}

/*
 * Probe for a Broadcom chip. Check the PCI vendor and device IDs
 * against our list and return its name if we find a match. Note
 * that since the Broadcom controller contains VPD support, we
 * can get the device name string from the controller itself instead
 * of the compiled-in string. This is a little slow, but it guarantees
 * we'll always announce the right product name.
 */
static int
bge_probe(device_t dev)
{
	const struct bge_type *t;
	uint16_t product, vendor;

	product = pci_get_device(dev);
	vendor = pci_get_vendor(dev);

	for (t = bge_devs; t->bge_name != NULL; t++) {
		if (vendor == t->bge_vid && product == t->bge_did)
			break;
	}
	if (t->bge_name == NULL)
		return(ENXIO);

	device_set_desc(dev, t->bge_name);
	if (pci_get_subvendor(dev) == PCI_VENDOR_DELL) {
		struct bge_softc *sc = device_get_softc(dev);
		sc->bge_flags |= BGE_FLAG_NO_3LED;
	}
	return(0);
}

static int
bge_attach(device_t dev)
{
	struct ifnet *ifp;
	struct bge_softc *sc;
	uint32_t hwcfg = 0;
	int error = 0, rid;
	uint8_t ether_addr[ETHER_ADDR_LEN];

	sc = device_get_softc(dev);
	sc->bge_dev = dev;
	callout_init(&sc->bge_stat_timer);
	lwkt_serialize_init(&sc->bge_jslot_serializer);

#ifndef BURN_BRIDGES
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		uint32_t irq, mem;

		irq = pci_read_config(dev, PCIR_INTLINE, 4);
		mem = pci_read_config(dev, BGE_PCI_BAR0, 4);

		device_printf(dev, "chip is in D%d power mode "
			      "-- setting to D0\n", pci_get_powerstate(dev));

		pci_set_powerstate(dev, PCI_POWERSTATE_D0);

		pci_write_config(dev, PCIR_INTLINE, irq, 4);
		pci_write_config(dev, BGE_PCI_BAR0, mem, 4);
	}
#endif	/* !BURN_BRIDGES */

	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

	rid = BGE_PCI_BAR0;
	sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
					     RF_ACTIVE);

	if (sc->bge_res == NULL) {
		device_printf(dev, "couldn't map memory\n");
		return ENXIO;
	}

	sc->bge_btag = rman_get_bustag(sc->bge_res);
	sc->bge_bhandle = rman_get_bushandle(sc->bge_res);

	/* Save various chip information */
	sc->bge_chipid =
	    pci_read_config(dev, BGE_PCI_MISC_CTL, 4) &
	    BGE_PCIMISCCTL_ASICREV;
	sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid);
	sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid);

	/* Save chipset family. */
	switch (sc->bge_asicrev) {
	case BGE_ASICREV_BCM5700:
	case BGE_ASICREV_BCM5701:
	case BGE_ASICREV_BCM5703:
	case BGE_ASICREV_BCM5704:
		sc->bge_flags |= BGE_FLAG_5700_FAMILY | BGE_FLAG_JUMBO;
		break;

	case BGE_ASICREV_BCM5714_A0:
	case BGE_ASICREV_BCM5780:
	case BGE_ASICREV_BCM5714:
		sc->bge_flags |= BGE_FLAG_5714_FAMILY;
		/* Fall through */

	case BGE_ASICREV_BCM5750:
	case BGE_ASICREV_BCM5752:
	case BGE_ASICREV_BCM5755:
	case BGE_ASICREV_BCM5787:
	case BGE_ASICREV_BCM5906:
		sc->bge_flags |= BGE_FLAG_575X_PLUS;
		/* Fall through */

	case BGE_ASICREV_BCM5705:
		sc->bge_flags |= BGE_FLAG_5705_PLUS;
		break;
	}

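	/*
	 * Note the deliberate fall-throughs above: 5714-family parts are
	 * also 575X-plus, and every 575X-plus part is also 5705-plus, so
	 * each case accumulates the flags of the more generic families
	 * below it.
	 */
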
1810 if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
1811 sc->bge_flags |= BGE_FLAG_NO_EEPROM;
1814 * Set various quirk flags.
1817 sc->bge_flags |= BGE_FLAG_ETH_WIRESPEED;
1818 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
1819 (sc->bge_asicrev == BGE_ASICREV_BCM5705 &&
1820 (sc->bge_chipid != BGE_CHIPID_BCM5705_A0 &&
1821 sc->bge_chipid != BGE_CHIPID_BCM5705_A1)) ||
1822 sc->bge_asicrev == BGE_ASICREV_BCM5906)
1823 sc->bge_flags &= ~BGE_FLAG_ETH_WIRESPEED;
1825 if (sc->bge_chipid == BGE_CHIPID_BCM5701_A0 ||
1826 sc->bge_chipid == BGE_CHIPID_BCM5701_B0)
1827 sc->bge_flags |= BGE_FLAG_CRC_BUG;
1829 if (sc->bge_chiprev == BGE_CHIPREV_5703_AX ||
1830 sc->bge_chiprev == BGE_CHIPREV_5704_AX)
1831 sc->bge_flags |= BGE_FLAG_ADC_BUG;
1833 if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0)
1834 sc->bge_flags |= BGE_FLAG_5704_A0_BUG;
1836 if (BGE_IS_5705_PLUS(sc)) {
1837 if (sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
1838 sc->bge_asicrev == BGE_ASICREV_BCM5787) {
1839 uint32_t product = pci_get_device(dev);
1841 if (product != PCI_PRODUCT_BROADCOM_BCM5722 &&
1842 product != PCI_PRODUCT_BROADCOM_BCM5756)
1843 sc->bge_flags |= BGE_FLAG_JITTER_BUG;
1844 if (product == PCI_PRODUCT_BROADCOM_BCM5755M)
1845 sc->bge_flags |= BGE_FLAG_ADJUST_TRIM;
1846 } else if (sc->bge_asicrev != BGE_ASICREV_BCM5906) {
1847 sc->bge_flags |= BGE_FLAG_BER_BUG;
1851 /* Allocate interrupt */
1854 sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1855 RF_SHAREABLE | RF_ACTIVE);
1857 if (sc->bge_irq == NULL) {
1858 device_printf(dev, "couldn't map interrupt\n");
1864 * Check if this is a PCI-X or PCI Express device.
1866 if (BGE_IS_5705_PLUS(sc)) {
1867 if (pci_is_pcie(dev)) {
1868 sc->bge_flags |= BGE_FLAG_PCIE;
1869 pcie_set_max_readrq(dev, PCIEM_DEVCTL_MAX_READRQ_4096);
1873 * Check if the device is in PCI-X Mode.
1874 * (This bit is not valid on PCI Express controllers.)
1876 if ((pci_read_config(sc->bge_dev, BGE_PCI_PCISTATE, 4) &
1877 BGE_PCISTATE_PCI_BUSMODE) == 0)
1878 sc->bge_flags |= BGE_FLAG_PCIX;
1881 device_printf(dev, "CHIP ID 0x%08x; "
1882 "ASIC REV 0x%02x; CHIP REV 0x%02x; %s\n",
1883 sc->bge_chipid, sc->bge_asicrev, sc->bge_chiprev,
1884 (sc->bge_flags & BGE_FLAG_PCIX) ? "PCI-X"
1885 : ((sc->bge_flags & BGE_FLAG_PCIE) ?
1888 ifp = &sc->arpcom.ac_if;
1889 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1891 /* Try to reset the chip. */
1894 if (bge_chipinit(sc)) {
1895 device_printf(dev, "chip initialization failed\n");
1901 * Get station address
1903 error = bge_get_eaddr(sc, ether_addr);
1905 device_printf(dev, "failed to read station address\n");
1909 /* 5705/5750 limits RX return ring to 512 entries. */
1910 if (BGE_IS_5705_PLUS(sc))
1911 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
1912 else
1913 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
1915 error = bge_dma_alloc(sc);
1916 if (error)
1917 goto fail;
1919 /* Set default tuneable values. */
1920 sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
1921 sc->bge_rx_coal_ticks = bge_rx_coal_ticks;
1922 sc->bge_tx_coal_ticks = bge_tx_coal_ticks;
1923 sc->bge_rx_max_coal_bds = bge_rx_max_coal_bds;
1924 sc->bge_tx_max_coal_bds = bge_tx_max_coal_bds;
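/*
 * Annotation: these defaults come from the driver-global knobs
 * (bge_rx_coal_ticks et al.); they can also be changed per device
 * at runtime through the sysctl nodes created later in this
 * function.
 */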
1926 /* Set up ifnet structure */
1927 ifp->if_softc = sc;
1928 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1929 ifp->if_ioctl = bge_ioctl;
1930 ifp->if_start = bge_start;
1931 #ifdef DEVICE_POLLING
1932 ifp->if_poll = bge_poll;
1933 #endif
1934 ifp->if_watchdog = bge_watchdog;
1935 ifp->if_init = bge_init;
1936 ifp->if_mtu = ETHERMTU;
1937 ifp->if_capabilities = IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
1938 ifq_set_maxlen(&ifp->if_snd, BGE_TX_RING_CNT - 1);
1939 ifq_set_ready(&ifp->if_snd);
1941 /*
1942 * 5700 B0 chips do not support checksumming correctly due
1943 * to hardware bugs.
1944 */
1945 if (sc->bge_chipid != BGE_CHIPID_BCM5700_B0) {
1946 ifp->if_capabilities |= IFCAP_HWCSUM;
1947 ifp->if_hwassist = BGE_CSUM_FEATURES;
1948 }
1949 ifp->if_capenable = ifp->if_capabilities;
1951 /*
1952 * Figure out what sort of media we have by checking the
1953 * hardware config word in the first 32k of NIC internal memory,
1954 * or fall back to examining the EEPROM if necessary.
1955 * Note: on some BCM5700 cards, this value appears to be unset.
1956 * If that's the case, we have to rely on identifying the NIC
1957 * by its PCI subsystem ID, as we do below for the SysKonnect
1958 * SK-9D41.
1959 */
1960 if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER)
1961 hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
1962 else {
1963 if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
1964 sizeof(hwcfg))) {
1965 device_printf(dev, "failed to read EEPROM\n");
1966 error = ENXIO;
1967 goto fail;
1968 }
1969 hwcfg = ntohl(hwcfg);
1970 }
1972 if ((hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER)
1973 sc->bge_flags |= BGE_FLAG_TBI;
1975 /* The SysKonnect SK-9D41 is a 1000baseSX card. */
1976 if (pci_get_subvendor(dev) == PCI_PRODUCT_SCHNEIDERKOCH_SK_9D41)
1977 sc->bge_flags |= BGE_FLAG_TBI;
1979 if (sc->bge_flags & BGE_FLAG_TBI) {
1980 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK,
1981 bge_ifmedia_upd, bge_ifmedia_sts);
1982 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
1983 ifmedia_add(&sc->bge_ifmedia,
1984 IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL);
1985 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
1986 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO);
1987 sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
1988 } else {
1989 /*
1990 * Do transceiver setup.
1991 */
1992 if (mii_phy_probe(dev, &sc->bge_miibus,
1993 bge_ifmedia_upd, bge_ifmedia_sts)) {
1994 device_printf(dev, "MII without any PHY!\n");
1995 error = ENXIO;
1996 goto fail;
1997 }
1998 }
2000 /*
2001 * When using the BCM5701 in PCI-X mode, data corruption has
2002 * been observed in the first few bytes of some received packets.
2003 * Aligning the packet buffer in memory eliminates the corruption.
2004 * Unfortunately, this misaligns the packet payloads. On platforms
2005 * which do not support unaligned accesses, we will realign the
2006 * payloads by copying the received packets.
2007 */
2008 if (sc->bge_asicrev == BGE_ASICREV_BCM5701 &&
2009 (sc->bge_flags & BGE_FLAG_PCIX))
2010 sc->bge_flags |= BGE_FLAG_RX_ALIGNBUG;
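/*
 * The actual realignment for this workaround is performed in
 * bge_rxeof(), which shifts received data forward by ETHER_ALIGN
 * bytes before handing the mbuf to the stack.
 */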
2012 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
2013 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) {
2014 sc->bge_link_upd = bge_bcm5700_link_upd;
2015 sc->bge_link_chg = BGE_MACSTAT_MI_INTERRUPT;
2016 } else if (sc->bge_flags & BGE_FLAG_TBI) {
2017 sc->bge_link_upd = bge_tbi_link_upd;
2018 sc->bge_link_chg = BGE_MACSTAT_LINK_CHANGED;
2019 } else {
2020 sc->bge_link_upd = bge_copper_link_upd;
2021 sc->bge_link_chg = BGE_MACSTAT_LINK_CHANGED;
2022 }
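/*
 * Annotation: three link-state strategies are selected above.
 * Early BCM5700 chips use MII interrupts (the status-block link
 * word is unreliable there), TBI/fiber boards watch the PCS sync
 * state, and all other copper chips use the MAC's link-changed
 * attention together with autopolling.
 */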
2024 /*
2025 * Create sysctl nodes.
2026 */
2027 sysctl_ctx_init(&sc->bge_sysctl_ctx);
2028 sc->bge_sysctl_tree = SYSCTL_ADD_NODE(&sc->bge_sysctl_ctx,
2029 SYSCTL_STATIC_CHILDREN(_hw),
2030 OID_AUTO,
2031 device_get_nameunit(dev),
2032 CTLFLAG_RD, 0, "");
2033 if (sc->bge_sysctl_tree == NULL) {
2034 device_printf(dev, "can't add sysctl node\n");
2035 error = ENXIO;
2036 goto fail;
2037 }
2039 SYSCTL_ADD_PROC(&sc->bge_sysctl_ctx,
2040 SYSCTL_CHILDREN(sc->bge_sysctl_tree),
2041 OID_AUTO, "rx_coal_ticks",
2042 CTLTYPE_INT | CTLFLAG_RW,
2043 sc, 0, bge_sysctl_rx_coal_ticks, "I",
2044 "Receive coalescing ticks (usec).");
2045 SYSCTL_ADD_PROC(&sc->bge_sysctl_ctx,
2046 SYSCTL_CHILDREN(sc->bge_sysctl_tree),
2047 OID_AUTO, "tx_coal_ticks",
2048 CTLTYPE_INT | CTLFLAG_RW,
2049 sc, 0, bge_sysctl_tx_coal_ticks, "I",
2050 "Transmit coalescing ticks (usec).");
2051 SYSCTL_ADD_PROC(&sc->bge_sysctl_ctx,
2052 SYSCTL_CHILDREN(sc->bge_sysctl_tree),
2053 OID_AUTO, "rx_max_coal_bds",
2054 CTLTYPE_INT | CTLFLAG_RW,
2055 sc, 0, bge_sysctl_rx_max_coal_bds, "I",
2056 "Receive max coalesced BD count.");
2057 SYSCTL_ADD_PROC(&sc->bge_sysctl_ctx,
2058 SYSCTL_CHILDREN(sc->bge_sysctl_tree),
2059 OID_AUTO, "tx_max_coal_bds",
2060 CTLTYPE_INT | CTLFLAG_RW,
2061 sc, 0, bge_sysctl_tx_max_coal_bds, "I",
2062 "Transmit max coalesced BD count.");
2064 /*
2065 * Call MI attach routine.
2066 */
2067 ether_ifattach(ifp, ether_addr, NULL);
2069 error = bus_setup_intr(dev, sc->bge_irq, INTR_MPSAFE,
2070 bge_intr, sc, &sc->bge_intrhand,
2071 ifp->if_serializer);
2072 if (error) {
2073 ether_ifdetach(ifp);
2074 device_printf(dev, "couldn't set up irq\n");
2075 goto fail;
2076 }
2078 ifp->if_cpuid = ithread_cpuid(rman_get_start(sc->bge_irq));
2079 KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus);
2081 return(0);
2082 fail:
2083 bge_detach(dev);
2084 return(error);
2085 }
2087 static int
2088 bge_detach(device_t dev)
2089 {
2090 struct bge_softc *sc = device_get_softc(dev);
2092 if (device_is_attached(dev)) {
2093 struct ifnet *ifp = &sc->arpcom.ac_if;
2095 lwkt_serialize_enter(ifp->if_serializer);
2096 bge_stop(sc);
2097 bge_reset(sc);
2098 bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand);
2099 lwkt_serialize_exit(ifp->if_serializer);
2101 ether_ifdetach(ifp);
2102 }
2104 if (sc->bge_flags & BGE_FLAG_TBI)
2105 ifmedia_removeall(&sc->bge_ifmedia);
2106 if (sc->bge_miibus)
2107 device_delete_child(dev, sc->bge_miibus);
2108 bus_generic_detach(dev);
2110 if (sc->bge_irq != NULL)
2111 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->bge_irq);
2113 if (sc->bge_res != NULL)
2114 bus_release_resource(dev, SYS_RES_MEMORY,
2115 BGE_PCI_BAR0, sc->bge_res);
2117 if (sc->bge_sysctl_tree != NULL)
2118 sysctl_ctx_free(&sc->bge_sysctl_ctx);
2120 bge_dma_free(sc);
2122 return(0);
2123 }
2125 static void
2126 bge_reset(struct bge_softc *sc)
2127 {
2128 device_t dev;
2129 uint32_t cachesize, command, pcistate, reset;
2130 void (*write_op)(struct bge_softc *, uint32_t, uint32_t);
2131 int i, val = 0;
2133 dev = sc->bge_dev;
2135 if (BGE_IS_575X_PLUS(sc) && !BGE_IS_5714_FAMILY(sc) &&
2136 sc->bge_asicrev != BGE_ASICREV_BCM5906) {
2137 if (sc->bge_flags & BGE_FLAG_PCIE)
2138 write_op = bge_writemem_direct;
2139 else
2140 write_op = bge_writemem_ind;
2141 } else {
2142 write_op = bge_writereg_ind;
2143 }
2145 /* Save some important PCI state. */
2146 cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
2147 command = pci_read_config(dev, BGE_PCI_CMD, 4);
2148 pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);
2150 pci_write_config(dev, BGE_PCI_MISC_CTL,
2151 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2152 BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW, 4);
2154 /* Disable fastboot on controllers that support it. */
2155 if (sc->bge_asicrev == BGE_ASICREV_BCM5752 ||
2156 sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
2157 sc->bge_asicrev == BGE_ASICREV_BCM5787) {
2158 if (bootverbose)
2159 if_printf(&sc->arpcom.ac_if, "Disabling fastboot\n");
2160 CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0x0);
2161 }
2163 /*
2164 * Write the magic number to SRAM at offset 0xB50.
2165 * When firmware finishes its initialization it will
2166 * write ~BGE_MAGIC_NUMBER to the same location.
2167 */
2168 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
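/*
 * The 1's-complement handshake is polled for further down in this
 * function, once the global reset has been issued.
 */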
2170 reset = BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1);
2172 /* XXX: Broadcom Linux driver. */
2173 if (sc->bge_flags & BGE_FLAG_PCIE) {
2174 if (CSR_READ_4(sc, 0x7e2c) == 0x60) /* PCIE 1.0 */
2175 CSR_WRITE_4(sc, 0x7e2c, 0x20);
2176 if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
2177 /* Prevent PCIE link training during global reset */
2178 CSR_WRITE_4(sc, BGE_MISC_CFG, (1<<29));
2179 reset |= (1<<29);
2180 }
2181 }
2183 /*
2184 * Set GPHY Power Down Override to leave GPHY
2185 * powered up in D0 uninitialized.
2186 */
2187 if (BGE_IS_5705_PLUS(sc))
2188 reset |= 0x04000000;
2190 /* Issue global reset */
2191 write_op(sc, BGE_MISC_CFG, reset);
2193 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
2194 uint32_t status, ctrl;
2196 status = CSR_READ_4(sc, BGE_VCPU_STATUS);
2197 CSR_WRITE_4(sc, BGE_VCPU_STATUS,
2198 status | BGE_VCPU_STATUS_DRV_RESET);
2199 ctrl = CSR_READ_4(sc, BGE_VCPU_EXT_CTRL);
2200 CSR_WRITE_4(sc, BGE_VCPU_EXT_CTRL,
2201 ctrl & ~BGE_VCPU_EXT_CTRL_HALT_CPU);
2202 }
2204 DELAY(1000);
2206 /* XXX: Broadcom Linux driver. */
2207 if (sc->bge_flags & BGE_FLAG_PCIE) {
2208 if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
2209 uint32_t v;
2211 DELAY(500000); /* wait for link training to complete */
2212 v = pci_read_config(dev, 0xc4, 4);
2213 pci_write_config(dev, 0xc4, v | (1<<15), 4);
2214 }
2215 /*
2216 * Set PCIE max payload size to 128 bytes and
2217 * clear error status.
2218 */
2219 pci_write_config(dev, 0xd8, 0xf5000, 4);
2220 }
2222 /* Reset some of the PCI state that got zapped by reset */
2223 pci_write_config(dev, BGE_PCI_MISC_CTL,
2224 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2225 BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW, 4);
2226 pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
2227 pci_write_config(dev, BGE_PCI_CMD, command, 4);
2228 write_op(sc, BGE_MISC_CFG, (65 << 1));
2230 /* Enable memory arbiter. */
2231 if (BGE_IS_5714_FAMILY(sc)) {
2234 val = CSR_READ_4(sc, BGE_MARB_MODE);
2235 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | val);
2236 } else {
2237 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
2238 }
2240 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
2241 for (i = 0; i < BGE_TIMEOUT; i++) {
2242 val = CSR_READ_4(sc, BGE_VCPU_STATUS);
2243 if (val & BGE_VCPU_STATUS_INIT_DONE)
2244 break;
2245 DELAY(100);
2246 }
2247 if (i == BGE_TIMEOUT) {
2248 if_printf(&sc->arpcom.ac_if, "reset timed out\n");
2249 return;
2250 }
2251 } else {
2252 /*
2253 * Poll until we see the 1's complement of the magic number.
2254 * This indicates that the firmware initialization
2255 * is complete.
2256 */
2257 for (i = 0; i < BGE_FIRMWARE_TIMEOUT; i++) {
2258 val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
2259 if (val == ~BGE_MAGIC_NUMBER)
2260 break;
2261 DELAY(10);
2262 }
2263 if (i == BGE_FIRMWARE_TIMEOUT) {
2264 if_printf(&sc->arpcom.ac_if, "firmware handshake "
2265 "timed out, found 0x%08x\n", val);
2266 return;
2267 }
2268 }
2270 /*
2271 * XXX Wait for the value of the PCISTATE register to
2272 * return to its original pre-reset state. This is a
2273 * fairly good indicator of reset completion. If we don't
2274 * wait for the reset to fully complete, trying to read
2275 * from the device's non-PCI registers may yield garbage
2276 * results.
2277 */
2278 for (i = 0; i < BGE_TIMEOUT; i++) {
2279 if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate)
2280 break;
2281 DELAY(10);
2282 }
2284 if (sc->bge_flags & BGE_FLAG_PCIE) {
2285 reset = bge_readmem_ind(sc, 0x7c00);
2286 bge_writemem_ind(sc, 0x7c00, reset | (1 << 25));
2287 }
2289 /* Fix up byte swapping */
2290 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS |
2291 BGE_MODECTL_BYTESWAP_DATA);
2293 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
2295 /*
2296 * The 5704 in TBI mode apparently needs some special
2297 * adjustment to ensure the SERDES drive level is set
2298 * to 1.2V.
2299 */
2300 if (sc->bge_asicrev == BGE_ASICREV_BCM5704 &&
2301 (sc->bge_flags & BGE_FLAG_TBI)) {
2302 uint32_t serdescfg;
2304 serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG);
2305 serdescfg = (serdescfg & ~0xFFF) | 0x880;
2306 CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg);
2307 }
2309 /* XXX: Broadcom Linux driver. */
2310 if ((sc->bge_flags & BGE_FLAG_PCIE) &&
2311 sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
2312 uint32_t v;
2314 v = CSR_READ_4(sc, 0x7c00);
2315 CSR_WRITE_4(sc, 0x7c00, v | (1<<25));
2316 }
2318 DELAY(10000);
2319 }
2321 /*
2322 * Frame reception handling. This is called if there's a frame
2323 * on the receive return list.
2324 *
2325 * Note: we have to be able to handle two possibilities here:
2326 * 1) the frame is from the jumbo receive ring
2327 * 2) the frame is from the standard receive ring
2328 */
2330 static void
2331 bge_rxeof(struct bge_softc *sc)
2332 {
2333 struct ifnet *ifp;
2334 int stdcnt = 0, jumbocnt = 0;
2335 struct mbuf_chain chain[MAXCPU];
2337 if (sc->bge_rx_saved_considx ==
2338 sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx)
2339 return;
2341 ether_input_chain_init(chain);
2343 ifp = &sc->arpcom.ac_if;
2345 while (sc->bge_rx_saved_considx !=
2346 sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx) {
2347 struct bge_rx_bd *cur_rx;
2348 uint32_t rxidx;
2349 struct mbuf *m = NULL;
2350 uint16_t vlan_tag = 0;
2351 int have_tag = 0;
2353 cur_rx =
2354 &sc->bge_ldata.bge_rx_return_ring[sc->bge_rx_saved_considx];
2356 rxidx = cur_rx->bge_idx;
2357 BGE_INC(sc->bge_rx_saved_considx, sc->bge_return_ring_cnt);
2360 if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
2361 have_tag = 1;
2362 vlan_tag = cur_rx->bge_vlan_tag;
2363 }
2365 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
2366 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
2367 jumbocnt++;
2369 if (rxidx != sc->bge_jumbo) {
2370 ifp->if_ierrors++;
2371 if_printf(ifp, "sw jumbo index(%d) "
2372 "and hw jumbo index(%d) mismatch, drop!\n",
2373 sc->bge_jumbo, rxidx);
2374 bge_setup_rxdesc_jumbo(sc, rxidx);
2375 continue;
2376 }
2378 m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx].bge_mbuf;
2379 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2380 ifp->if_ierrors++;
2381 bge_setup_rxdesc_jumbo(sc, sc->bge_jumbo);
2382 continue;
2383 }
2384 if (bge_newbuf_jumbo(sc, sc->bge_jumbo, 0)) {
2385 ifp->if_ierrors++;
2386 bge_setup_rxdesc_jumbo(sc, sc->bge_jumbo);
2387 continue;
2388 }
2389 } else {
2390 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
2391 stdcnt++;
2393 if (rxidx != sc->bge_std) {
2394 ifp->if_ierrors++;
2395 if_printf(ifp, "sw std index(%d) "
2396 "and hw std index(%d) mismatch, drop!\n",
2397 sc->bge_std, rxidx);
2398 bge_setup_rxdesc_std(sc, rxidx);
2399 continue;
2400 }
2402 m = sc->bge_cdata.bge_rx_std_chain[rxidx].bge_mbuf;
2403 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2404 ifp->if_ierrors++;
2405 bge_setup_rxdesc_std(sc, sc->bge_std);
2406 continue;
2407 }
2408 if (bge_newbuf_std(sc, sc->bge_std, 0)) {
2409 ifp->if_ierrors++;
2410 bge_setup_rxdesc_std(sc, sc->bge_std);
2411 continue;
2412 }
2413 }
2415 ifp->if_ipackets++;
2416 #ifndef __i386__
2417 /*
2418 * The i386 allows unaligned accesses, but for other
2419 * platforms we must make sure the payload is aligned.
2420 */
2421 if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) {
2422 bcopy(m->m_data, m->m_data + ETHER_ALIGN,
2423 cur_rx->bge_len);
2424 m->m_data += ETHER_ALIGN;
2425 }
2426 #endif
2427 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
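/*
 * Note: cur_rx->bge_len includes the 4-byte Ethernet CRC, which is
 * why it is subtracted when setting the mbuf packet length above.
 */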
2428 m->m_pkthdr.rcvif = ifp;
2430 if (ifp->if_capenable & IFCAP_RXCSUM) {
2431 if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
2432 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2433 if ((cur_rx->bge_ip_csum ^ 0xffff) == 0)
2434 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2435 }
2436 if ((cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) &&
2437 m->m_pkthdr.len >= BGE_MIN_FRAME) {
2438 m->m_pkthdr.csum_data =
2439 cur_rx->bge_tcp_udp_csum;
2440 m->m_pkthdr.csum_flags |=
2441 CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2442 }
2443 }
2445 /*
2446 * If we received a packet with a vlan tag, attach
2447 * the tag to the mbuf before passing it up.
2448 */
2449 if (have_tag) {
2450 m->m_flags |= M_VLANTAG;
2451 m->m_pkthdr.ether_vlantag = vlan_tag;
2452 have_tag = vlan_tag = 0;
2453 }
2454 ether_input_chain(ifp, m, NULL, chain);
2455 }
2457 ether_input_dispatch(chain);
2459 bge_writembx(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
2460 if (stdcnt)
2461 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
2462 if (jumbocnt)
2463 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
2464 }
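/*
 * The mailbox writes above tell the chip how far we have consumed
 * the return ring and how far the standard/jumbo producer rings
 * have been replenished; each producer mailbox is only touched
 * when the corresponding ring was actually used in this pass.
 */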
2466 static void
2467 bge_txeof(struct bge_softc *sc)
2468 {
2469 struct bge_tx_bd *cur_tx = NULL;
2470 struct ifnet *ifp;
2472 if (sc->bge_tx_saved_considx ==
2473 sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx)
2474 return;
2476 ifp = &sc->arpcom.ac_if;
2478 /*
2479 * Go through our tx ring and free mbufs for those
2480 * frames that have been sent.
2481 */
2482 while (sc->bge_tx_saved_considx !=
2483 sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx) {
2484 uint32_t idx = 0;
2486 idx = sc->bge_tx_saved_considx;
2487 cur_tx = &sc->bge_ldata.bge_tx_ring[idx];
2488 if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
2489 ifp->if_opackets++;
2490 if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
2491 bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag,
2492 sc->bge_cdata.bge_tx_dmamap[idx]);
2493 m_freem(sc->bge_cdata.bge_tx_chain[idx]);
2494 sc->bge_cdata.bge_tx_chain[idx] = NULL;
2495 }
2496 sc->bge_txcnt--;
2497 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
2498 }
2501 if (cur_tx != NULL &&
2502 (BGE_TX_RING_CNT - sc->bge_txcnt) >=
2503 (BGE_NSEG_RSVD + BGE_NSEG_SPARE))
2504 ifp->if_flags &= ~IFF_OACTIVE;
2506 if (sc->bge_txcnt == 0)
2507 ifp->if_timer = 0;
2509 if (!ifq_is_empty(&ifp->if_snd))
2510 if_devstart(ifp);
2511 }
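/*
 * Annotation: IFF_OACTIVE is cleared above only once at least
 * BGE_NSEG_RSVD + BGE_NSEG_SPARE descriptors are free again, so
 * transmission resumes with enough headroom for another heavily
 * fragmented packet.
 */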
2513 #ifdef DEVICE_POLLING
2515 static void
2516 bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
2517 {
2518 struct bge_softc *sc = ifp->if_softc;
2519 uint32_t status;
2521 switch(cmd) {
2522 case POLL_REGISTER:
2523 bge_disable_intr(sc);
2524 break;
2525 case POLL_DEREGISTER:
2526 bge_enable_intr(sc);
2527 break;
2528 case POLL_AND_CHECK_STATUS:
2529 /*
2530 * Process link state changes.
2531 */
2532 status = CSR_READ_4(sc, BGE_MAC_STS);
2533 if ((status & sc->bge_link_chg) || sc->bge_link_evt) {
2534 sc->bge_link_evt = 0;
2535 sc->bge_link_upd(sc, status);
2536 }
2537 /* fall through */
2538 case POLL_ONLY:
2539 if (ifp->if_flags & IFF_RUNNING) {
2540 bge_rxeof(sc);
2541 bge_txeof(sc);
2542 }
2543 break;
2544 }
2545 }
2547 #endif
2549 static void
2550 bge_intr(void *xsc)
2551 {
2552 struct bge_softc *sc = xsc;
2553 struct ifnet *ifp = &sc->arpcom.ac_if;
2554 uint32_t status;
2558 /*
2559 * Ack the interrupt by writing something to BGE_MBX_IRQ0_LO. Don't
2560 * disable interrupts by writing nonzero like we used to, since with
2561 * our current organization this just gives complications and
2562 * pessimizations for re-enabling interrupts. We used to have races
2563 * instead of the necessary complications. Disabling interrupts
2564 * would just reduce the chance of a status update while we are
2565 * running (by switching to the interrupt-mode coalescence
2566 * parameters), but this chance is already very low so it is more
2567 * efficient to get another interrupt than prevent it.
2569 * We do the ack first to ensure another interrupt if there is a
2570 * status update after the ack. We don't check for the status
2571 * changing later because it is more efficient to get another
2572 * interrupt than prevent it, not quite as above (not checking is
2573 * a smaller optimization than not toggling the interrupt enable,
2574 * since checking doesn't involve PCI accesses and toggling requires
2575 * the status check). So toggling would probably be a pessimization
2576 * even with MSI. It would only be needed for using a task queue.
2577 */
2578 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
2580 /*
2581 * Process link state changes.
2582 */
2583 status = CSR_READ_4(sc, BGE_MAC_STS);
2584 if ((status & sc->bge_link_chg) || sc->bge_link_evt) {
2585 sc->bge_link_evt = 0;
2586 sc->bge_link_upd(sc, status);
2587 }
2589 if (ifp->if_flags & IFF_RUNNING) {
2590 /* Check RX return ring producer/consumer */
2591 bge_rxeof(sc);
2593 /* Check TX ring producer/consumer */
2594 bge_txeof(sc);
2595 }
2597 if (sc->bge_coal_chg)
2598 bge_coal_change(sc);
2599 }
2601 static void
2602 bge_tick(void *xsc)
2603 {
2604 struct bge_softc *sc = xsc;
2605 struct ifnet *ifp = &sc->arpcom.ac_if;
2607 lwkt_serialize_enter(ifp->if_serializer);
2609 if (BGE_IS_5705_PLUS(sc))
2610 bge_stats_update_regs(sc);
2611 else
2612 bge_stats_update(sc);
2614 if (sc->bge_flags & BGE_FLAG_TBI) {
2615 /*
2616 * Since in TBI mode auto-polling can't be used we should poll
2617 * link status manually. Here we register pending link event
2618 * and trigger interrupt.
2619 */
2620 sc->bge_link_evt++;
2621 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
2622 } else if (!sc->bge_link) {
2623 mii_tick(device_get_softc(sc->bge_miibus));
2624 }
2626 callout_reset(&sc->bge_stat_timer, hz, bge_tick, sc);
2628 lwkt_serialize_exit(ifp->if_serializer);
2629 }
2631 static void
2632 bge_stats_update_regs(struct bge_softc *sc)
2633 {
2634 struct ifnet *ifp = &sc->arpcom.ac_if;
2635 struct bge_mac_stats_regs stats;
2636 uint32_t *s;
2637 int i;
2639 s = (uint32_t *)&stats;
2640 for (i = 0; i < sizeof(struct bge_mac_stats_regs); i += 4) {
2641 *s = CSR_READ_4(sc, BGE_RX_STATS + i);
2642 s++;
2643 }
2645 ifp->if_collisions +=
2646 (stats.dot3StatsSingleCollisionFrames +
2647 stats.dot3StatsMultipleCollisionFrames +
2648 stats.dot3StatsExcessiveCollisions +
2649 stats.dot3StatsLateCollisions) -
2650 ifp->if_collisions;
2651 }
2653 static void
2654 bge_stats_update(struct bge_softc *sc)
2655 {
2656 struct ifnet *ifp = &sc->arpcom.ac_if;
2657 bus_size_t stats;
2659 stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;
2661 #define READ_STAT(sc, stats, stat) \
2662 CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))
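/*
 * READ_STAT fetches the low 32 bits of a 64-bit statistics counter
 * from the chip's statistics block, addressed through the PCI
 * memory window set up at BGE_MEMWIN_START.
 */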
2664 ifp->if_collisions +=
2665 (READ_STAT(sc, stats,
2666 txstats.dot3StatsSingleCollisionFrames.bge_addr_lo) +
2667 READ_STAT(sc, stats,
2668 txstats.dot3StatsMultipleCollisionFrames.bge_addr_lo) +
2669 READ_STAT(sc, stats,
2670 txstats.dot3StatsExcessiveCollisions.bge_addr_lo) +
2671 READ_STAT(sc, stats,
2672 txstats.dot3StatsLateCollisions.bge_addr_lo)) -
2673 ifp->if_collisions;
2675 #undef READ_STAT
2677 #ifdef notdef
2678 ifp->if_collisions +=
2679 (sc->bge_rdata->bge_info.bge_stats.dot3StatsSingleCollisionFrames +
2680 sc->bge_rdata->bge_info.bge_stats.dot3StatsMultipleCollisionFrames +
2681 sc->bge_rdata->bge_info.bge_stats.dot3StatsExcessiveCollisions +
2682 sc->bge_rdata->bge_info.bge_stats.dot3StatsLateCollisions) -
2683 ifp->if_collisions;
2684 #endif
2685 }
2687 /*
2688 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
2689 * pointers to descriptors.
2690 */
2691 static int
2692 bge_encap(struct bge_softc *sc, struct mbuf **m_head0, uint32_t *txidx)
2693 {
2694 struct bge_tx_bd *d = NULL;
2695 uint16_t csum_flags = 0;
2696 bus_dma_segment_t segs[BGE_NSEG_NEW];
2697 bus_dmamap_t map;
2698 int error, maxsegs, nsegs, idx, i;
2699 struct mbuf *m_head = *m_head0;
2701 if (m_head->m_pkthdr.csum_flags) {
2702 if (m_head->m_pkthdr.csum_flags & CSUM_IP)
2703 csum_flags |= BGE_TXBDFLAG_IP_CSUM;
2704 if (m_head->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
2705 csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
2706 if (m_head->m_flags & M_LASTFRAG)
2707 csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
2708 else if (m_head->m_flags & M_FRAG)
2709 csum_flags |= BGE_TXBDFLAG_IP_FRAG;
2710 }
2712 idx = *txidx;
2713 map = sc->bge_cdata.bge_tx_dmamap[idx];
2715 maxsegs = (BGE_TX_RING_CNT - sc->bge_txcnt) - BGE_NSEG_RSVD;
2716 KASSERT(maxsegs >= BGE_NSEG_SPARE,
2717 ("not enough segments %d\n", maxsegs));
2719 if (maxsegs > BGE_NSEG_NEW)
2720 maxsegs = BGE_NSEG_NEW;
2722 /*
2723 * Pad outbound frame to BGE_MIN_FRAME for an unusual reason.
2724 * The bge hardware will pad out Tx runts to BGE_MIN_FRAME,
2725 * but when such padded frames employ the bge IP/TCP checksum
2726 * offload, the hardware checksum assist gives incorrect results
2727 * (possibly from incorporating its own padding into the UDP/TCP
2728 * checksum; who knows). If we pad such runts with zeros, the
2729 * onboard checksum comes out correct.
2730 */
2731 if ((csum_flags & BGE_TXBDFLAG_TCP_UDP_CSUM) &&
2732 m_head->m_pkthdr.len < BGE_MIN_FRAME) {
2733 error = m_devpad(m_head, BGE_MIN_FRAME);
2734 if (error)
2735 goto back;
2736 }
2738 error = bus_dmamap_load_mbuf_defrag(sc->bge_cdata.bge_tx_mtag, map,
2739 m_head0, segs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
2740 if (error)
2741 goto back;
2743 m_head = *m_head0;
2744 bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag, map, BUS_DMASYNC_PREWRITE);
2746 for (i = 0; ; i++) {
2747 d = &sc->bge_ldata.bge_tx_ring[idx];
2749 d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr);
2750 d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr);
2751 d->bge_len = segs[i].ds_len;
2752 d->bge_flags = csum_flags;
2754 if (i == nsegs - 1)
2755 break;
2756 BGE_INC(idx, BGE_TX_RING_CNT);
2757 }
2758 /* Mark the last segment as end of packet... */
2759 d->bge_flags |= BGE_TXBDFLAG_END;
2761 /* Set vlan tag to the first segment of the packet. */
2762 d = &sc->bge_ldata.bge_tx_ring[*txidx];
2763 if (m_head->m_flags & M_VLANTAG) {
2764 d->bge_flags |= BGE_TXBDFLAG_VLAN_TAG;
2765 d->bge_vlan_tag = m_head->m_pkthdr.ether_vlantag;
2766 } else {
2767 d->bge_vlan_tag = 0;
2768 }
2770 /*
2771 * Ensure that the map for this transmission is placed at
2772 * the array index of the last descriptor in this chain.
2773 */
2774 sc->bge_cdata.bge_tx_dmamap[*txidx] = sc->bge_cdata.bge_tx_dmamap[idx];
2775 sc->bge_cdata.bge_tx_dmamap[idx] = map;
2776 sc->bge_cdata.bge_tx_chain[idx] = m_head;
2777 sc->bge_txcnt += nsegs;
2779 BGE_INC(idx, BGE_TX_RING_CNT);
2780 *txidx = idx;
2781 back:
2782 if (error) {
2783 m_freem(*m_head0);
2784 *m_head0 = NULL;
2785 }
2786 return error;
2787 }
2789 /*
2790 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
2791 * to the mbuf data regions directly in the transmit descriptors.
2792 */
2793 static void
2794 bge_start(struct ifnet *ifp)
2795 {
2796 struct bge_softc *sc = ifp->if_softc;
2797 struct mbuf *m_head = NULL;
2798 uint32_t prodidx;
2799 int need_trans;
2801 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
2802 return;
2804 prodidx = sc->bge_tx_prodidx;
2806 need_trans = 0;
2807 while (sc->bge_cdata.bge_tx_chain[prodidx] == NULL) {
2808 m_head = ifq_dequeue(&ifp->if_snd, NULL);
2809 if (m_head == NULL)
2810 break;
2812 /*
2813 * XXX
2814 * The code inside the if() block is never reached since we
2815 * must mark CSUM_IP_FRAGS in our if_hwassist to start getting
2816 * requests to checksum TCP/UDP in a fragmented packet.
2817 *
2818 * XXX
2819 * safety overkill. If this is a fragmented packet chain
2820 * with delayed TCP/UDP checksums, then only encapsulate
2821 * it if we have enough descriptors to handle the entire
2822 * chain at once.
2823 * (paranoia -- may not actually be needed)
2824 */
2825 if ((m_head->m_flags & M_FIRSTFRAG) &&
2826 (m_head->m_pkthdr.csum_flags & CSUM_DELAY_DATA)) {
2827 if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
2828 m_head->m_pkthdr.csum_data + BGE_NSEG_RSVD) {
2829 ifp->if_flags |= IFF_OACTIVE;
2830 ifq_prepend(&ifp->if_snd, m_head);
2831 break;
2832 }
2833 }
2835 /*
2836 * Sanity check: avoid coming within BGE_NSEG_RSVD
2837 * descriptors of the end of the ring. Also make
2838 * sure there are BGE_NSEG_SPARE descriptors for
2839 * jumbo buffers' defragmentation.
2840 */
2841 if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
2842 (BGE_NSEG_RSVD + BGE_NSEG_SPARE)) {
2843 ifp->if_flags |= IFF_OACTIVE;
2844 ifq_prepend(&ifp->if_snd, m_head);
2845 break;
2846 }
2848 /*
2849 * Pack the data into the transmit ring. If we
2850 * don't have room, set the OACTIVE flag and wait
2851 * for the NIC to drain the ring.
2852 */
2853 if (bge_encap(sc, &m_head, &prodidx)) {
2854 ifp->if_flags |= IFF_OACTIVE;
2855 break;
2856 }
2857 need_trans = 1;
2860 ETHER_BPF_MTAP(ifp, m_head);
2861 }
2863 if (!need_trans)
2864 return;
2866 /* Transmit */
2867 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
2868 /* 5700 b2 errata */
2869 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
2870 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
2872 sc->bge_tx_prodidx = prodidx;
2874 /*
2875 * Set a timeout in case the chip goes out to lunch.
2876 */
2877 ifp->if_timer = 5;
2878 }
2880 static void
2881 bge_init(void *xsc)
2882 {
2883 struct bge_softc *sc = xsc;
2884 struct ifnet *ifp = &sc->arpcom.ac_if;
2885 uint16_t *m;
2887 ASSERT_SERIALIZED(ifp->if_serializer);
2889 if (ifp->if_flags & IFF_RUNNING)
2890 return;
2892 /* Cancel pending I/O and flush buffers. */
2893 bge_stop(sc);
2894 bge_reset(sc);
2895 bge_chipinit(sc);
2897 /*
2898 * Init the various state machines, ring
2899 * control blocks and firmware.
2900 */
2901 if (bge_blockinit(sc)) {
2902 if_printf(ifp, "initialization failure\n");
2903 bge_stop(sc);
2904 return;
2905 }
2907 /* Specify MTU. */
2908 CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
2909 ETHER_HDR_LEN + ETHER_CRC_LEN + EVL_ENCAPLEN);
2911 /* Load our MAC address. */
2912 m = (uint16_t *)&sc->arpcom.ac_enaddr[0];
2913 CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
2914 CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
2916 /* Enable or disable promiscuous mode as needed. */
2917 bge_setpromisc(sc);
2919 /* Program multicast filter. */
2920 bge_setmulti(sc);
2922 /* Init RX ring. */
2923 if (bge_init_rx_ring_std(sc)) {
2924 if_printf(ifp, "RX ring initialization failed\n");
2925 bge_stop(sc);
2926 return;
2927 }
2929 /*
2930 * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
2931 * memory to ensure that the chip has in fact read the first
2932 * entry of the ring.
2933 */
2934 if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
2935 uint32_t v, i;
2936 for (i = 0; i < 10; i++) {
2937 DELAY(20);
2938 v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
2939 if (v == (MCLBYTES - ETHER_ALIGN))
2940 break;
2941 }
2942 if (i == 10)
2943 if_printf(ifp, "5705 A0 chip failed to load RX ring\n");
2944 }
2946 /* Init jumbo RX ring. */
2947 if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN)) {
2948 if (bge_init_rx_ring_jumbo(sc)) {
2949 if_printf(ifp, "Jumbo RX ring initialization failed\n");
2950 bge_stop(sc);
2951 return;
2952 }
2953 }
2955 /* Init our RX return ring index */
2956 sc->bge_rx_saved_considx = 0;
2958 /* Init TX ring. */
2959 bge_init_tx_ring(sc);
2961 /* Turn on transmitter */
2962 BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE);
2964 /* Turn on receiver */
2965 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
2967 /* Tell firmware we're alive. */
2968 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
2970 /* Enable host interrupts if polling(4) is not enabled. */
2971 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
2972 #ifdef DEVICE_POLLING
2973 if (ifp->if_flags & IFF_POLLING)
2974 bge_disable_intr(sc);
2975 else
2976 #endif
2977 bge_enable_intr(sc);
2979 bge_ifmedia_upd(ifp);
2981 ifp->if_flags |= IFF_RUNNING;
2982 ifp->if_flags &= ~IFF_OACTIVE;
2984 callout_reset(&sc->bge_stat_timer, hz, bge_tick, sc);
2985 }
2987 /*
2988 * Set media options.
2989 */
2990 static int
2991 bge_ifmedia_upd(struct ifnet *ifp)
2992 {
2993 struct bge_softc *sc = ifp->if_softc;
2995 /* If this is a 1000baseX NIC, enable the TBI port. */
2996 if (sc->bge_flags & BGE_FLAG_TBI) {
2997 struct ifmedia *ifm = &sc->bge_ifmedia;
2999 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
3000 return(EINVAL);
3002 switch(IFM_SUBTYPE(ifm->ifm_media)) {
3003 case IFM_AUTO:
3004 /*
3005 * The BCM5704 ASIC appears to have a special
3006 * mechanism for programming the autoneg
3007 * advertisement registers in TBI mode.
3008 */
3009 if (!bge_fake_autoneg &&
3010 sc->bge_asicrev == BGE_ASICREV_BCM5704) {
3011 uint32_t sgdig;
3013 CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
3014 sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
3015 sgdig |= BGE_SGDIGCFG_AUTO |
3016 BGE_SGDIGCFG_PAUSE_CAP |
3017 BGE_SGDIGCFG_ASYM_PAUSE;
3018 CSR_WRITE_4(sc, BGE_SGDIG_CFG,
3019 sgdig | BGE_SGDIGCFG_SEND);
3020 DELAY(5);
3021 CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig);
3022 }
3023 break;
3024 case IFM_1000_SX:
3025 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
3026 BGE_CLRBIT(sc, BGE_MAC_MODE,
3027 BGE_MACMODE_HALF_DUPLEX);
3028 } else {
3029 BGE_SETBIT(sc, BGE_MAC_MODE,
3030 BGE_MACMODE_HALF_DUPLEX);
3031 }
3032 break;
3033 default:
3034 return(EINVAL);
3035 }
3036 } else {
3037 struct mii_data *mii = device_get_softc(sc->bge_miibus);
3039 sc->bge_link_evt++;
3041 if (mii->mii_instance) {
3042 struct mii_softc *miisc;
3044 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
3045 mii_phy_reset(miisc);
3046 }
3047 mii_mediachg(mii);
3048 }
3049 return(0);
3050 }
3052 /*
3053 * Report current media status.
3054 */
3055 static void
3056 bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
3057 {
3058 struct bge_softc *sc = ifp->if_softc;
3060 if (sc->bge_flags & BGE_FLAG_TBI) {
3061 ifmr->ifm_status = IFM_AVALID;
3062 ifmr->ifm_active = IFM_ETHER;
3063 if (CSR_READ_4(sc, BGE_MAC_STS) &
3064 BGE_MACSTAT_TBI_PCS_SYNCHED) {
3065 ifmr->ifm_status |= IFM_ACTIVE;
3067 ifmr->ifm_active |= IFM_NONE;
3071 ifmr->ifm_active |= IFM_1000_SX;
3072 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
3073 ifmr->ifm_active |= IFM_HDX;
3075 ifmr->ifm_active |= IFM_FDX;
3077 struct mii_data *mii = device_get_softc(sc->bge_miibus);
3080 ifmr->ifm_active = mii->mii_media_active;
3081 ifmr->ifm_status = mii->mii_media_status;
3086 bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
3088 struct bge_softc *sc = ifp->if_softc;
3089 struct ifreq *ifr = (struct ifreq *)data;
3090 int mask, error = 0;
3092 ASSERT_SERIALIZED(ifp->if_serializer);
3094 switch(command) {
3095 case SIOCSIFMTU:
3096 if ((!BGE_IS_JUMBO_CAPABLE(sc) && ifr->ifr_mtu > ETHERMTU) ||
3097 (BGE_IS_JUMBO_CAPABLE(sc) &&
3098 ifr->ifr_mtu > BGE_JUMBO_MTU)) {
3099 error = EINVAL;
3100 } else if (ifp->if_mtu != ifr->ifr_mtu) {
3101 ifp->if_mtu = ifr->ifr_mtu;
3102 ifp->if_flags &= ~IFF_RUNNING;
3103 bge_init(sc);
3104 }
3105 break;
3106 case SIOCSIFFLAGS:
3107 if (ifp->if_flags & IFF_UP) {
3107 if (ifp->if_flags & IFF_UP) {
3108 if (ifp->if_flags & IFF_RUNNING) {
3109 mask = ifp->if_flags ^ sc->bge_if_flags;
3111 /*
3112 * If only the state of the PROMISC flag
3113 * changed, then just use the 'set promisc
3114 * mode' command instead of reinitializing
3115 * the entire NIC. Doing a full re-init
3116 * means reloading the firmware and waiting
3117 * for it to start up, which may take a
3118 * second or two. Similarly for ALLMULTI.
3119 */
3120 if (mask & IFF_PROMISC)
3121 bge_setpromisc(sc);
3122 if (mask & IFF_ALLMULTI)
3123 bge_setmulti(sc);
3124 } else {
3125 bge_init(sc);
3126 }
3127 } else {
3128 if (ifp->if_flags & IFF_RUNNING)
3129 bge_stop(sc);
3130 }
3131 sc->bge_if_flags = ifp->if_flags;
3132 break;
3133 case SIOCADDMULTI:
3134 case SIOCDELMULTI:
3135 if (ifp->if_flags & IFF_RUNNING)
3136 bge_setmulti(sc);
3137 break;
3138 case SIOCSIFMEDIA:
3139 case SIOCGIFMEDIA:
3140 if (sc->bge_flags & BGE_FLAG_TBI) {
3141 error = ifmedia_ioctl(ifp, ifr,
3142 &sc->bge_ifmedia, command);
3143 } else {
3144 struct mii_data *mii;
3146 mii = device_get_softc(sc->bge_miibus);
3147 error = ifmedia_ioctl(ifp, ifr,
3148 &mii->mii_media, command);
3149 }
3150 break;
3151 case SIOCSIFCAP:
3152 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
3153 if (mask & IFCAP_HWCSUM) {
3154 ifp->if_capenable ^= (mask & IFCAP_HWCSUM);
3155 if (IFCAP_HWCSUM & ifp->if_capenable)
3156 ifp->if_hwassist = BGE_CSUM_FEATURES;
3157 else
3158 ifp->if_hwassist = 0;
3159 }
3160 break;
3161 default:
3162 error = ether_ioctl(ifp, command, data);
3163 break;
3164 }
3165 return error;
3166 }
3168 static void
3169 bge_watchdog(struct ifnet *ifp)
3170 {
3171 struct bge_softc *sc = ifp->if_softc;
3173 if_printf(ifp, "watchdog timeout -- resetting\n");
3175 ifp->if_flags &= ~IFF_RUNNING;
3176 bge_init(sc);
3178 ifp->if_oerrors++;
3180 if (!ifq_is_empty(&ifp->if_snd))
3181 if_devstart(ifp);
3182 }
3184 /*
3185 * Stop the adapter and free any mbufs allocated to the
3186 * RX and TX lists.
3187 */
3188 static void
3189 bge_stop(struct bge_softc *sc)
3190 {
3191 struct ifnet *ifp = &sc->arpcom.ac_if;
3192 struct ifmedia_entry *ifm;
3193 struct mii_data *mii = NULL;
3194 int mtmp, itmp;
3196 ASSERT_SERIALIZED(ifp->if_serializer);
3198 if ((sc->bge_flags & BGE_FLAG_TBI) == 0)
3199 mii = device_get_softc(sc->bge_miibus);
3201 callout_stop(&sc->bge_stat_timer);
3203 /*
3204 * Disable all of the receiver blocks
3205 */
3206 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
3207 BGE_CLRBIT(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
3208 BGE_CLRBIT(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
3209 if (!BGE_IS_5705_PLUS(sc))
3210 BGE_CLRBIT(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
3211 BGE_CLRBIT(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
3212 BGE_CLRBIT(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
3213 BGE_CLRBIT(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
3215 /*
3216 * Disable all of the transmit blocks
3217 */
3218 BGE_CLRBIT(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
3219 BGE_CLRBIT(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
3220 BGE_CLRBIT(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
3221 BGE_CLRBIT(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
3222 BGE_CLRBIT(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
3223 if (!BGE_IS_5705_PLUS(sc))
3224 BGE_CLRBIT(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
3225 BGE_CLRBIT(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
3227 /*
3228 * Shut down all of the memory managers and related
3229 * state machines.
3230 */
3231 BGE_CLRBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
3232 BGE_CLRBIT(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
3233 if (!BGE_IS_5705_PLUS(sc))
3234 BGE_CLRBIT(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
3235 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
3236 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
3237 if (!BGE_IS_5705_PLUS(sc)) {
3238 BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
3239 BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
3240 }
3242 /* Disable host interrupts. */
3243 bge_disable_intr(sc);
3245 /*
3246 * Tell firmware we're shutting down.
3247 */
3248 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3250 /* Free the RX lists. */
3251 bge_free_rx_ring_std(sc);
3253 /* Free jumbo RX list. */
3254 if (BGE_IS_JUMBO_CAPABLE(sc))
3255 bge_free_rx_ring_jumbo(sc);
3257 /* Free TX buffers. */
3258 bge_free_tx_ring(sc);
3260 /*
3261 * Isolate/power down the PHY, but leave the media selection
3262 * unchanged so that things will be put back to normal when
3263 * we bring the interface back up.
3264 *
3265 * 'mii' may be NULL in the following cases:
3266 * - The device uses TBI.
3267 * - bge_stop() is called by bge_detach().
3268 */
3269 if (mii != NULL) {
3270 itmp = ifp->if_flags;
3271 ifp->if_flags |= IFF_UP;
3272 ifm = mii->mii_media.ifm_cur;
3273 mtmp = ifm->ifm_media;
3274 ifm->ifm_media = IFM_ETHER|IFM_NONE;
3275 mii_mediachg(mii);
3276 ifm->ifm_media = mtmp;
3277 ifp->if_flags = itmp;
3278 }
3280 sc->bge_link = 0;
3281 sc->bge_coal_chg = 0;
3283 sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
3285 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
3286 ifp->if_timer = 0;
3287 }
3289 /*
3290 * Stop all chip I/O so that the kernel's probe routines don't
3291 * get confused by errant DMAs when rebooting.
3292 */
3293 static void
3294 bge_shutdown(device_t dev)
3295 {
3296 struct bge_softc *sc = device_get_softc(dev);
3297 struct ifnet *ifp = &sc->arpcom.ac_if;
3299 lwkt_serialize_enter(ifp->if_serializer);
3300 bge_stop(sc);
3301 bge_reset(sc);
3302 lwkt_serialize_exit(ifp->if_serializer);
3303 }
3305 static int
3306 bge_suspend(device_t dev)
3307 {
3308 struct bge_softc *sc = device_get_softc(dev);
3311 lwkt_serialize_enter(ifp->if_serializer);
3312 bge_stop(sc);
3313 lwkt_serialize_exit(ifp->if_serializer);
3315 return 0;
3316 }
3318 static int
3319 bge_resume(device_t dev)
3320 {
3321 struct bge_softc *sc = device_get_softc(dev);
3324 lwkt_serialize_enter(ifp->if_serializer);
3326 if (ifp->if_flags & IFF_UP) {
3327 bge_init(sc);
3329 if (!ifq_is_empty(&ifp->if_snd))
3330 if_devstart(ifp);
3331 }
3333 lwkt_serialize_exit(ifp->if_serializer);
3335 return 0;
3336 }
3338 static void
3339 bge_setpromisc(struct bge_softc *sc)
3340 {
3341 struct ifnet *ifp = &sc->arpcom.ac_if;
3343 if (ifp->if_flags & IFF_PROMISC)
3344 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
3346 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
3347 }
3349 static void
3350 bge_dma_free(struct bge_softc *sc)
3351 {
3352 int i;
3354 /* Destroy RX mbuf DMA stuffs. */
3355 if (sc->bge_cdata.bge_rx_mtag != NULL) {
3356 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
3357 bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag,
3358 sc->bge_cdata.bge_rx_std_dmamap[i]);
3359 }
3360 bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag,
3361 sc->bge_cdata.bge_rx_tmpmap);
3362 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_mtag);
3363 }
3365 /* Destroy TX mbuf DMA stuffs. */
3366 if (sc->bge_cdata.bge_tx_mtag != NULL) {
3367 for (i = 0; i < BGE_TX_RING_CNT; i++) {
3368 bus_dmamap_destroy(sc->bge_cdata.bge_tx_mtag,
3369 sc->bge_cdata.bge_tx_dmamap[i]);
3370 }
3371 bus_dma_tag_destroy(sc->bge_cdata.bge_tx_mtag);
3372 }
3374 /* Destroy standard RX ring */
3375 bge_dma_block_free(sc->bge_cdata.bge_rx_std_ring_tag,
3376 sc->bge_cdata.bge_rx_std_ring_map,
3377 sc->bge_ldata.bge_rx_std_ring);
3379 if (BGE_IS_JUMBO_CAPABLE(sc))
3380 bge_free_jumbo_mem(sc);
3382 /* Destroy RX return ring */
3383 bge_dma_block_free(sc->bge_cdata.bge_rx_return_ring_tag,
3384 sc->bge_cdata.bge_rx_return_ring_map,
3385 sc->bge_ldata.bge_rx_return_ring);
3387 /* Destroy TX ring */
3388 bge_dma_block_free(sc->bge_cdata.bge_tx_ring_tag,
3389 sc->bge_cdata.bge_tx_ring_map,
3390 sc->bge_ldata.bge_tx_ring);
3392 /* Destroy status block */
3393 bge_dma_block_free(sc->bge_cdata.bge_status_tag,
3394 sc->bge_cdata.bge_status_map,
3395 sc->bge_ldata.bge_status_block);
3397 /* Destroy statistics block */
3398 bge_dma_block_free(sc->bge_cdata.bge_stats_tag,
3399 sc->bge_cdata.bge_stats_map,
3400 sc->bge_ldata.bge_stats);
3402 /* Destroy the parent tag */
3403 if (sc->bge_cdata.bge_parent_tag != NULL)
3404 bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag);
3405 }
3407 static int
3408 bge_dma_alloc(struct bge_softc *sc)
3409 {
3410 struct ifnet *ifp = &sc->arpcom.ac_if;
3411 int i, error;
3413 /*
3414 * Allocate the parent bus DMA tag appropriate for PCI.
3415 */
3416 error = bus_dma_tag_create(NULL, 1, 0,
3417 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3418 NULL, NULL,
3419 BUS_SPACE_MAXSIZE_32BIT, 0,
3420 BUS_SPACE_MAXSIZE_32BIT,
3421 0, &sc->bge_cdata.bge_parent_tag);
3422 if (error) {
3423 if_printf(ifp, "could not allocate parent dma tag\n");
3424 return error;
3425 }
3427 /*
3428 * Create DMA tag and maps for RX mbufs.
3429 */
3430 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1, 0,
3431 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3432 NULL, NULL, MCLBYTES, 1, MCLBYTES,
3433 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK,
3434 &sc->bge_cdata.bge_rx_mtag);
3435 if (error) {
3436 if_printf(ifp, "could not allocate RX mbuf dma tag\n");
3437 return error;
3438 }
3440 error = bus_dmamap_create(sc->bge_cdata.bge_rx_mtag,
3441 BUS_DMA_WAITOK, &sc->bge_cdata.bge_rx_tmpmap);
3442 if (error) {
3443 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_mtag);
3444 sc->bge_cdata.bge_rx_mtag = NULL;
3445 return error;
3446 }
3448 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
3449 error = bus_dmamap_create(sc->bge_cdata.bge_rx_mtag,
3450 BUS_DMA_WAITOK,
3451 &sc->bge_cdata.bge_rx_std_dmamap[i]);
3452 if (error) {
3453 int j;
3455 for (j = 0; j < i; ++j) {
3456 bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag,
3457 sc->bge_cdata.bge_rx_std_dmamap[j]);
3458 }
3459 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_mtag);
3460 sc->bge_cdata.bge_rx_mtag = NULL;
3462 if_printf(ifp, "could not create DMA map for RX\n");
3463 return error;
3464 }
3465 }
3467 /*
3468 * Create DMA tag and maps for TX mbufs.
3469 */
3470 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1, 0,
3471 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3472 NULL, NULL,
3473 BGE_JUMBO_FRAMELEN, BGE_NSEG_NEW, MCLBYTES,
3474 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK |
3475 BUS_DMA_ONEBPAGE,
3476 &sc->bge_cdata.bge_tx_mtag);
3477 if (error) {
3478 if_printf(ifp, "could not allocate TX mbuf dma tag\n");
3479 return error;
3480 }
3482 for (i = 0; i < BGE_TX_RING_CNT; i++) {
3483 error = bus_dmamap_create(sc->bge_cdata.bge_tx_mtag,
3484 BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
3485 &sc->bge_cdata.bge_tx_dmamap[i]);
3486 if (error) {
3487 int j;
3489 for (j = 0; j < i; ++j) {
3490 bus_dmamap_destroy(sc->bge_cdata.bge_tx_mtag,
3491 sc->bge_cdata.bge_tx_dmamap[j]);
3492 }
3493 bus_dma_tag_destroy(sc->bge_cdata.bge_tx_mtag);
3494 sc->bge_cdata.bge_tx_mtag = NULL;
3496 if_printf(ifp, "could not create DMA map for TX\n");
3497 return error;
3498 }
3499 }
3501 /*
3502 * Create DMA stuffs for standard RX ring.
3503 */
3504 error = bge_dma_block_alloc(sc, BGE_STD_RX_RING_SZ,
3505 &sc->bge_cdata.bge_rx_std_ring_tag,
3506 &sc->bge_cdata.bge_rx_std_ring_map,
3507 (void *)&sc->bge_ldata.bge_rx_std_ring,
3508 &sc->bge_ldata.bge_rx_std_ring_paddr);
3509 if (error) {
3510 if_printf(ifp, "could not create std RX ring\n");
3511 return error;
3512 }
3514 /*
3515 * Create jumbo buffer pool.
3516 */
3517 if (BGE_IS_JUMBO_CAPABLE(sc)) {
3518 error = bge_alloc_jumbo_mem(sc);
3519 if (error) {
3520 if_printf(ifp, "could not create jumbo buffer pool\n");
3521 return error;
3522 }
3523 }
3525 /*
3526 * Create DMA stuffs for RX return ring.
3527 */
3528 error = bge_dma_block_alloc(sc, BGE_RX_RTN_RING_SZ(sc),
3529 &sc->bge_cdata.bge_rx_return_ring_tag,
3530 &sc->bge_cdata.bge_rx_return_ring_map,
3531 (void *)&sc->bge_ldata.bge_rx_return_ring,
3532 &sc->bge_ldata.bge_rx_return_ring_paddr);
3533 if (error) {
3534 if_printf(ifp, "could not create RX ret ring\n");
3535 return error;
3536 }
3538 /*
3539 * Create DMA stuffs for TX ring.
3540 */
3541 error = bge_dma_block_alloc(sc, BGE_TX_RING_SZ,
3542 &sc->bge_cdata.bge_tx_ring_tag,
3543 &sc->bge_cdata.bge_tx_ring_map,
3544 (void *)&sc->bge_ldata.bge_tx_ring,
3545 &sc->bge_ldata.bge_tx_ring_paddr);
3546 if (error) {
3547 if_printf(ifp, "could not create TX ring\n");
3548 return error;
3549 }
3551 /*
3552 * Create DMA stuffs for status block.
3553 */
3554 error = bge_dma_block_alloc(sc, BGE_STATUS_BLK_SZ,
3555 &sc->bge_cdata.bge_status_tag,
3556 &sc->bge_cdata.bge_status_map,
3557 (void *)&sc->bge_ldata.bge_status_block,
3558 &sc->bge_ldata.bge_status_block_paddr);
3559 if (error) {
3560 if_printf(ifp, "could not create status block\n");
3561 return error;
3562 }
3564 /*
3565 * Create DMA stuffs for statistics block.
3566 */
3567 error = bge_dma_block_alloc(sc, BGE_STATS_SZ,
3568 &sc->bge_cdata.bge_stats_tag,
3569 &sc->bge_cdata.bge_stats_map,
3570 (void *)&sc->bge_ldata.bge_stats,
3571 &sc->bge_ldata.bge_stats_paddr);
3572 if (error) {
3573 if_printf(ifp, "could not create stats block\n");
3574 return error;
3575 }
3576 return 0;
3577 }
3579 static int
3580 bge_dma_block_alloc(struct bge_softc *sc, bus_size_t size, bus_dma_tag_t *tag,
3581 bus_dmamap_t *map, void **addr, bus_addr_t *paddr)
3582 {
3583 bus_dmamem_t dmem;
3584 int error;
3586 error = bus_dmamem_coherent(sc->bge_cdata.bge_parent_tag, PAGE_SIZE, 0,
3587 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3588 size, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
3589 if (error)
3590 return error;
3592 *tag = dmem.dmem_tag;
3593 *map = dmem.dmem_map;
3594 *addr = dmem.dmem_addr;
3595 *paddr = dmem.dmem_busaddr;
3597 return 0;
3598 }
3600 static void
3601 bge_dma_block_free(bus_dma_tag_t tag, bus_dmamap_t map, void *addr)
3602 {
3603 if (tag != NULL) {
3604 bus_dmamap_unload(tag, map);
3605 bus_dmamem_free(tag, addr, map);
3606 bus_dma_tag_destroy(tag);
3607 }
3608 }
3610 /*
3611 * Grrr. The link status word in the status block does
3612 * not work correctly on the BCM5700 rev AX and BX chips,
3613 * according to all available information. Hence, we have
3614 * to enable MII interrupts in order to properly obtain
3615 * async link changes. Unfortunately, this also means that
3616 * we have to read the MAC status register to detect link
3617 * changes, thereby adding an additional register access to
3618 * the interrupt handler.
3620 * XXX: perhaps link state detection procedure used for
3621 * BGE_CHIPID_BCM5700_B2 can be used for other BCM5700 revisions.
3622 */
3623 static void
3624 bge_bcm5700_link_upd(struct bge_softc *sc, uint32_t status __unused)
3625 {
3626 struct ifnet *ifp = &sc->arpcom.ac_if;
3627 struct mii_data *mii = device_get_softc(sc->bge_miibus);
3629 mii_pollstat(mii);
3631 if (!sc->bge_link &&
3632 (mii->mii_media_status & IFM_ACTIVE) &&
3633 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
3634 sc->bge_link++;
3635 if (bootverbose)
3636 if_printf(ifp, "link UP\n");
3637 } else if (sc->bge_link &&
3638 (!(mii->mii_media_status & IFM_ACTIVE) ||
3639 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
3640 sc->bge_link = 0;
3641 if (bootverbose)
3642 if_printf(ifp, "link DOWN\n");
3643 }
3645 /* Clear the interrupt. */
3646 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_MI_INTERRUPT);
3647 bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR);
3648 bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR, BRGPHY_INTRS);
3649 }
3651 static void
3652 bge_tbi_link_upd(struct bge_softc *sc, uint32_t status)
3653 {
3654 struct ifnet *ifp = &sc->arpcom.ac_if;
3656 #define PCS_ENCODE_ERR (BGE_MACSTAT_PORT_DECODE_ERROR|BGE_MACSTAT_MI_COMPLETE)
3658 /*
3659 * Sometimes PCS encoding errors are detected in
3660 * TBI mode (on fiber NICs), and for some reason
3661 * the chip will signal them as link changes.
3662 * If we get a link change event, but the 'PCS
3663 * encoding error' bit in the MAC status register
3664 * is set, don't bother doing a link check.
3665 * This avoids spurious "gigabit link up" messages
3666 * that sometimes appear on fiber NICs during
3667 * periods of heavy traffic.
3668 */
3669 if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
3670 if (!sc->bge_link) {
3671 sc->bge_link++;
3672 if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
3673 BGE_CLRBIT(sc, BGE_MAC_MODE,
3674 BGE_MACMODE_TBI_SEND_CFGS);
3675 }
3676 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
3678 if (bootverbose)
3679 if_printf(ifp, "link UP\n");
3681 ifp->if_link_state = LINK_STATE_UP;
3682 if_link_state_change(ifp);
3683 }
3684 } else if ((status & PCS_ENCODE_ERR) != PCS_ENCODE_ERR) {
3685 if (sc->bge_link) {
3686 sc->bge_link = 0;
3688 if (bootverbose)
3689 if_printf(ifp, "link DOWN\n");
3691 ifp->if_link_state = LINK_STATE_DOWN;
3692 if_link_state_change(ifp);
3693 }
3694 }
3696 #undef PCS_ENCODE_ERR
3698 /* Clear the attention. */
3699 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
3700 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
3701 BGE_MACSTAT_LINK_CHANGED);
3702 }
3704 static void
3705 bge_copper_link_upd(struct bge_softc *sc, uint32_t status __unused)
3706 {
3707 /*
3708 * Check that the AUTOPOLL bit is set before
3709 * processing the event as a real link change.
3710 * Turning AUTOPOLL on and off in the MII read/write
3711 * functions will often trigger a link status
3712 * interrupt for no reason.
3713 */
3714 if (CSR_READ_4(sc, BGE_MI_MODE) & BGE_MIMODE_AUTOPOLL) {
3715 struct ifnet *ifp = &sc->arpcom.ac_if;
3716 struct mii_data *mii = device_get_softc(sc->bge_miibus);
3718 mii_pollstat(mii);
3720 if (!sc->bge_link &&
3721 (mii->mii_media_status & IFM_ACTIVE) &&
3722 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
3723 sc->bge_link++;
3724 if (bootverbose)
3725 if_printf(ifp, "link UP\n");
3726 } else if (sc->bge_link &&
3727 (!(mii->mii_media_status & IFM_ACTIVE) ||
3728 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
3729 sc->bge_link = 0;
3730 if (bootverbose)
3731 if_printf(ifp, "link DOWN\n");
3732 }
3733 }
3735 /* Clear the attention. */
3736 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
3737 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
3738 BGE_MACSTAT_LINK_CHANGED);
3739 }
3741 static int
3742 bge_sysctl_rx_coal_ticks(SYSCTL_HANDLER_ARGS)
3743 {
3744 struct bge_softc *sc = arg1;
3746 return bge_sysctl_coal_chg(oidp, arg1, arg2, req,
3747 &sc->bge_rx_coal_ticks,
3748 BGE_RX_COAL_TICKS_CHG);
3749 }
3751 static int
3752 bge_sysctl_tx_coal_ticks(SYSCTL_HANDLER_ARGS)
3753 {
3754 struct bge_softc *sc = arg1;
3756 return bge_sysctl_coal_chg(oidp, arg1, arg2, req,
3757 &sc->bge_tx_coal_ticks,
3758 BGE_TX_COAL_TICKS_CHG);
3759 }
3761 static int
3762 bge_sysctl_rx_max_coal_bds(SYSCTL_HANDLER_ARGS)
3763 {
3764 struct bge_softc *sc = arg1;
3766 return bge_sysctl_coal_chg(oidp, arg1, arg2, req,
3767 &sc->bge_rx_max_coal_bds,
3768 BGE_RX_MAX_COAL_BDS_CHG);
3769 }
3771 static int
3772 bge_sysctl_tx_max_coal_bds(SYSCTL_HANDLER_ARGS)
3773 {
3774 struct bge_softc *sc = arg1;
3776 return bge_sysctl_coal_chg(oidp, arg1, arg2, req,
3777 &sc->bge_tx_max_coal_bds,
3778 BGE_TX_MAX_COAL_BDS_CHG);
3779 }
3781 static int
3782 bge_sysctl_coal_chg(SYSCTL_HANDLER_ARGS, uint32_t *coal,
3783 uint32_t coal_chg_mask)
3784 {
3785 struct bge_softc *sc = arg1;
3786 struct ifnet *ifp = &sc->arpcom.ac_if;
3787 int error = 0, v;
3789 lwkt_serialize_enter(ifp->if_serializer);
3791 v = *coal;
3792 error = sysctl_handle_int(oidp, &v, 0, req);
3793 if (!error && req->newptr != NULL) {
3794 if (v < 0) {
3795 error = EINVAL;
3796 } else {
3797 *coal = v;
3798 sc->bge_coal_chg |= coal_chg_mask;
3799 }
3800 }
3802 lwkt_serialize_exit(ifp->if_serializer);
3803 return error;
3804 }
3806 static void
3807 bge_coal_change(struct bge_softc *sc)
3808 {
3809 struct ifnet *ifp = &sc->arpcom.ac_if;
3810 uint32_t val;
3812 ASSERT_SERIALIZED(ifp->if_serializer);
3814 if (sc->bge_coal_chg & BGE_RX_COAL_TICKS_CHG) {
3815 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS,
3816 sc->bge_rx_coal_ticks);
3817 DELAY(10);
3818 val = CSR_READ_4(sc, BGE_HCC_RX_COAL_TICKS);
3820 if (bootverbose) {
3821 if_printf(ifp, "rx_coal_ticks -> %u\n",
3822 sc->bge_rx_coal_ticks);
3823 }
3824 }
3826 if (sc->bge_coal_chg & BGE_TX_COAL_TICKS_CHG) {
3827 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS,
3828 sc->bge_tx_coal_ticks);
3829 DELAY(10);
3830 val = CSR_READ_4(sc, BGE_HCC_TX_COAL_TICKS);
3832 if (bootverbose) {
3833 if_printf(ifp, "tx_coal_ticks -> %u\n",
3834 sc->bge_tx_coal_ticks);
3835 }
3836 }
3838 if (sc->bge_coal_chg & BGE_RX_MAX_COAL_BDS_CHG) {
3839 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS,
3840 sc->bge_rx_max_coal_bds);
3841 DELAY(10);
3842 val = CSR_READ_4(sc, BGE_HCC_RX_MAX_COAL_BDS);
3844 if (bootverbose) {
3845 if_printf(ifp, "rx_max_coal_bds -> %u\n",
3846 sc->bge_rx_max_coal_bds);
3847 }
3848 }
3850 if (sc->bge_coal_chg & BGE_TX_MAX_COAL_BDS_CHG) {
3851 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS,
3852 sc->bge_tx_max_coal_bds);
3853 DELAY(10);
3854 val = CSR_READ_4(sc, BGE_HCC_TX_MAX_COAL_BDS);
3856 if (bootverbose) {
3857 if_printf(ifp, "tx_max_coal_bds -> %u\n",
3858 sc->bge_tx_max_coal_bds);
3859 }
3860 }
3862 sc->bge_coal_chg = 0;
3863 }
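/*
 * Annotation: the sysctl handlers above only record new values and
 * set bits in bge_coal_chg; the hardware registers are actually
 * reprogrammed here, called from bge_intr(), so updates are applied
 * while the interrupt serializer is held.
 */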
3865 static void
3866 bge_enable_intr(struct bge_softc *sc)
3867 {
3868 struct ifnet *ifp = &sc->arpcom.ac_if;
3870 lwkt_serialize_handler_enable(ifp->if_serializer);
3872 /*
3873 * Enable interrupt.
3874 */
3875 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
3877 /*
3878 * Unmask the interrupt when we stop polling.
3879 */
3880 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
3882 /*
3883 * Trigger another interrupt, since above writing
3884 * to interrupt mailbox0 may acknowledge pending
3885 * interrupt.
3886 */
3887 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
3888 }
3890 static void
3891 bge_disable_intr(struct bge_softc *sc)
3892 {
3893 struct ifnet *ifp = &sc->arpcom.ac_if;
3895 /*
3896 * Mask the interrupt when we start polling.
3897 */
3898 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
3900 /*
3901 * Acknowledge possible asserted interrupt.
3902 */
3903 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
3905 lwkt_serialize_handler_disable(ifp->if_serializer);
3906 }
3908 static int
3909 bge_get_eaddr_mem(struct bge_softc *sc, uint8_t ether_addr[])
3910 {
3911 uint32_t mac_addr;
3912 int ret = 1;
3914 mac_addr = bge_readmem_ind(sc, 0x0c14);
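/*
 * Annotation: the 0x484b signature checked below is the ASCII
 * string "HK", which the firmware presumably writes ahead of a
 * valid station address in NIC memory.
 */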
3915 if ((mac_addr >> 16) == 0x484b) {
3916 ether_addr[0] = (uint8_t)(mac_addr >> 8);
3917 ether_addr[1] = (uint8_t)mac_addr;
3918 mac_addr = bge_readmem_ind(sc, 0x0c18);
3919 ether_addr[2] = (uint8_t)(mac_addr >> 24);
3920 ether_addr[3] = (uint8_t)(mac_addr >> 16);
3921 ether_addr[4] = (uint8_t)(mac_addr >> 8);
3922 ether_addr[5] = (uint8_t)mac_addr;
3923 ret = 0;
3924 }
3925 return ret;
3926 }
3928 static int
3929 bge_get_eaddr_nvram(struct bge_softc *sc, uint8_t ether_addr[])
3930 {
3931 int mac_offset = BGE_EE_MAC_OFFSET;
3933 if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
3934 mac_offset = BGE_EE_MAC_OFFSET_5906;
3936 return bge_read_nvram(sc, ether_addr, mac_offset + 2, ETHER_ADDR_LEN);
3937 }
3939 static int
3940 bge_get_eaddr_eeprom(struct bge_softc *sc, uint8_t ether_addr[])
3941 {
3942 if (sc->bge_flags & BGE_FLAG_NO_EEPROM)
3943 return 1;
3945 return bge_read_eeprom(sc, ether_addr, BGE_EE_MAC_OFFSET + 2,
3946 ETHER_ADDR_LEN);
3947 }
3949 static int
3950 bge_get_eaddr(struct bge_softc *sc, uint8_t eaddr[])
3951 {
3952 static const bge_eaddr_fcn_t bge_eaddr_funcs[] = {
3953 /* NOTE: Order is critical */
3954 bge_get_eaddr_mem,
3955 bge_get_eaddr_nvram,
3956 bge_get_eaddr_eeprom,
3957 NULL
3958 };
3959 const bge_eaddr_fcn_t *func;
3961 for (func = bge_eaddr_funcs; *func != NULL; ++func) {
3962 if ((*func)(sc, eaddr) == 0)
3963 break;
3964 }
3965 return (*func == NULL ? ENXIO : 0);
3966 }
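/*
 * Annotation: the lookup order in bge_eaddr_funcs[] matters. A
 * station address found in NIC memory takes precedence over NVRAM,
 * which in turn takes precedence over the EEPROM; the first method
 * that succeeds terminates the walk.
 */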