2 * Copyright (c) 2001 Wind River Systems
3 * Copyright (c) 1997, 1998, 1999, 2001
4 * Bill Paul <wpaul@windriver.com>. All rights reserved.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 * must display the following acknowledgement:
16 * This product includes software developed by Bill Paul.
17 * 4. Neither the name of the author nor the names of any co-contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31 * THE POSSIBILITY OF SUCH DAMAGE.
33 * $FreeBSD: src/sys/dev/bge/if_bge.c,v 1.3.2.39 2005/07/03 03:41:18 silby Exp $
37 * Broadcom BCM570x family gigabit ethernet driver for FreeBSD.
39 * Written by Bill Paul <wpaul@windriver.com>
40 * Senior Engineer, Wind River Systems
44 * The Broadcom BCM5700 is based on technology originally developed by
45 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
46 * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
47 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
48 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
49 * frames, highly configurable RX filtering, and 16 RX and TX queues
50 * (which, along with RX filter rules, can be used for QOS applications).
51 * Other features, such as TCP segmentation, may be available as part
52 * of value-added firmware updates. Unlike the Tigon I and Tigon II,
53 * firmware images can be stored in hardware and need not be compiled
54 * into the driver.
56 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
57 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
59 * The BCM5701 is a single-chip solution incorporating both the BCM5700
60 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
61 * does not support external SSRAM.
63 * Broadcom also produces a variation of the BCM5700 under the "Altima"
64 * brand name, which is functionally similar but lacks PCI-X support.
66 * Without external SSRAM, you can have at most 4 TX rings,
67 * and the use of the mini RX ring is disabled. This seems to imply
68 * that these features are simply not available on the BCM5701. As a
69 * result, this driver does not implement any support for the mini RX
70 * ring.
73 #include "opt_ifpoll.h"
75 #include <sys/param.h>
77 #include <sys/endian.h>
78 #include <sys/kernel.h>
80 #include <sys/interrupt.h>
82 #include <sys/malloc.h>
83 #include <sys/queue.h>
85 #include <sys/serialize.h>
86 #include <sys/socket.h>
87 #include <sys/sockio.h>
88 #include <sys/sysctl.h>
90 #include <netinet/ip.h>
91 #include <netinet/tcp.h>
94 #include <net/ethernet.h>
96 #include <net/if_arp.h>
97 #include <net/if_dl.h>
98 #include <net/if_media.h>
99 #include <net/if_poll.h>
100 #include <net/if_types.h>
101 #include <net/ifq_var.h>
102 #include <net/vlan/if_vlan_var.h>
103 #include <net/vlan/if_vlan_ether.h>
105 #include <dev/netif/mii_layer/mii.h>
106 #include <dev/netif/mii_layer/miivar.h>
107 #include <dev/netif/mii_layer/brgphyreg.h>
109 #include <bus/pci/pcidevs.h>
110 #include <bus/pci/pcireg.h>
111 #include <bus/pci/pcivar.h>
113 #include <dev/netif/bge/if_bgereg.h>
114 #include <dev/netif/bge/if_bgevar.h>
116 /* "device miibus" required. See GENERIC if you get errors here. */
117 #include "miibus_if.h"
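/*
 * A minimal kernel-config sketch (lines as they would appear in a
 * config such as GENERIC; placement is illustrative):
 *
 *	device	miibus		# MII bus support, required by bge
 *	device	bge		# Broadcom BCM570x gigabit ethernet
 */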
119 #define BGE_CSUM_FEATURES (CSUM_IP | CSUM_TCP)
121 static const struct bge_type {
126 { PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3C996,
127 "3COM 3C996 Gigabit Ethernet" },
129 { PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_BCM5700,
130 "Alteon BCM5700 Gigabit Ethernet" },
131 { PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_BCM5701,
132 "Alteon BCM5701 Gigabit Ethernet" },
134 { PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1000,
135 "Altima AC1000 Gigabit Ethernet" },
136 { PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1001,
137 "Altima AC1002 Gigabit Ethernet" },
138 { PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC9100,
139 "Altima AC9100 Gigabit Ethernet" },
141 { PCI_VENDOR_APPLE, PCI_PRODUCT_APPLE_BCM5701,
142 "Apple BCM5701 Gigabit Ethernet" },
144 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5700,
145 "Broadcom BCM5700 Gigabit Ethernet" },
146 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5701,
147 "Broadcom BCM5701 Gigabit Ethernet" },
148 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702,
149 "Broadcom BCM5702 Gigabit Ethernet" },
150 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702X,
151 "Broadcom BCM5702X Gigabit Ethernet" },
152 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702_ALT,
153 "Broadcom BCM5702 Gigabit Ethernet" },
154 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703,
155 "Broadcom BCM5703 Gigabit Ethernet" },
156 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703X,
157 "Broadcom BCM5703X Gigabit Ethernet" },
158 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703A3,
159 "Broadcom BCM5703 Gigabit Ethernet" },
160 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704C,
161 "Broadcom BCM5704C Dual Gigabit Ethernet" },
162 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704S,
163 "Broadcom BCM5704S Dual Gigabit Ethernet" },
164 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704S_ALT,
165 "Broadcom BCM5704S Dual Gigabit Ethernet" },
166 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705,
167 "Broadcom BCM5705 Gigabit Ethernet" },
168 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705F,
169 "Broadcom BCM5705F Gigabit Ethernet" },
170 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705K,
171 "Broadcom BCM5705K Gigabit Ethernet" },
172 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705M,
173 "Broadcom BCM5705M Gigabit Ethernet" },
174 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705M_ALT,
175 "Broadcom BCM5705M Gigabit Ethernet" },
176 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5714,
177 "Broadcom BCM5714C Gigabit Ethernet" },
178 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5714S,
179 "Broadcom BCM5714S Gigabit Ethernet" },
180 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5715,
181 "Broadcom BCM5715 Gigabit Ethernet" },
182 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5715S,
183 "Broadcom BCM5715S Gigabit Ethernet" },
184 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5720,
185 "Broadcom BCM5720 Gigabit Ethernet" },
186 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5721,
187 "Broadcom BCM5721 Gigabit Ethernet" },
188 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5722,
189 "Broadcom BCM5722 Gigabit Ethernet" },
190 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5723,
191 "Broadcom BCM5723 Gigabit Ethernet" },
192 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5750,
193 "Broadcom BCM5750 Gigabit Ethernet" },
194 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5750M,
195 "Broadcom BCM5750M Gigabit Ethernet" },
196 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751,
197 "Broadcom BCM5751 Gigabit Ethernet" },
198 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751F,
199 "Broadcom BCM5751F Gigabit Ethernet" },
200 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751M,
201 "Broadcom BCM5751M Gigabit Ethernet" },
202 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5752,
203 "Broadcom BCM5752 Gigabit Ethernet" },
204 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5752M,
205 "Broadcom BCM5752M Gigabit Ethernet" },
206 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5753,
207 "Broadcom BCM5753 Gigabit Ethernet" },
208 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5753F,
209 "Broadcom BCM5753F Gigabit Ethernet" },
210 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5753M,
211 "Broadcom BCM5753M Gigabit Ethernet" },
212 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5754,
213 "Broadcom BCM5754 Gigabit Ethernet" },
214 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5754M,
215 "Broadcom BCM5754M Gigabit Ethernet" },
216 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5755,
217 "Broadcom BCM5755 Gigabit Ethernet" },
218 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5755M,
219 "Broadcom BCM5755M Gigabit Ethernet" },
220 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5756,
221 "Broadcom BCM5756 Gigabit Ethernet" },
222 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5761,
223 "Broadcom BCM5761 Gigabit Ethernet" },
224 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5761E,
225 "Broadcom BCM5761E Gigabit Ethernet" },
226 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5761S,
227 "Broadcom BCM5761S Gigabit Ethernet" },
228 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5761SE,
229 "Broadcom BCM5761SE Gigabit Ethernet" },
230 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5764,
231 "Broadcom BCM5764 Gigabit Ethernet" },
232 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5780,
233 "Broadcom BCM5780 Gigabit Ethernet" },
234 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5780S,
235 "Broadcom BCM5780S Gigabit Ethernet" },
236 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5781,
237 "Broadcom BCM5781 Gigabit Ethernet" },
238 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5782,
239 "Broadcom BCM5782 Gigabit Ethernet" },
240 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5784,
241 "Broadcom BCM5784 Gigabit Ethernet" },
242 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5785F,
243 "Broadcom BCM5785F Gigabit Ethernet" },
244 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5785G,
245 "Broadcom BCM5785G Gigabit Ethernet" },
246 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5786,
247 "Broadcom BCM5786 Gigabit Ethernet" },
248 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5787,
249 "Broadcom BCM5787 Gigabit Ethernet" },
250 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5787F,
251 "Broadcom BCM5787F Gigabit Ethernet" },
252 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5787M,
253 "Broadcom BCM5787M Gigabit Ethernet" },
254 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5788,
255 "Broadcom BCM5788 Gigabit Ethernet" },
256 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5789,
257 "Broadcom BCM5789 Gigabit Ethernet" },
258 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5901,
259 "Broadcom BCM5901 Fast Ethernet" },
260 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5901A2,
261 "Broadcom BCM5901A2 Fast Ethernet" },
262 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5903M,
263 "Broadcom BCM5903M Fast Ethernet" },
264 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5906,
265 "Broadcom BCM5906 Fast Ethernet"},
266 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5906M,
267 "Broadcom BCM5906M Fast Ethernet"},
268 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57760,
269 "Broadcom BCM57760 Gigabit Ethernet"},
270 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57780,
271 "Broadcom BCM57780 Gigabit Ethernet"},
272 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57788,
273 "Broadcom BCM57788 Gigabit Ethernet"},
274 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57790,
275 "Broadcom BCM57790 Gigabit Ethernet"},
276 { PCI_VENDOR_SCHNEIDERKOCH, PCI_PRODUCT_SCHNEIDERKOCH_SK_9DX1,
277 "SysKonnect Gigabit Ethernet" },
282 #define BGE_IS_JUMBO_CAPABLE(sc) ((sc)->bge_flags & BGE_FLAG_JUMBO)
283 #define BGE_IS_5700_FAMILY(sc) ((sc)->bge_flags & BGE_FLAG_5700_FAMILY)
284 #define BGE_IS_5705_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_5705_PLUS)
285 #define BGE_IS_5714_FAMILY(sc) ((sc)->bge_flags & BGE_FLAG_5714_FAMILY)
286 #define BGE_IS_575X_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_575X_PLUS)
287 #define BGE_IS_5755_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_5755_PLUS)
288 #define BGE_IS_5788(sc) ((sc)->bge_flags & BGE_FLAG_5788)
290 #define BGE_IS_CRIPPLED(sc) \
291 (BGE_IS_5788((sc)) || (sc)->bge_asicrev == BGE_ASICREV_BCM5700)
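/*
 * Illustrative (hypothetical) call sites for these predicates: they
 * gate chip-specific paths elsewhere in the driver, e.g.
 *
 *	if (BGE_IS_JUMBO_CAPABLE(sc))
 *		error = bge_alloc_jumbo_mem(sc);
 *	if (BGE_IS_CRIPPLED(sc))
 *		intr = bge_intr_crippled;
 */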
293 typedef int (*bge_eaddr_fcn_t)(struct bge_softc *, uint8_t[]);
295 static int bge_probe(device_t);
296 static int bge_attach(device_t);
297 static int bge_detach(device_t);
298 static void bge_txeof(struct bge_softc *, uint16_t);
299 static void bge_rxeof(struct bge_softc *, uint16_t);
301 static void bge_tick(void *);
302 static void bge_stats_update(struct bge_softc *);
303 static void bge_stats_update_regs(struct bge_softc *);
304 static struct mbuf *
305 bge_defrag_shortdma(struct mbuf *);
306 static int bge_encap(struct bge_softc *, struct mbuf **, uint32_t *);
307 static int bge_setup_tso(struct bge_softc *, struct mbuf **,
308 uint16_t *, uint16_t *);
311 static void bge_npoll(struct ifnet *, struct ifpoll_info *);
312 static void bge_npoll_compat(struct ifnet *, void *, int);
314 static void bge_intr_crippled(void *);
315 static void bge_intr_legacy(void *);
316 static void bge_msi(void *);
317 static void bge_msi_oneshot(void *);
318 static void bge_intr(struct bge_softc *);
319 static void bge_enable_intr(struct bge_softc *);
320 static void bge_disable_intr(struct bge_softc *);
321 static void bge_start(struct ifnet *);
322 static int bge_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
323 static void bge_init(void *);
324 static void bge_stop(struct bge_softc *);
325 static void bge_watchdog(struct ifnet *);
326 static void bge_shutdown(device_t);
327 static int bge_suspend(device_t);
328 static int bge_resume(device_t);
329 static int bge_ifmedia_upd(struct ifnet *);
330 static void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
332 static uint8_t bge_nvram_getbyte(struct bge_softc *, int, uint8_t *);
333 static int bge_read_nvram(struct bge_softc *, caddr_t, int, int);
335 static uint8_t bge_eeprom_getbyte(struct bge_softc *, uint32_t, uint8_t *);
336 static int bge_read_eeprom(struct bge_softc *, caddr_t, uint32_t, size_t);
338 static void bge_setmulti(struct bge_softc *);
339 static void bge_setpromisc(struct bge_softc *);
340 static void bge_enable_msi(struct bge_softc *sc);
342 static int bge_alloc_jumbo_mem(struct bge_softc *);
343 static void bge_free_jumbo_mem(struct bge_softc *);
344 static struct bge_jslot
345 *bge_jalloc(struct bge_softc *);
346 static void bge_jfree(void *);
347 static void bge_jref(void *);
348 static int bge_newbuf_std(struct bge_softc *, int, int);
349 static int bge_newbuf_jumbo(struct bge_softc *, int, int);
350 static void bge_setup_rxdesc_std(struct bge_softc *, int);
351 static void bge_setup_rxdesc_jumbo(struct bge_softc *, int);
352 static int bge_init_rx_ring_std(struct bge_softc *);
353 static void bge_free_rx_ring_std(struct bge_softc *);
354 static int bge_init_rx_ring_jumbo(struct bge_softc *);
355 static void bge_free_rx_ring_jumbo(struct bge_softc *);
356 static void bge_free_tx_ring(struct bge_softc *);
357 static int bge_init_tx_ring(struct bge_softc *);
359 static int bge_chipinit(struct bge_softc *);
360 static int bge_blockinit(struct bge_softc *);
361 static void bge_stop_block(struct bge_softc *, bus_size_t, uint32_t);
363 static uint32_t bge_readmem_ind(struct bge_softc *, uint32_t);
364 static void bge_writemem_ind(struct bge_softc *, uint32_t, uint32_t);
366 static uint32_t bge_readreg_ind(struct bge_softc *, uint32_t);
368 static void bge_writereg_ind(struct bge_softc *, uint32_t, uint32_t);
369 static void bge_writemem_direct(struct bge_softc *, uint32_t, uint32_t);
370 static void bge_writembx(struct bge_softc *, int, int);
372 static int bge_miibus_readreg(device_t, int, int);
373 static int bge_miibus_writereg(device_t, int, int, int);
374 static void bge_miibus_statchg(device_t);
375 static void bge_bcm5700_link_upd(struct bge_softc *, uint32_t);
376 static void bge_tbi_link_upd(struct bge_softc *, uint32_t);
377 static void bge_copper_link_upd(struct bge_softc *, uint32_t);
378 static void bge_autopoll_link_upd(struct bge_softc *, uint32_t);
379 static void bge_link_poll(struct bge_softc *);
381 static void bge_reset(struct bge_softc *);
383 static int bge_dma_alloc(struct bge_softc *);
384 static void bge_dma_free(struct bge_softc *);
385 static int bge_dma_block_alloc(struct bge_softc *, bus_size_t,
386 bus_dma_tag_t *, bus_dmamap_t *,
387 void **, bus_addr_t *);
388 static void bge_dma_block_free(bus_dma_tag_t, bus_dmamap_t, void *);
390 static int bge_get_eaddr_mem(struct bge_softc *, uint8_t[]);
391 static int bge_get_eaddr_nvram(struct bge_softc *, uint8_t[]);
392 static int bge_get_eaddr_eeprom(struct bge_softc *, uint8_t[]);
393 static int bge_get_eaddr(struct bge_softc *, uint8_t[]);
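/*
 * Editor's note (assumption): bge_get_eaddr() presumably tries the
 * bge_eaddr_fcn_t helpers above in sequence -- memory window, NVRAM,
 * then EEPROM -- returning the first valid station address found.
 */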
395 static void bge_coal_change(struct bge_softc *);
396 static int bge_sysctl_rx_coal_ticks(SYSCTL_HANDLER_ARGS);
397 static int bge_sysctl_tx_coal_ticks(SYSCTL_HANDLER_ARGS);
398 static int bge_sysctl_rx_coal_bds(SYSCTL_HANDLER_ARGS);
399 static int bge_sysctl_tx_coal_bds(SYSCTL_HANDLER_ARGS);
400 static int bge_sysctl_rx_coal_ticks_int(SYSCTL_HANDLER_ARGS);
401 static int bge_sysctl_tx_coal_ticks_int(SYSCTL_HANDLER_ARGS);
402 static int bge_sysctl_rx_coal_bds_int(SYSCTL_HANDLER_ARGS);
403 static int bge_sysctl_tx_coal_bds_int(SYSCTL_HANDLER_ARGS);
404 static int bge_sysctl_coal_chg(SYSCTL_HANDLER_ARGS, uint32_t *,
408 * Set the following tunable to 1 for some IBM blade servers with the DNLK
409 * switch module. Auto negotiation is broken for those configurations.
411 static int bge_fake_autoneg = 0;
412 TUNABLE_INT("hw.bge.fake_autoneg", &bge_fake_autoneg);
414 static int bge_msi_enable = 1;
415 TUNABLE_INT("hw.bge.msi.enable", &bge_msi_enable);
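/*
 * Usage sketch: tunables are read from the kernel environment at
 * boot, typically via /boot/loader.conf (path assumed here), e.g.
 *
 *	hw.bge.fake_autoneg="1"
 *	hw.bge.msi.enable="0"
 */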
417 #if !defined(KTR_IF_BGE)
418 #define KTR_IF_BGE KTR_ALL
419 #endif
420 KTR_INFO_MASTER(if_bge);
421 KTR_INFO(KTR_IF_BGE, if_bge, intr, 0, "intr");
422 KTR_INFO(KTR_IF_BGE, if_bge, rx_pkt, 1, "rx_pkt");
423 KTR_INFO(KTR_IF_BGE, if_bge, tx_pkt, 2, "tx_pkt");
424 #define logif(name) KTR_LOG(if_bge_ ## name)
426 static device_method_t bge_methods[] = {
427 /* Device interface */
428 DEVMETHOD(device_probe, bge_probe),
429 DEVMETHOD(device_attach, bge_attach),
430 DEVMETHOD(device_detach, bge_detach),
431 DEVMETHOD(device_shutdown, bge_shutdown),
432 DEVMETHOD(device_suspend, bge_suspend),
433 DEVMETHOD(device_resume, bge_resume),
436 DEVMETHOD(bus_print_child, bus_generic_print_child),
437 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
440 DEVMETHOD(miibus_readreg, bge_miibus_readreg),
441 DEVMETHOD(miibus_writereg, bge_miibus_writereg),
442 DEVMETHOD(miibus_statchg, bge_miibus_statchg),
447 static DEFINE_CLASS_0(bge, bge_driver, bge_methods, sizeof(struct bge_softc));
448 static devclass_t bge_devclass;
450 DECLARE_DUMMY_MODULE(if_bge);
451 DRIVER_MODULE(if_bge, pci, bge_driver, bge_devclass, NULL, NULL);
452 DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, NULL, NULL);
455 bge_readmem_ind(struct bge_softc *sc, uint32_t off)
457 device_t dev = sc->bge_dev;
458 uint32_t val;
460 if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
461 off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4)
462 return(0);
464 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
465 val = pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4);
466 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
467 return(val);
471 bge_writemem_ind(struct bge_softc *sc, uint32_t off, uint32_t val)
473 device_t dev = sc->bge_dev;
475 if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
476 off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4)
477 return;
479 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
480 pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
481 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
486 bge_readreg_ind(struct bge_softc *sc, uint32_t off)
488 device_t dev = sc->bge_dev;
490 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
491 return(pci_read_config(dev, BGE_PCI_REG_DATA, 4));
496 bge_writereg_ind(struct bge_softc *sc, uint32_t off, uint32_t val)
498 device_t dev = sc->bge_dev;
500 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
501 pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);
505 bge_writemem_direct(struct bge_softc *sc, uint32_t off, uint32_t val)
507 CSR_WRITE_4(sc, off, val);
511 bge_writembx(struct bge_softc *sc, int off, int val)
513 if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
514 off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI;
516 CSR_WRITE_4(sc, off, val);
517 if (sc->bge_mbox_reorder)
518 CSR_READ_4(sc, off);
522 bge_nvram_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
524 uint32_t access, byte = 0;
528 CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
529 for (i = 0; i < 8000; i++) {
530 if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1)
538 access = CSR_READ_4(sc, BGE_NVRAM_ACCESS);
539 CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access | BGE_NVRAMACC_ENABLE);
541 CSR_WRITE_4(sc, BGE_NVRAM_ADDR, addr & 0xfffffffc);
542 CSR_WRITE_4(sc, BGE_NVRAM_CMD, BGE_NVRAM_READCMD);
543 for (i = 0; i < BGE_TIMEOUT * 10; i++) {
545 if (CSR_READ_4(sc, BGE_NVRAM_CMD) & BGE_NVRAMCMD_DONE) {
551 if (i == BGE_TIMEOUT * 10) {
552 if_printf(&sc->arpcom.ac_if, "nvram read timed out\n");
557 byte = CSR_READ_4(sc, BGE_NVRAM_RDDATA);
559 *dest = (bswap32(byte) >> ((addr % 4) * 8)) & 0xFF;
561 /* Disable access. */
562 CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access);
565 CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_CLR1);
566 CSR_READ_4(sc, BGE_NVRAM_SWARB);
572 * Read a sequence of bytes from NVRAM.
575 bge_read_nvram(struct bge_softc *sc, caddr_t dest, int off, int cnt)
580 if (sc->bge_asicrev != BGE_ASICREV_BCM5906)
583 for (i = 0; i < cnt; i++) {
584 err = bge_nvram_getbyte(sc, off + i, &byte);
590 return (err ? 1 : 0);
594 * Read a byte of data stored in the EEPROM at address 'addr.' The
595 * BCM570x supports both the traditional bitbang interface and an
596 * auto access interface for reading the EEPROM. We use the auto
597 * access method.
600 bge_eeprom_getbyte(struct bge_softc *sc, uint32_t addr, uint8_t *dest)
606 * Enable use of auto EEPROM access so we can avoid
607 * having to use the bitbang method.
609 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);
611 /* Reset the EEPROM, load the clock period. */
612 CSR_WRITE_4(sc, BGE_EE_ADDR,
613 BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
616 /* Issue the read EEPROM command. */
617 CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);
619 /* Wait for completion */
620 for (i = 0; i < BGE_TIMEOUT * 10; i++) {
622 if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
626 if (i == BGE_TIMEOUT * 10) {
627 if_printf(&sc->arpcom.ac_if, "eeprom read timed out\n");
632 byte = CSR_READ_4(sc, BGE_EE_DATA);
634 *dest = (byte >> ((addr % 4) * 8)) & 0xFF;
640 * Read a sequence of bytes from the EEPROM.
643 bge_read_eeprom(struct bge_softc *sc, caddr_t dest, uint32_t off, size_t len)
649 for (byte = 0, err = 0, i = 0; i < len; i++) {
650 err = bge_eeprom_getbyte(sc, off + i, &byte);
660 bge_miibus_readreg(device_t dev, int phy, int reg)
662 struct bge_softc *sc = device_get_softc(dev);
666 KASSERT(phy == sc->bge_phyno,
667 ("invalid phyno %d, should be %d", phy, sc->bge_phyno));
669 /* Clear the autopoll bit if set, otherwise may trigger PCI errors. */
670 if (sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) {
671 CSR_WRITE_4(sc, BGE_MI_MODE,
672 sc->bge_mi_mode & ~BGE_MIMODE_AUTOPOLL);
676 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ | BGE_MICOMM_BUSY |
677 BGE_MIPHY(phy) | BGE_MIREG(reg));
679 /* Poll for the PHY register access to complete. */
680 for (i = 0; i < BGE_TIMEOUT; i++) {
682 val = CSR_READ_4(sc, BGE_MI_COMM);
683 if ((val & BGE_MICOMM_BUSY) == 0) {
685 val = CSR_READ_4(sc, BGE_MI_COMM);
689 if (i == BGE_TIMEOUT) {
690 if_printf(&sc->arpcom.ac_if, "PHY read timed out "
691 "(phy %d, reg %d, val 0x%08x)\n", phy, reg, val);
695 /* Restore the autopoll bit if necessary. */
696 if (sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) {
697 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode);
701 if (val & BGE_MICOMM_READFAIL)
704 return (val & 0xFFFF);
708 bge_miibus_writereg(device_t dev, int phy, int reg, int val)
710 struct bge_softc *sc = device_get_softc(dev);
713 KASSERT(phy == sc->bge_phyno,
714 ("invalid phyno %d, should be %d", phy, sc->bge_phyno));
716 if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
717 (reg == BRGPHY_MII_1000CTL || reg == BRGPHY_MII_AUXCTL))
718 return(0);
720 /* Clear the autopoll bit if set, otherwise may trigger PCI errors. */
721 if (sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) {
722 CSR_WRITE_4(sc, BGE_MI_MODE,
723 sc->bge_mi_mode & ~BGE_MIMODE_AUTOPOLL);
727 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE | BGE_MICOMM_BUSY |
728 BGE_MIPHY(phy) | BGE_MIREG(reg) | val);
730 for (i = 0; i < BGE_TIMEOUT; i++) {
732 if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) {
734 CSR_READ_4(sc, BGE_MI_COMM); /* dummy read */
738 if (i == BGE_TIMEOUT) {
739 if_printf(&sc->arpcom.ac_if, "PHY write timed out "
740 "(phy %d, reg %d, val %d)\n", phy, reg, val);
743 /* Restore the autopoll bit if necessary. */
744 if (sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) {
745 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode);
753 bge_miibus_statchg(device_t dev)
755 struct bge_softc *sc;
756 struct mii_data *mii;
758 sc = device_get_softc(dev);
759 mii = device_get_softc(sc->bge_miibus);
761 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
762 (IFM_ACTIVE | IFM_AVALID)) {
763 switch (IFM_SUBTYPE(mii->mii_media_active)) {
771 if (sc->bge_asicrev != BGE_ASICREV_BCM5906)
783 if (sc->bge_link == 0)
784 return;
786 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
787 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
788 IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) {
789 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
791 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
794 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
795 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
797 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
802 * Memory management for jumbo frames.
805 bge_alloc_jumbo_mem(struct bge_softc *sc)
807 struct ifnet *ifp = &sc->arpcom.ac_if;
808 struct bge_jslot *entry;
814 * Create tag for jumbo mbufs.
815 * This is really a bit of a kludge. We allocate a special
816 * jumbo buffer pool which (thanks to the way our DMA
817 * memory allocation works) will consist of contiguous
818 * pages. This means that even though a jumbo buffer might
819 * be larger than a page size, we don't really need to
820 * map it into more than one DMA segment. However, the
821 * default mbuf tag will result in multi-segment mappings,
822 * so we have to create a special jumbo mbuf tag that
823 * lets us get away with mapping the jumbo buffers as
824 * a single segment. I think eventually the driver should
825 * be changed so that it uses ordinary mbufs and cluster
826 * buffers, i.e. jumbo frames can span multiple DMA
827 * descriptors. But that's a project for another day.
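/*
 * Sketch of the idea (parameters illustrative, not the exact tag
 * created below): a jumbo tag built with nsegments = 1 and maxsegsz
 * covering the whole slot forces single-segment mappings, roughly
 *
 *	bus_dma_tag_create(parent, 1, 0, ..., BGE_JLEN, 1, BGE_JLEN,
 *	    0, &sc->bge_cdata.bge_jumbo_tag);
 */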
831 * Create DMA stuffs for jumbo RX ring.
833 error = bge_dma_block_alloc(sc, BGE_JUMBO_RX_RING_SZ,
834 &sc->bge_cdata.bge_rx_jumbo_ring_tag,
835 &sc->bge_cdata.bge_rx_jumbo_ring_map,
836 (void *)&sc->bge_ldata.bge_rx_jumbo_ring,
837 &sc->bge_ldata.bge_rx_jumbo_ring_paddr);
839 if_printf(ifp, "could not create jumbo RX ring\n");
844 * Create DMA stuffs for jumbo buffer block.
846 error = bge_dma_block_alloc(sc, BGE_JMEM,
847 &sc->bge_cdata.bge_jumbo_tag,
848 &sc->bge_cdata.bge_jumbo_map,
849 (void **)&sc->bge_ldata.bge_jumbo_buf,
852 if_printf(ifp, "could not create jumbo buffer\n");
856 SLIST_INIT(&sc->bge_jfree_listhead);
859 * Now divide it up into 9K pieces and save the addresses
860 * in an array. Note that we play an evil trick here by using
861 * the first few bytes in the buffer to hold the address
862 * of the softc structure for this interface. This is because
863 * bge_jfree() needs it, but it is called by the mbuf management
864 * code which will not pass it to us explicitly.
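/*
 * Sketch of the per-slot bookkeeping used below (the authoritative
 * definition of struct bge_jslot lives in if_bgevar.h):
 *
 *	bge_sc    - back-pointer used by bge_jfree()/bge_jref()
 *	bge_buf   - KVA of this slot within bge_jumbo_buf
 *	bge_paddr - DMA address of the slot
 *	bge_inuse - reference count, manipulated atomically
 */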
866 for (i = 0, ptr = sc->bge_ldata.bge_jumbo_buf; i < BGE_JSLOTS; i++) {
867 entry = &sc->bge_cdata.bge_jslots[i];
868 entry->bge_sc = sc;
869 entry->bge_buf = ptr;
870 entry->bge_paddr = paddr;
871 entry->bge_inuse = 0;
873 SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, entry, jslot_link);
882 bge_free_jumbo_mem(struct bge_softc *sc)
884 /* Destroy jumbo RX ring. */
885 bge_dma_block_free(sc->bge_cdata.bge_rx_jumbo_ring_tag,
886 sc->bge_cdata.bge_rx_jumbo_ring_map,
887 sc->bge_ldata.bge_rx_jumbo_ring);
889 /* Destroy jumbo buffer block. */
890 bge_dma_block_free(sc->bge_cdata.bge_jumbo_tag,
891 sc->bge_cdata.bge_jumbo_map,
892 sc->bge_ldata.bge_jumbo_buf);
896 * Allocate a jumbo buffer.
898 static struct bge_jslot *
899 bge_jalloc(struct bge_softc *sc)
901 struct bge_jslot *entry;
903 lwkt_serialize_enter(&sc->bge_jslot_serializer);
904 entry = SLIST_FIRST(&sc->bge_jfree_listhead);
906 SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jslot_link);
907 entry->bge_inuse = 1;
909 if_printf(&sc->arpcom.ac_if, "no free jumbo buffers\n");
911 lwkt_serialize_exit(&sc->bge_jslot_serializer);
916 * Adjust usage count on a jumbo buffer.
921 struct bge_jslot *entry = (struct bge_jslot *)arg;
922 struct bge_softc *sc = entry->bge_sc;
925 panic("bge_jref: can't find softc pointer!");
927 if (&sc->bge_cdata.bge_jslots[entry->bge_slot] != entry) {
928 panic("bge_jref: asked to reference buffer "
929 "that we don't manage!");
930 } else if (entry->bge_inuse == 0) {
931 panic("bge_jref: buffer already free!");
933 atomic_add_int(&entry->bge_inuse, 1);
938 * Release a jumbo buffer.
943 struct bge_jslot *entry = (struct bge_jslot *)arg;
944 struct bge_softc *sc = entry->bge_sc;
947 panic("bge_jfree: can't find softc pointer!");
949 if (&sc->bge_cdata.bge_jslots[entry->bge_slot] != entry) {
950 panic("bge_jfree: asked to free buffer that we don't manage!");
951 } else if (entry->bge_inuse == 0) {
952 panic("bge_jfree: buffer already free!");
955 * Possible MP race to 0, use the serializer. The atomic insn
956 * is still needed for races against bge_jref().
958 lwkt_serialize_enter(&sc->bge_jslot_serializer);
959 atomic_subtract_int(&entry->bge_inuse, 1);
960 if (entry->bge_inuse == 0) {
961 SLIST_INSERT_HEAD(&sc->bge_jfree_listhead,
964 lwkt_serialize_exit(&sc->bge_jslot_serializer);
970 * Initialize a standard receive ring descriptor.
973 bge_newbuf_std(struct bge_softc *sc, int i, int init)
975 struct mbuf *m_new = NULL;
976 bus_dma_segment_t seg;
980 m_new = m_getcl(init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
983 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
985 if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
986 m_adj(m_new, ETHER_ALIGN);
988 error = bus_dmamap_load_mbuf_segment(sc->bge_cdata.bge_rx_mtag,
989 sc->bge_cdata.bge_rx_tmpmap, m_new,
990 &seg, 1, &nsegs, BUS_DMA_NOWAIT);
997 bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
998 sc->bge_cdata.bge_rx_std_dmamap[i],
999 BUS_DMASYNC_POSTREAD);
1000 bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag,
1001 sc->bge_cdata.bge_rx_std_dmamap[i]);
1004 map = sc->bge_cdata.bge_rx_tmpmap;
1005 sc->bge_cdata.bge_rx_tmpmap = sc->bge_cdata.bge_rx_std_dmamap[i];
1006 sc->bge_cdata.bge_rx_std_dmamap[i] = map;
1008 sc->bge_cdata.bge_rx_std_chain[i].bge_mbuf = m_new;
1009 sc->bge_cdata.bge_rx_std_chain[i].bge_paddr = seg.ds_addr;
1011 bge_setup_rxdesc_std(sc, i);
1016 bge_setup_rxdesc_std(struct bge_softc *sc, int i)
1018 struct bge_rxchain *rc;
1019 struct bge_rx_bd *r;
1021 rc = &sc->bge_cdata.bge_rx_std_chain[i];
1022 r = &sc->bge_ldata.bge_rx_std_ring[i];
1024 r->bge_addr.bge_addr_lo = BGE_ADDR_LO(rc->bge_paddr);
1025 r->bge_addr.bge_addr_hi = BGE_ADDR_HI(rc->bge_paddr);
1026 r->bge_len = rc->bge_mbuf->m_len;
1028 r->bge_flags = BGE_RXBDFLAG_END;
1032 * Initialize a jumbo receive ring descriptor. This allocates
1033 * a jumbo buffer from the pool managed internally by the driver.
1036 bge_newbuf_jumbo(struct bge_softc *sc, int i, int init)
1038 struct mbuf *m_new = NULL;
1039 struct bge_jslot *buf;
1042 /* Allocate the mbuf. */
1043 MGETHDR(m_new, init ? MB_WAIT : MB_DONTWAIT, MT_DATA);
1047 /* Allocate the jumbo buffer */
1048 buf = bge_jalloc(sc);
1054 /* Attach the buffer to the mbuf. */
1055 m_new->m_ext.ext_arg = buf;
1056 m_new->m_ext.ext_buf = buf->bge_buf;
1057 m_new->m_ext.ext_free = bge_jfree;
1058 m_new->m_ext.ext_ref = bge_jref;
1059 m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN;
1061 m_new->m_flags |= M_EXT;
1063 m_new->m_data = m_new->m_ext.ext_buf;
1064 m_new->m_len = m_new->m_pkthdr.len = m_new->m_ext.ext_size;
1066 paddr = buf->bge_paddr;
1067 if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0) {
1068 m_adj(m_new, ETHER_ALIGN);
1069 paddr += ETHER_ALIGN;
1072 /* Save necessary information */
1073 sc->bge_cdata.bge_rx_jumbo_chain[i].bge_mbuf = m_new;
1074 sc->bge_cdata.bge_rx_jumbo_chain[i].bge_paddr = paddr;
1076 /* Set up the descriptor. */
1077 bge_setup_rxdesc_jumbo(sc, i);
1082 bge_setup_rxdesc_jumbo(struct bge_softc *sc, int i)
1084 struct bge_rx_bd *r;
1085 struct bge_rxchain *rc;
1087 r = &sc->bge_ldata.bge_rx_jumbo_ring[i];
1088 rc = &sc->bge_cdata.bge_rx_jumbo_chain[i];
1090 r->bge_addr.bge_addr_lo = BGE_ADDR_LO(rc->bge_paddr);
1091 r->bge_addr.bge_addr_hi = BGE_ADDR_HI(rc->bge_paddr);
1092 r->bge_len = rc->bge_mbuf->m_len;
1094 r->bge_flags = BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING;
1098 bge_init_rx_ring_std(struct bge_softc *sc)
1102 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1103 error = bge_newbuf_std(sc, i, 1);
1108 sc->bge_std = BGE_STD_RX_RING_CNT - 1;
1109 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
1115 bge_free_rx_ring_std(struct bge_softc *sc)
1119 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1120 struct bge_rxchain *rc = &sc->bge_cdata.bge_rx_std_chain[i];
1122 if (rc->bge_mbuf != NULL) {
1123 bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag,
1124 sc->bge_cdata.bge_rx_std_dmamap[i]);
1125 m_freem(rc->bge_mbuf);
1126 rc->bge_mbuf = NULL;
1128 bzero(&sc->bge_ldata.bge_rx_std_ring[i],
1129 sizeof(struct bge_rx_bd));
1134 bge_init_rx_ring_jumbo(struct bge_softc *sc)
1136 struct bge_rcb *rcb;
1139 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1140 error = bge_newbuf_jumbo(sc, i, 1);
1145 sc->bge_jumbo = BGE_JUMBO_RX_RING_CNT - 1;
1147 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
1148 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 0);
1149 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1151 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
1157 bge_free_rx_ring_jumbo(struct bge_softc *sc)
1161 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1162 struct bge_rxchain *rc = &sc->bge_cdata.bge_rx_jumbo_chain[i];
1164 if (rc->bge_mbuf != NULL) {
1165 m_freem(rc->bge_mbuf);
1166 rc->bge_mbuf = NULL;
1168 bzero(&sc->bge_ldata.bge_rx_jumbo_ring[i],
1169 sizeof(struct bge_rx_bd));
1174 bge_free_tx_ring(struct bge_softc *sc)
1178 for (i = 0; i < BGE_TX_RING_CNT; i++) {
1179 if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
1180 bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag,
1181 sc->bge_cdata.bge_tx_dmamap[i]);
1182 m_freem(sc->bge_cdata.bge_tx_chain[i]);
1183 sc->bge_cdata.bge_tx_chain[i] = NULL;
1185 bzero(&sc->bge_ldata.bge_tx_ring[i],
1186 sizeof(struct bge_tx_bd));
1191 bge_init_tx_ring(struct bge_softc *sc)
1194 sc->bge_tx_saved_considx = 0;
1195 sc->bge_tx_prodidx = 0;
1197 /* Initialize transmit producer index for host-memory send ring. */
1198 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
1200 /* 5700 b2 errata */
1201 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
1202 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
1204 bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1205 /* 5700 b2 errata */
1206 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
1207 bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1213 bge_setmulti(struct bge_softc *sc)
1216 struct ifmultiaddr *ifma;
1217 uint32_t hashes[4] = { 0, 0, 0, 0 };
1218 int h, i;
1220 ifp = &sc->arpcom.ac_if;
1222 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
1223 for (i = 0; i < 4; i++)
1224 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
1228 /* First, zot all the existing filters. */
1229 for (i = 0; i < 4; i++)
1230 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);
1232 /* Now program new ones. */
1233 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1234 if (ifma->ifma_addr->sa_family != AF_LINK)
1235 continue;
1236 h = ether_crc32_le(
1237 LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
1238 ETHER_ADDR_LEN) & 0x7f;
1239 hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
1242 for (i = 0; i < 4; i++)
1243 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
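/*
 * Worked example: a 7-bit hash of 0x4b sets bit (0x4b & 0x1f) = 11 in
 * hashes[(0x4b & 0x60) >> 5] = hashes[2]; the 128 filter bits thus
 * spread across the four 32-bit BGE_MAR registers.
 */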
1247 * Do endian, PCI and DMA initialization. Also check the on-board ROM
1248 * self-test results.
1251 bge_chipinit(struct bge_softc *sc)
1254 uint32_t dma_rw_ctl;
1257 /* Set endian type before we access any non-PCI registers. */
1258 pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL,
1259 BGE_INIT | sc->bge_pci_miscctl, 4);
1261 /* Clear the MAC control register */
1262 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
1265 * Clear the MAC statistics block in the NIC's
1268 for (i = BGE_STATS_BLOCK;
1269 i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t))
1270 BGE_MEMWIN_WRITE(sc, i, 0);
1272 for (i = BGE_STATUS_BLOCK;
1273 i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t))
1274 BGE_MEMWIN_WRITE(sc, i, 0);
1276 if (sc->bge_chiprev == BGE_CHIPREV_5704_BX) {
1278 * Fix data corruption caused by non-qword write with WB.
1279 * Fix master abort in PCI mode.
1280 * Fix PCI latency timer.
1282 val = pci_read_config(sc->bge_dev, BGE_PCI_MSI_DATA + 2, 2);
1283 val |= (1 << 10) | (1 << 12) | (1 << 13);
1284 pci_write_config(sc->bge_dev, BGE_PCI_MSI_DATA + 2, val, 2);
1287 /* Set up the PCI DMA control register. */
1288 dma_rw_ctl = BGE_PCI_READ_CMD | BGE_PCI_WRITE_CMD;
1289 if (sc->bge_flags & BGE_FLAG_PCIE) {
1291 /* DMA read watermark not used on PCI-E */
1292 dma_rw_ctl |= (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
1293 } else if (sc->bge_flags & BGE_FLAG_PCIX) {
1295 if (sc->bge_asicrev == BGE_ASICREV_BCM5780) {
1296 dma_rw_ctl |= (0x4 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1297 (0x2 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
1298 dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL;
1299 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5714) {
1300 dma_rw_ctl |= (0x4 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1301 (0x2 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
1302 dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE_LOCAL;
1303 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1304 sc->bge_asicrev == BGE_ASICREV_BCM5704) {
1305 uint32_t rd_wat = 0x7;
1308 clkctl = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1f;
1309 if ((sc->bge_flags & BGE_FLAG_MAXADDR_40BIT) &&
1310 sc->bge_asicrev == BGE_ASICREV_BCM5704) {
1311 dma_rw_ctl |=
1312 BGE_PCIDMARWCTL_ONEDMA_ATONCE_LOCAL;
1313 } else if (clkctl == 0x6 || clkctl == 0x7) {
1314 dma_rw_ctl |=
1315 BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL;
1317 if (sc->bge_asicrev == BGE_ASICREV_BCM5703)
1318 rd_wat = 0x4;
1320 dma_rw_ctl |= (rd_wat << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1321 (3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
1322 dma_rw_ctl |= BGE_PCIDMARWCTL_ASRT_ALL_BE;
1324 dma_rw_ctl |= (0x3 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1325 (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
1329 /* Conventional PCI bus */
1330 dma_rw_ctl |= (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1331 (0x7 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
1332 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1333 sc->bge_asicrev != BGE_ASICREV_BCM5750)
1337 if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1338 sc->bge_asicrev == BGE_ASICREV_BCM5704) {
1339 dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
1340 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
1341 sc->bge_asicrev == BGE_ASICREV_BCM5701) {
1342 dma_rw_ctl |= BGE_PCIDMARWCTL_USE_MRM |
1343 BGE_PCIDMARWCTL_ASRT_ALL_BE;
1345 pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);
1348 * Set up general mode register.
1350 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS|
1351 BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS|
1352 BGE_MODECTL_TX_NO_PHDR_CSUM);
1355 * BCM5701 B5 has a bug causing data corruption when using
1356 * 64-bit DMA reads, which can be terminated early and then
1357 * completed later as 32-bit accesses, in combination with
1358 * certain bridges.
1360 if (sc->bge_asicrev == BGE_ASICREV_BCM5701 &&
1361 sc->bge_chipid == BGE_CHIPID_BCM5701_B5)
1362 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_FORCE_PCI32);
1365 * Disable memory write invalidate. Apparently it is not supported
1366 * properly by these devices. Also ensure that INTx isn't disabled,
1367 * as these chips need it even when using MSI.
1369 PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD,
1370 (PCIM_CMD_MWRICEN | PCIM_CMD_INTxDIS), 4);
1372 /* Set the timer prescaler (always 66MHz) */
1373 CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/);
1375 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
1376 DELAY(40); /* XXX */
1378 /* Put PHY into ready state */
1379 BGE_CLRBIT(sc, BGE_MISC_CFG, BGE_MISCCFG_EPHY_IDDQ);
1380 CSR_READ_4(sc, BGE_MISC_CFG); /* Flush */
1388 bge_blockinit(struct bge_softc *sc)
1390 struct bge_rcb *rcb;
1397 * Initialize the memory window pointer register so that
1398 * we can access the first 32K of internal NIC RAM. This will
1399 * allow us to set up the TX send ring RCBs and the RX return
1400 * ring RCBs, plus other things which live in NIC memory.
1402 CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);
1404 /* Note: the BCM5704 has a smaller mbuf space than other chips. */
1406 if (!BGE_IS_5705_PLUS(sc)) {
1407 /* Configure mbuf memory pool */
1408 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_BUFFPOOL_1);
1409 if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
1410 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
1412 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
1414 /* Configure DMA resource pool */
1415 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
1416 BGE_DMA_DESCRIPTORS);
1417 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
1420 /* Configure mbuf pool watermarks */
1421 if (!BGE_IS_5705_PLUS(sc)) {
1422 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
1423 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
1424 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1425 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
1426 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1427 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x04);
1428 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x10);
1430 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1431 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
1432 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1435 /* Configure DMA resource watermarks */
1436 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
1437 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);
1439 /* Enable buffer manager */
1440 CSR_WRITE_4(sc, BGE_BMAN_MODE,
1441 BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN);
1443 /* Poll for buffer manager start indication */
1444 for (i = 0; i < BGE_TIMEOUT; i++) {
1445 if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
1450 if (i == BGE_TIMEOUT) {
1451 if_printf(&sc->arpcom.ac_if,
1452 "buffer manager failed to start\n");
1456 /* Enable flow-through queues */
1457 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
1458 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
1460 /* Wait until queue initialization is complete */
1461 for (i = 0; i < BGE_TIMEOUT; i++) {
1462 if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
1467 if (i == BGE_TIMEOUT) {
1468 if_printf(&sc->arpcom.ac_if,
1469 "flow-through queue init failed\n");
1474 * Summary of rings supported by the controller:
1476 * Standard Receive Producer Ring
1477 * - This ring is used to feed receive buffers for "standard"
1478 * sized frames (typically 1536 bytes) to the controller.
1480 * Jumbo Receive Producer Ring
1481 * - This ring is used to feed receive buffers for jumbo sized
1482 * frames (i.e. anything bigger than the "standard" frames)
1483 * to the controller.
1485 * Mini Receive Producer Ring
1486 * - This ring is used to feed receive buffers for "mini"
1487 * sized frames to the controller.
1488 * - This feature required external memory for the controller
1489 * but was never used in a production system. Should always
1490 * be disabled.
1492 * Receive Return Ring
1493 * - After the controller has placed an incoming frame into a
1494 * receive buffer that buffer is moved into a receive return
1495 * ring. The driver is then responsible for passing the
1496 * buffer up to the stack. Many versions of the controller
1497 * support multiple RR rings.
1499 * Send Ring
1500 * - This ring is used for outgoing frames. Many versions of
1501 * the controller support multiple send rings.
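/*
 * Note: as the host coalescing setup later in bge_blockinit() spells
 * out, this driver uses only one send ring and one receive return
 * ring, whatever the controller supports.
 */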
1504 /* Initialize the standard receive producer ring control block. */
1505 rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb;
1506 rcb->bge_hostaddr.bge_addr_lo =
1507 BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr);
1508 rcb->bge_hostaddr.bge_addr_hi =
1509 BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr);
1510 if (BGE_IS_5705_PLUS(sc)) {
1512 * Bits 31-16: Programmable ring size (512, 256, 128, 64, 32)
1513 * Bits 15-2 : Reserved (should be 0)
1514 * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled
1517 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
1520 * Ring size is always XXX entries
1521 * Bits 31-16: Maximum RX frame size
1522 * Bits 15-2 : Reserved (should be 0)
1523 * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled
1526 rcb->bge_maxlen_flags =
1527 BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
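/*
 * Encoding sketch (assuming the usual RCB layout, with the ring
 * size/frame size in bits 31-16 and the flags below):
 *
 *	BGE_RCB_MAXLEN_FLAGS(len, flags) == ((len) << 16) | (flags)
 *
 * so BGE_RCB_MAXLEN_FLAGS(512, 0) above programs a 512-entry ring
 * with no flag bits set.
 */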
1529 rcb->bge_nicaddr = BGE_STD_RX_RINGS;
1530 /* Write the standard receive producer ring control block. */
1531 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
1532 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
1533 CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1534 CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
1535 /* Reset the standard receive producer ring producer index. */
1536 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0);
1539 * Initialize the jumbo RX producer ring control
1540 * block. We set the 'ring disabled' bit in the
1541 * flags field until we're actually ready to start
1542 * using this ring (i.e. once we set the MTU
1543 * high enough to require it).
1545 if (BGE_IS_JUMBO_CAPABLE(sc)) {
1546 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
1547 /* Get the jumbo receive producer ring RCB parameters. */
1548 rcb->bge_hostaddr.bge_addr_lo =
1549 BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1550 rcb->bge_hostaddr.bge_addr_hi =
1551 BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1552 rcb->bge_maxlen_flags =
1553 BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN,
1554 BGE_RCB_FLAG_RING_DISABLED);
1555 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
1556 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
1557 rcb->bge_hostaddr.bge_addr_hi);
1558 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
1559 rcb->bge_hostaddr.bge_addr_lo);
1560 /* Program the jumbo receive producer ring RCB parameters. */
1561 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
1562 rcb->bge_maxlen_flags);
1563 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
1564 /* Reset the jumbo receive producer ring producer index. */
1565 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
1568 /* Disable the mini receive producer ring RCB. */
1569 if (BGE_IS_5700_FAMILY(sc)) {
1570 rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb;
1571 rcb->bge_maxlen_flags =
1572 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
1573 CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
1574 rcb->bge_maxlen_flags);
1575 /* Reset the mini receive producer ring producer index. */
1576 bge_writembx(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
1579 /* Choose de-pipeline mode for BCM5906 A0, A1 and A2. */
1580 if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
1581 (sc->bge_chipid == BGE_CHIPID_BCM5906_A0 ||
1582 sc->bge_chipid == BGE_CHIPID_BCM5906_A1 ||
1583 sc->bge_chipid == BGE_CHIPID_BCM5906_A2)) {
1584 CSR_WRITE_4(sc, BGE_ISO_PKT_TX,
1585 (CSR_READ_4(sc, BGE_ISO_PKT_TX) & ~3) | 2);
1589 * The BD ring replenish thresholds control how often the
1590 * hardware fetches new BD's from the producer rings in host
1591 * memory. Setting the value too low on a busy system can
1592 * starve the hardware and reduce the throughput.
1594 * Set the BD ring replenish thresholds. The recommended
1595 * values are 1/8th the number of descriptors allocated to
1596 * the ring.
1598 if (BGE_IS_5705_PLUS(sc))
1599 val = 8;
1600 else
1601 val = BGE_STD_RX_RING_CNT / 8;
1602 CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, val);
1603 if (BGE_IS_JUMBO_CAPABLE(sc)) {
1604 CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH,
1605 BGE_JUMBO_RX_RING_CNT/8);
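/*
 * Worked example (assuming BGE_STD_RX_RING_CNT == 512): the standard
 * ring threshold is 512 / 8 = 64, so the chip refetches BDs after 64
 * fresh buffers have been posted by the host.
 */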
1609 * Disable all send rings by setting the 'ring disabled' bit
1610 * in the flags field of all the TX send ring control blocks,
1611 * located in NIC memory.
1613 if (!BGE_IS_5705_PLUS(sc)) {
1614 /* 5700 to 5704 had 16 send rings. */
1615 limit = BGE_TX_RINGS_EXTSSRAM_MAX;
1619 vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1620 for (i = 0; i < limit; i++) {
1621 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1622 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
1623 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1624 vrcb += sizeof(struct bge_rcb);
1627 /* Configure send ring RCB 0 (we use only the first ring) */
1628 vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1629 BGE_HOSTADDR(taddr, sc->bge_ldata.bge_tx_ring_paddr);
1630 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1631 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1632 RCB_WRITE_4(sc, vrcb, bge_nicaddr,
1633 BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
1634 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1635 BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
1638 * Disable all receive return rings by setting the
1639 * 'ring disabled' bit in the flags field of all the receive
1640 * return ring control blocks, located in NIC memory.
1642 if (!BGE_IS_5705_PLUS(sc))
1643 limit = BGE_RX_RINGS_MAX;
1644 else if (sc->bge_asicrev == BGE_ASICREV_BCM5755)
1648 /* Disable all receive return rings. */
1649 vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1650 for (i = 0; i < limit; i++) {
1651 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0);
1652 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0);
1653 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1654 BGE_RCB_FLAG_RING_DISABLED);
1655 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1656 bge_writembx(sc, BGE_MBX_RX_CONS0_LO +
1657 (i * (sizeof(uint64_t))), 0);
1658 vrcb += sizeof(struct bge_rcb);
1662 * Set up receive return ring 0. Note that the NIC address
1663 * for RX return rings is 0x0. The return rings live entirely
1664 * within the host, so the nicaddr field in the RCB isn't used.
1666 vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1667 BGE_HOSTADDR(taddr, sc->bge_ldata.bge_rx_return_ring_paddr);
1668 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1669 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1670 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1671 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1672 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));
1674 /* Set random backoff seed for TX */
1675 CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
1676 sc->arpcom.ac_enaddr[0] + sc->arpcom.ac_enaddr[1] +
1677 sc->arpcom.ac_enaddr[2] + sc->arpcom.ac_enaddr[3] +
1678 sc->arpcom.ac_enaddr[4] + sc->arpcom.ac_enaddr[5] +
1679 BGE_TX_BACKOFF_SEED_MASK);
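/*
 * Worked example: for station address 00:10:18:a1:b2:c3 the seed is
 * derived from 0x00 + 0x10 + 0x18 + 0xa1 + 0xb2 + 0xc3 = 0x23e, so
 * NICs sharing a segment pick different random backoff sequences.
 */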
1681 /* Set inter-packet gap */
1682 CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);
1685 * Specify which ring to use for packets that don't match
1686 * any RX rules.
1688 CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
1691 * Configure number of RX lists. One interrupt distribution
1692 * list, sixteen active lists, one bad frames class.
1694 CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
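/*
 * A plausible decode of 0x181 (bit layout assumed, not taken from
 * the datasheet): (1 << 8) | (16 << 3) | 1 == 0x181, matching one
 * interrupt distribution list, sixteen active lists and one bad
 * frames class.
 */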
1696 /* Initialize RX list placement stats mask. */
1697 CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
1698 CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
1700 /* Disable host coalescing until we get it set up */
1701 CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
1703 /* Poll to make sure it's shut down. */
1704 for (i = 0; i < BGE_TIMEOUT; i++) {
1705 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
1710 if (i == BGE_TIMEOUT) {
1711 if_printf(&sc->arpcom.ac_if,
1712 "host coalescing engine failed to idle\n");
1716 /* Set up host coalescing defaults */
1717 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
1718 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
1719 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_coal_bds);
1720 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_coal_bds);
1721 if (!BGE_IS_5705_PLUS(sc)) {
1722 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT,
1723 sc->bge_rx_coal_ticks_int);
1724 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT,
1725 sc->bge_tx_coal_ticks_int);
1729 * The datasheet (57XX-PG105-R) says BCM5705+ do not
1730 * have the following two registers; this is obviously wrong.
1732 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, sc->bge_rx_coal_bds_int);
1733 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, sc->bge_tx_coal_bds_int);
1735 /* Set up address of statistics block */
1736 if (!BGE_IS_5705_PLUS(sc)) {
1737 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI,
1738 BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr));
1739 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
1740 BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr));
1742 CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
1743 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
1744 CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
1747 /* Set up address of status block */
1748 bzero(sc->bge_ldata.bge_status_block, BGE_STATUS_BLK_SZ);
1749 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI,
1750 BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr));
1751 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
1752 BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr));
1755 * Set up the status block partial update size.
1757 * Because only a single TX ring, RX producer ring and RX return ring
1758 * are used, ask the device to update only the minimum part of the
1759 * status block, except on BCM5700 AX/BX, whose status block partial
1760 * update size can't be configured.
1762 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
1763 sc->bge_chipid != BGE_CHIPID_BCM5700_C0) {
1764 /* XXX Actually reserved on BCM5700 AX/BX */
1765 val = BGE_STATBLKSZ_FULL;
1767 val = BGE_STATBLKSZ_32BYTE;
1771 * Does not seem to have a visible effect in either
1772 * bulk data (1472B UDP datagram) or tiny data
1773 * (18B UDP datagram) TX tests.
1775 if (!BGE_IS_CRIPPLED(sc))
1776 val |= BGE_HCCMODE_CLRTICK_TX;
1779 /* Turn on host coalescing state machine */
1780 CSR_WRITE_4(sc, BGE_HCC_MODE, val | BGE_HCCMODE_ENABLE);
1782 /* Turn on RX BD completion state machine and enable attentions */
1783 CSR_WRITE_4(sc, BGE_RBDC_MODE,
1784 BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);
1786 /* Turn on RX list placement state machine */
1787 CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
1789 /* Turn on RX list selector state machine. */
1790 if (!BGE_IS_5705_PLUS(sc))
1791 CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
1793 val = BGE_MACMODE_TXDMA_ENB | BGE_MACMODE_RXDMA_ENB |
1794 BGE_MACMODE_RX_STATS_CLEAR | BGE_MACMODE_TX_STATS_CLEAR |
1795 BGE_MACMODE_RX_STATS_ENB | BGE_MACMODE_TX_STATS_ENB |
1796 BGE_MACMODE_FRMHDR_DMA_ENB;
1798 if (sc->bge_flags & BGE_FLAG_TBI)
1799 val |= BGE_PORTMODE_TBI;
1800 else if (sc->bge_flags & BGE_FLAG_MII_SERDES)
1801 val |= BGE_PORTMODE_GMII;
1803 val |= BGE_PORTMODE_MII;
1805 /* Turn on DMA, clear stats */
1806 CSR_WRITE_4(sc, BGE_MAC_MODE, val);
1808 /* Set misc. local control, enable interrupts on attentions */
1809 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);
1812 /* Assert GPIO pins for PHY reset */
1813 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
1814 BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
1815 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
1816 BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
1819 /* Turn on DMA completion state machine */
1820 if (!BGE_IS_5705_PLUS(sc))
1821 CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
1823 /* Turn on write DMA state machine */
1824 val = BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS;
1825 if (BGE_IS_5755_PLUS(sc)) {
1826 /* Enable host coalescing bug fix. */
1827 val |= BGE_WDMAMODE_STATUS_TAG_FIX;
1829 if (sc->bge_asicrev == BGE_ASICREV_BCM5785) {
1830 /* Request larger DMA burst size to get better performance. */
1831 val |= BGE_WDMAMODE_BURST_ALL_DATA;
1833 CSR_WRITE_4(sc, BGE_WDMA_MODE, val);
1836 if (sc->bge_asicrev == BGE_ASICREV_BCM5761 ||
1837 sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
1838 sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
1839 sc->bge_asicrev == BGE_ASICREV_BCM57780) {
1841 * Enable fix for read DMA FIFO overruns.
1842 * The fix is to limit the number of RX BDs
1843 * the hardware would fetch at a time.
1845 val = CSR_READ_4(sc, BGE_RDMA_RSRVCTRL);
1846 CSR_WRITE_4(sc, BGE_RDMA_RSRVCTRL,
1847 val | BGE_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
1850 /* Turn on read DMA state machine */
1851 val = BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS;
1852 if (sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
1853 sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
1854 sc->bge_asicrev == BGE_ASICREV_BCM57780)
1855 val |= BGE_RDMAMODE_BD_SBD_CRPT_ATTN |
1856 BGE_RDMAMODE_MBUF_RBD_CRPT_ATTN |
1857 BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN;
1858 if (sc->bge_flags & BGE_FLAG_PCIE)
1859 val |= BGE_RDMAMODE_FIFO_LONG_BURST;
1860 if (sc->bge_flags & BGE_FLAG_TSO)
1861 val |= BGE_RDMAMODE_TSO4_ENABLE;
1862 CSR_WRITE_4(sc, BGE_RDMA_MODE, val);
1865 /* Turn on RX data completion state machine */
1866 CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
1868 /* Turn on RX BD initiator state machine */
1869 CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
1871 /* Turn on RX data and RX BD initiator state machine */
1872 CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
1874 /* Turn on Mbuf cluster free state machine */
1875 if (!BGE_IS_5705_PLUS(sc))
1876 CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
1878 /* Turn on send BD completion state machine */
1879 CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
1881 /* Turn on send data completion state machine */
1882 val = BGE_SDCMODE_ENABLE;
1883 if (sc->bge_asicrev == BGE_ASICREV_BCM5761)
1884 val |= BGE_SDCMODE_CDELAY;
1885 CSR_WRITE_4(sc, BGE_SDC_MODE, val);
1887 /* Turn on send data initiator state machine */
1888 if (sc->bge_flags & BGE_FLAG_TSO)
1889 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE |
1890 BGE_SDIMODE_HW_LSO_PRE_DMA);
1892 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
1894 /* Turn on send BD initiator state machine */
1895 CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
1897 /* Turn on send BD selector state machine */
1898 CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
1900 CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
1901 CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
1902 BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);
1904 /* ack/clear link change events */
1905 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
1906 BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
1907 BGE_MACSTAT_LINK_CHANGED);
1908 CSR_WRITE_4(sc, BGE_MI_STS, 0);
1911 * Enable attention when the link has changed state for
1912 * devices that use auto polling.
1914 if (sc->bge_flags & BGE_FLAG_TBI) {
1915 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
1917 if (sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) {
1918 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode);
1921 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
1922 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) {
1923 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
1924 BGE_EVTENB_MI_INTERRUPT);
1929 * Clear any pending link state attention.
1930 * Otherwise some link state change events may be lost until attention
1931 * is cleared by the bge_intr() -> bge_softc.bge_link_upd() sequence.
1932 * This isn't necessary on newer BCM chips - perhaps enabling link
1933 * state change attentions implies clearing any pending attention.
1935 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
1936 BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
1937 BGE_MACSTAT_LINK_CHANGED);
1939 /* Enable link state change attentions. */
1940 BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
1946 * Probe for a Broadcom chip. Check the PCI vendor and device IDs
1947 * against our list and return its name if we find a match. Note
1948 * that since the Broadcom controller contains VPD support, we
1949 * can get the device name string from the controller itself instead
1950 * of the compiled-in string. This is a little slow, but it guarantees
1951 * we'll always announce the right product name.
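/*
 * For illustration, each bge_devs[] entry is assumed to pair the PCI
 * IDs with a human-readable name, roughly:
 *
 *	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5701,
 *	  "Broadcom BCM5701 Gigabit Ethernet" },
 *
 * terminated by an entry whose bge_name is NULL, which is what the
 * lookup loop below relies on.
 */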
1954 bge_probe(device_t dev)
1956 const struct bge_type *t;
1957 uint16_t product, vendor;
1959 product = pci_get_device(dev);
1960 vendor = pci_get_vendor(dev);
1962 for (t = bge_devs; t->bge_name != NULL; t++) {
1963 if (vendor == t->bge_vid && product == t->bge_did)
1966 if (t->bge_name == NULL)
1969 device_set_desc(dev, t->bge_name);
1974 bge_attach(device_t dev)
1977 struct bge_softc *sc;
1978 uint32_t hwcfg = 0, misccfg;
1979 int error = 0, rid, capmask;
1980 uint8_t ether_addr[ETHER_ADDR_LEN];
1981 uint16_t product, vendor;
1982 driver_intr_t *intr_func;
1983 uintptr_t mii_priv = 0;
1987 sc = device_get_softc(dev);
1989 callout_init_mp(&sc->bge_stat_timer);
1990 lwkt_serialize_init(&sc->bge_jslot_serializer);
1992 product = pci_get_device(dev);
1993 vendor = pci_get_vendor(dev);
1995 #ifndef BURN_BRIDGES
1996 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
1999 irq = pci_read_config(dev, PCIR_INTLINE, 4);
2000 mem = pci_read_config(dev, BGE_PCI_BAR0, 4);
2002 device_printf(dev, "chip is in D%d power mode "
2003 "-- setting to D0\n", pci_get_powerstate(dev));
2005 pci_set_powerstate(dev, PCI_POWERSTATE_D0);
2007 pci_write_config(dev, PCIR_INTLINE, irq, 4);
2008 pci_write_config(dev, BGE_PCI_BAR0, mem, 4);
2010 #endif /* !BURN_BRIDGES */
2013 * Map control/status registers.
2015 pci_enable_busmaster(dev);
2018 sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2021 if (sc->bge_res == NULL) {
2022 device_printf(dev, "couldn't map memory\n");
2026 sc->bge_btag = rman_get_bustag(sc->bge_res);
2027 sc->bge_bhandle = rman_get_bushandle(sc->bge_res);
2029 /* Save various chip information */
2031 pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >>
2032 BGE_PCIMISCCTL_ASICREV_SHIFT;
2033 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_USE_PRODID_REG) {
2034 /* All chips, which use BGE_PCI_PRODID_ASICREV, have CPMU */
2035 sc->bge_flags |= BGE_FLAG_CPMU;
2036 sc->bge_chipid = pci_read_config(dev, BGE_PCI_PRODID_ASICREV, 4);
2038 sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid);
2039 sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid);
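/*
 * The ASIC and chip revisions are assumed to be packed into the high
 * bits of the 32-bit chip ID, so BGE_ASICREV()/BGE_CHIPREV() are
 * likely simple shifts, e.g. (x) >> 12 and (x) >> 8 respectively;
 * the register header has the authoritative definitions.
 */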
2041 /* Save chipset family. */
2042 switch (sc->bge_asicrev) {
2043 case BGE_ASICREV_BCM5755:
2044 case BGE_ASICREV_BCM5761:
2045 case BGE_ASICREV_BCM5784:
2046 case BGE_ASICREV_BCM5785:
2047 case BGE_ASICREV_BCM5787:
2048 case BGE_ASICREV_BCM57780:
2049 sc->bge_flags |= BGE_FLAG_5755_PLUS | BGE_FLAG_575X_PLUS |
2053 case BGE_ASICREV_BCM5700:
2054 case BGE_ASICREV_BCM5701:
2055 case BGE_ASICREV_BCM5703:
2056 case BGE_ASICREV_BCM5704:
2057 sc->bge_flags |= BGE_FLAG_5700_FAMILY | BGE_FLAG_JUMBO;
2060 case BGE_ASICREV_BCM5714_A0:
2061 case BGE_ASICREV_BCM5780:
2062 case BGE_ASICREV_BCM5714:
2063 sc->bge_flags |= BGE_FLAG_5714_FAMILY;
2066 case BGE_ASICREV_BCM5750:
2067 case BGE_ASICREV_BCM5752:
2068 case BGE_ASICREV_BCM5906:
2069 sc->bge_flags |= BGE_FLAG_575X_PLUS;
2072 case BGE_ASICREV_BCM5705:
2073 sc->bge_flags |= BGE_FLAG_5705_PLUS;
2077 if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
2078 sc->bge_flags |= BGE_FLAG_NO_EEPROM;
2080 misccfg = CSR_READ_4(sc, BGE_MISC_CFG) & BGE_MISCCFG_BOARD_ID_MASK;
2081 if (sc->bge_asicrev == BGE_ASICREV_BCM5705 &&
2082 (misccfg == BGE_MISCCFG_BOARD_ID_5788 ||
2083 misccfg == BGE_MISCCFG_BOARD_ID_5788M))
2084 sc->bge_flags |= BGE_FLAG_5788;
2086 /* BCM5755 or higher and BCM5906 have the short DMA bug. */
2087 if (BGE_IS_5755_PLUS(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5906)
2088 sc->bge_flags |= BGE_FLAG_SHORTDMA;
2091 * Check if this is a PCI-X or PCI Express device.
2093 if (BGE_IS_5705_PLUS(sc)) {
2094 if (pci_is_pcie(dev)) {
2095 sc->bge_flags |= BGE_FLAG_PCIE;
2096 sc->bge_pciecap = pci_get_pciecap_ptr(sc->bge_dev);
2097 pcie_set_max_readrq(dev, PCIEM_DEVCTL_MAX_READRQ_4096);
2101 * Check if the device is in PCI-X Mode.
2102 * (This bit is not valid on PCI Express controllers.)
2104 if ((pci_read_config(sc->bge_dev, BGE_PCI_PCISTATE, 4) &
2105 BGE_PCISTATE_PCI_BUSMODE) == 0) {
2106 sc->bge_flags |= BGE_FLAG_PCIX;
2107 sc->bge_pcixcap = pci_get_pcixcap_ptr(sc->bge_dev);
2108 sc->bge_mbox_reorder = device_getenv_int(sc->bge_dev,
2112 device_printf(dev, "CHIP ID 0x%08x; "
2113 "ASIC REV 0x%02x; CHIP REV 0x%02x; %s\n",
2114 sc->bge_chipid, sc->bge_asicrev, sc->bge_chiprev,
2115 (sc->bge_flags & BGE_FLAG_PCIX) ? "PCI-X"
2116 : ((sc->bge_flags & BGE_FLAG_PCIE) ?
2120 * The 40bit DMA bug applies to the 5714/5715 controllers and is
2121 * not actually a MAC controller bug but an issue with the embedded
2122 * PCIe to PCI-X bridge in the device. Use the 40bit DMA workaround.
2124 if ((sc->bge_flags & BGE_FLAG_PCIX) &&
2125 (BGE_IS_5714_FAMILY(sc) || device_getenv_int(dev, "dma40b", 0)))
2126 sc->bge_flags |= BGE_FLAG_MAXADDR_40BIT;
2129 * When using the BCM5701 in PCI-X mode, data corruption has
2130 * been observed in the first few bytes of some received packets.
2131 * Aligning the packet buffer in memory eliminates the corruption.
2132 * Unfortunately, this misaligns the packet payloads. On platforms
2133 * which do not support unaligned accesses, we will realign the
2134 * payloads by copying the received packets.
2136 if (sc->bge_asicrev == BGE_ASICREV_BCM5701 &&
2137 (sc->bge_flags & BGE_FLAG_PCIX))
2138 sc->bge_flags |= BGE_FLAG_RX_ALIGNBUG;
2140 if (!BGE_IS_CRIPPLED(sc)) {
2141 if (device_getenv_int(dev, "status_tag", 1)) {
2142 sc->bge_flags |= BGE_FLAG_STATUS_TAG;
2143 sc->bge_pci_miscctl = BGE_PCIMISCCTL_TAGGED_STATUS;
2145 device_printf(dev, "enable status tag\n");
2149 if (BGE_IS_5755_PLUS(sc)) {
2151 * BCM5754 and BCM5787 share the same ASIC id, so an
2152 * explicit device id check is required.
2153 * For an unknown reason, TSO does not work on the BCM5755M.
2155 if (product != PCI_PRODUCT_BROADCOM_BCM5754 &&
2156 product != PCI_PRODUCT_BROADCOM_BCM5754M &&
2157 product != PCI_PRODUCT_BROADCOM_BCM5755M)
2158 sc->bge_flags |= BGE_FLAG_TSO;
2162 * Set various PHY quirk flags.
2165 if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
2166 sc->bge_asicrev == BGE_ASICREV_BCM5701) &&
2167 pci_get_subvendor(dev) == PCI_VENDOR_DELL)
2168 mii_priv |= BRGPHY_FLAG_NO_3LED;
2170 capmask = MII_CAPMASK_DEFAULT;
2171 if ((sc->bge_asicrev == BGE_ASICREV_BCM5703 &&
2172 (misccfg == 0x4000 || misccfg == 0x8000)) ||
2173 (sc->bge_asicrev == BGE_ASICREV_BCM5705 &&
2174 vendor == PCI_VENDOR_BROADCOM &&
2175 (product == PCI_PRODUCT_BROADCOM_BCM5901 ||
2176 product == PCI_PRODUCT_BROADCOM_BCM5901A2 ||
2177 product == PCI_PRODUCT_BROADCOM_BCM5705F)) ||
2178 (vendor == PCI_VENDOR_BROADCOM &&
2179 (product == PCI_PRODUCT_BROADCOM_BCM5751F ||
2180 product == PCI_PRODUCT_BROADCOM_BCM5753F ||
2181 product == PCI_PRODUCT_BROADCOM_BCM5787F)) ||
2182 product == PCI_PRODUCT_BROADCOM_BCM57790 ||
2183 sc->bge_asicrev == BGE_ASICREV_BCM5906) {
2185 capmask &= ~BMSR_EXTSTAT;
2188 mii_priv |= BRGPHY_FLAG_WIRESPEED;
2189 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
2190 (sc->bge_asicrev == BGE_ASICREV_BCM5705 &&
2191 (sc->bge_chipid != BGE_CHIPID_BCM5705_A0 &&
2192 sc->bge_chipid != BGE_CHIPID_BCM5705_A1)) ||
2193 sc->bge_asicrev == BGE_ASICREV_BCM5906)
2194 mii_priv &= ~BRGPHY_FLAG_WIRESPEED;
2196 if (sc->bge_chipid == BGE_CHIPID_BCM5701_A0 ||
2197 sc->bge_chipid == BGE_CHIPID_BCM5701_B0)
2198 mii_priv |= BRGPHY_FLAG_CRC_BUG;
2200 if (sc->bge_chiprev == BGE_CHIPREV_5703_AX ||
2201 sc->bge_chiprev == BGE_CHIPREV_5704_AX)
2202 mii_priv |= BRGPHY_FLAG_ADC_BUG;
2204 if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0)
2205 mii_priv |= BRGPHY_FLAG_5704_A0;
2207 if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
2208 mii_priv |= BRGPHY_FLAG_5906;
2210 if (BGE_IS_5705_PLUS(sc) &&
2211 sc->bge_asicrev != BGE_ASICREV_BCM5906 &&
2212 /* sc->bge_asicrev != BGE_ASICREV_BCM5717 && */
2213 sc->bge_asicrev != BGE_ASICREV_BCM5785 &&
2214 /* sc->bge_asicrev != BGE_ASICREV_BCM57765 && */
2215 sc->bge_asicrev != BGE_ASICREV_BCM57780) {
2216 if (sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
2217 sc->bge_asicrev == BGE_ASICREV_BCM5761 ||
2218 sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
2219 sc->bge_asicrev == BGE_ASICREV_BCM5787) {
2220 if (product != PCI_PRODUCT_BROADCOM_BCM5722 &&
2221 product != PCI_PRODUCT_BROADCOM_BCM5756)
2222 mii_priv |= BRGPHY_FLAG_JITTER_BUG;
2223 if (product == PCI_PRODUCT_BROADCOM_BCM5755M)
2224 mii_priv |= BRGPHY_FLAG_ADJUST_TRIM;
2226 mii_priv |= BRGPHY_FLAG_BER_BUG;
2231 * Allocate interrupt
2233 msi_enable = bge_msi_enable;
2234 if ((sc->bge_flags & BGE_FLAG_STATUS_TAG) == 0) {
2235 /* If "tagged status" is disabled, don't enable MSI */
2237 } else if (msi_enable) {
2238 msi_enable = 0; /* Disable by default */
2239 if (BGE_IS_575X_PLUS(sc)) {
2241 /* XXX we filter all 5714 chips */
2242 if (sc->bge_asicrev == BGE_ASICREV_BCM5714 ||
2243 (sc->bge_asicrev == BGE_ASICREV_BCM5750 &&
2244 (sc->bge_chiprev == BGE_CHIPREV_5750_AX ||
2245 sc->bge_chiprev == BGE_CHIPREV_5750_BX)))
2247 else if (BGE_IS_5755_PLUS(sc) ||
2248 sc->bge_asicrev == BGE_ASICREV_BCM5906)
2249 sc->bge_flags |= BGE_FLAG_ONESHOT_MSI;
2253 if (pci_find_extcap(dev, PCIY_MSI, &sc->bge_msicap)) {
2254 device_printf(dev, "no MSI capability\n");
2259 sc->bge_irq_type = pci_alloc_1intr(dev, msi_enable, &sc->bge_irq_rid,
2262 sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->bge_irq_rid,
2264 if (sc->bge_irq == NULL) {
2265 device_printf(dev, "couldn't map interrupt\n");
2270 if (sc->bge_irq_type == PCI_INTR_TYPE_MSI)
2273 sc->bge_flags &= ~BGE_FLAG_ONESHOT_MSI;
2275 /* Initialize if_name early, so that if_printf can be used */
2276 ifp = &sc->arpcom.ac_if;
2277 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2279 /* Try to reset the chip. */
2282 if (bge_chipinit(sc)) {
2283 device_printf(dev, "chip initialization failed\n");
2289 * Get station address
2291 error = bge_get_eaddr(sc, ether_addr);
2293 device_printf(dev, "failed to read station address\n");
2297 /* The 5705/5750 limits the RX return ring to 512 entries. */
2298 if (BGE_IS_5705_PLUS(sc))
2299 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
2301 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
2303 error = bge_dma_alloc(sc);
2307 /* Set default tuneable values. */
2308 sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
2309 sc->bge_rx_coal_ticks = BGE_RX_COAL_TICKS_DEF;
2310 sc->bge_tx_coal_ticks = BGE_TX_COAL_TICKS_DEF;
2311 sc->bge_rx_coal_bds = BGE_RX_COAL_BDS_DEF;
2312 sc->bge_tx_coal_bds = BGE_TX_COAL_BDS_DEF;
2313 if (sc->bge_flags & BGE_FLAG_STATUS_TAG) {
2314 sc->bge_rx_coal_ticks_int = BGE_RX_COAL_TICKS_DEF;
2315 sc->bge_tx_coal_ticks_int = BGE_TX_COAL_TICKS_DEF;
2316 sc->bge_rx_coal_bds_int = BGE_RX_COAL_BDS_DEF;
2317 sc->bge_tx_coal_bds_int = BGE_TX_COAL_BDS_DEF;
2319 sc->bge_rx_coal_ticks_int = BGE_RX_COAL_TICKS_MIN;
2320 sc->bge_tx_coal_ticks_int = BGE_TX_COAL_TICKS_MIN;
2321 sc->bge_rx_coal_bds_int = BGE_RX_COAL_BDS_MIN;
2322 sc->bge_tx_coal_bds_int = BGE_TX_COAL_BDS_MIN;
2325 /* Set up TX spare and reserved descriptor count */
2326 if (sc->bge_flags & BGE_FLAG_TSO) {
2327 sc->bge_txspare = BGE_NSEG_SPARE_TSO;
2328 sc->bge_txrsvd = BGE_NSEG_RSVD_TSO;
2330 sc->bge_txspare = BGE_NSEG_SPARE;
2331 sc->bge_txrsvd = BGE_NSEG_RSVD;
2334 /* Set up ifnet structure */
2336 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2337 ifp->if_ioctl = bge_ioctl;
2338 ifp->if_start = bge_start;
2339 #ifdef IFPOLL_ENABLE
2340 ifp->if_npoll = bge_npoll;
2342 ifp->if_watchdog = bge_watchdog;
2343 ifp->if_init = bge_init;
2344 ifp->if_mtu = ETHERMTU;
2345 ifp->if_capabilities = IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
2346 ifq_set_maxlen(&ifp->if_snd, BGE_TX_RING_CNT - 1);
2347 ifq_set_ready(&ifp->if_snd);
2350 * 5700 B0 chips do not support checksumming correctly due
2351 * to hardware bugs.
2353 if (sc->bge_chipid != BGE_CHIPID_BCM5700_B0) {
2354 ifp->if_capabilities |= IFCAP_HWCSUM;
2355 ifp->if_hwassist |= BGE_CSUM_FEATURES;
2357 if (sc->bge_flags & BGE_FLAG_TSO) {
2358 ifp->if_capabilities |= IFCAP_TSO;
2359 ifp->if_hwassist |= CSUM_TSO;
2361 ifp->if_capenable = ifp->if_capabilities;
2364 * Figure out what sort of media we have by checking the
2365 * hardware config word in the first 32k of NIC internal memory,
2366 * or fall back to examining the EEPROM if necessary.
2367 * Note: on some BCM5700 cards, this value appears to be unset.
2368 * If that's the case, we have to rely on identifying the NIC
2369 * by its PCI subsystem ID, as we do below for the SysKonnect
2370 * SK-9D41.
2372 if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER) {
2373 hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
2375 if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
2377 device_printf(dev, "failed to read EEPROM\n");
2381 hwcfg = ntohl(hwcfg);
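/* The EEPROM stores the config word big-endian, hence the ntohl(). */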
2384 /* The SysKonnect SK-9D41 is a 1000baseSX card. */
2385 if (pci_get_subvendor(dev) == PCI_PRODUCT_SCHNEIDERKOCH_SK_9D41 ||
2386 (hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER) {
2387 if (BGE_IS_5714_FAMILY(sc))
2388 sc->bge_flags |= BGE_FLAG_MII_SERDES;
2390 sc->bge_flags |= BGE_FLAG_TBI;
2394 if (sc->bge_flags & BGE_FLAG_CPMU)
2395 sc->bge_mi_mode = BGE_MIMODE_500KHZ_CONST;
2397 sc->bge_mi_mode = BGE_MIMODE_BASE;
2398 if (BGE_IS_5700_FAMILY(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5705) {
2399 /* Enable auto polling for BCM570[0-5]. */
2400 sc->bge_mi_mode |= BGE_MIMODE_AUTOPOLL;
2403 /* Set up link status update callbacks */
2404 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
2405 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) {
2406 sc->bge_link_upd = bge_bcm5700_link_upd;
2407 sc->bge_link_chg = BGE_MACSTAT_MI_INTERRUPT;
2408 } else if (sc->bge_flags & BGE_FLAG_TBI) {
2409 sc->bge_link_upd = bge_tbi_link_upd;
2410 sc->bge_link_chg = BGE_MACSTAT_LINK_CHANGED;
2411 } else if (sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) {
2412 sc->bge_link_upd = bge_autopoll_link_upd;
2413 sc->bge_link_chg = BGE_MACSTAT_LINK_CHANGED;
2415 sc->bge_link_upd = bge_copper_link_upd;
2416 sc->bge_link_chg = BGE_MACSTAT_LINK_CHANGED;
2420 * Broadcom's own driver always assumes the internal
2421 * PHY is at GMII address 1. On some chips, the PHY responds
2422 * to accesses at all addresses, which could cause us to
2423 * bogusly attach the PHY 32 times at probe time. Always
2424 * restricting the lookup to address 1 is simpler than
2425 * trying to figure out which chip revisions should be
2426 * exempted.
2430 if (sc->bge_flags & BGE_FLAG_TBI) {
2431 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK,
2432 bge_ifmedia_upd, bge_ifmedia_sts);
2433 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
2434 ifmedia_add(&sc->bge_ifmedia,
2435 IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL);
2436 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
2437 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO);
2438 sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
2440 struct mii_probe_args mii_args;
2442 mii_probe_args_init(&mii_args, bge_ifmedia_upd, bge_ifmedia_sts);
2443 mii_args.mii_probemask = 1 << sc->bge_phyno;
2444 mii_args.mii_capmask = capmask;
2445 mii_args.mii_privtag = MII_PRIVTAG_BRGPHY;
2446 mii_args.mii_priv = mii_priv;
2448 error = mii_probe(dev, &sc->bge_miibus, &mii_args);
2450 device_printf(dev, "MII without any PHY!\n");
2456 * Create sysctl nodes.
2458 sysctl_ctx_init(&sc->bge_sysctl_ctx);
2459 sc->bge_sysctl_tree = SYSCTL_ADD_NODE(&sc->bge_sysctl_ctx,
2460 SYSCTL_STATIC_CHILDREN(_hw),
2462 device_get_nameunit(dev),
2464 if (sc->bge_sysctl_tree == NULL) {
2465 device_printf(dev, "can't add sysctl node\n");
2470 SYSCTL_ADD_PROC(&sc->bge_sysctl_ctx,
2471 SYSCTL_CHILDREN(sc->bge_sysctl_tree),
2472 OID_AUTO, "rx_coal_ticks",
2473 CTLTYPE_INT | CTLFLAG_RW,
2474 sc, 0, bge_sysctl_rx_coal_ticks, "I",
2475 "Receive coalescing ticks (usec).");
2476 SYSCTL_ADD_PROC(&sc->bge_sysctl_ctx,
2477 SYSCTL_CHILDREN(sc->bge_sysctl_tree),
2478 OID_AUTO, "tx_coal_ticks",
2479 CTLTYPE_INT | CTLFLAG_RW,
2480 sc, 0, bge_sysctl_tx_coal_ticks, "I",
2481 "Transmit coalescing ticks (usec).");
2482 SYSCTL_ADD_PROC(&sc->bge_sysctl_ctx,
2483 SYSCTL_CHILDREN(sc->bge_sysctl_tree),
2484 OID_AUTO, "rx_coal_bds",
2485 CTLTYPE_INT | CTLFLAG_RW,
2486 sc, 0, bge_sysctl_rx_coal_bds, "I",
2487 "Receive max coalesced BD count.");
2488 SYSCTL_ADD_PROC(&sc->bge_sysctl_ctx,
2489 SYSCTL_CHILDREN(sc->bge_sysctl_tree),
2490 OID_AUTO, "tx_coal_bds",
2491 CTLTYPE_INT | CTLFLAG_RW,
2492 sc, 0, bge_sysctl_tx_coal_bds, "I",
2493 "Transmit max coalesced BD count.");
2494 if (sc->bge_flags & BGE_FLAG_PCIE) {
2496 * A common design characteristic for many Broadcom
2497 * client controllers is that they only support a
2498 * single outstanding DMA read operation on the PCIe
2499 * bus. This means that it will take twice as long to
2500 * fetch a TX frame that is split into header and
2501 * payload buffers as it does to fetch a single,
2502 * contiguous TX frame (2 reads vs. 1 read). For these
2503 * controllers, coalescing buffers to reduce the number
2504 * of memory reads is an effective way to get maximum
2505 * performance (about 940Mbps). Without collapsing TX
2506 * buffers the maximum TCP bulk transfer performance
2507 * is about 850Mbps. However, forcibly coalescing mbufs
2508 * consumes a lot of CPU cycles, so leave it off by
2509 * default.
2511 SYSCTL_ADD_INT(&sc->bge_sysctl_ctx,
2512 SYSCTL_CHILDREN(sc->bge_sysctl_tree),
2513 OID_AUTO, "force_defrag", CTLFLAG_RW,
2514 &sc->bge_force_defrag, 0,
2515 "Force defragment on TX path");
2517 if (sc->bge_flags & BGE_FLAG_STATUS_TAG) {
2518 if (!BGE_IS_5705_PLUS(sc)) {
2519 SYSCTL_ADD_PROC(&sc->bge_sysctl_ctx,
2520 SYSCTL_CHILDREN(sc->bge_sysctl_tree), OID_AUTO,
2521 "rx_coal_ticks_int", CTLTYPE_INT | CTLFLAG_RW,
2522 sc, 0, bge_sysctl_rx_coal_ticks_int, "I",
2523 "Receive coalescing ticks "
2524 "during interrupt (usec).");
2525 SYSCTL_ADD_PROC(&sc->bge_sysctl_ctx,
2526 SYSCTL_CHILDREN(sc->bge_sysctl_tree), OID_AUTO,
2527 "tx_coal_ticks_int", CTLTYPE_INT | CTLFLAG_RW,
2528 sc, 0, bge_sysctl_tx_coal_ticks_int, "I",
2529 "Transmit coalescing ticks "
2530 "during interrupt (usec).");
2532 SYSCTL_ADD_PROC(&sc->bge_sysctl_ctx,
2533 SYSCTL_CHILDREN(sc->bge_sysctl_tree), OID_AUTO,
2534 "rx_coal_bds_int", CTLTYPE_INT | CTLFLAG_RW,
2535 sc, 0, bge_sysctl_rx_coal_bds_int, "I",
2536 "Receive max coalesced BD count during interrupt.");
2537 SYSCTL_ADD_PROC(&sc->bge_sysctl_ctx,
2538 SYSCTL_CHILDREN(sc->bge_sysctl_tree), OID_AUTO,
2539 "tx_coal_bds_int", CTLTYPE_INT | CTLFLAG_RW,
2540 sc, 0, bge_sysctl_tx_coal_bds_int, "I",
2541 "Transmit max coalesced BD count during interrupt.");
2545 * Call MI attach routine.
2547 ether_ifattach(ifp, ether_addr, NULL);
2549 #ifdef IFPOLL_ENABLE
2551 ifpoll_compat_setup(&sc->bge_npoll,
2552 &sc->bge_sysctl_ctx, sc->bge_sysctl_tree, device_get_unit(dev),
2553 ifp->if_serializer);
2556 if (sc->bge_irq_type == PCI_INTR_TYPE_MSI) {
2557 if (sc->bge_flags & BGE_FLAG_ONESHOT_MSI) {
2558 intr_func = bge_msi_oneshot;
2560 device_printf(dev, "oneshot MSI\n");
2562 intr_func = bge_msi;
2564 } else if (sc->bge_flags & BGE_FLAG_STATUS_TAG) {
2565 intr_func = bge_intr_legacy;
2567 intr_func = bge_intr_crippled;
2569 error = bus_setup_intr(dev, sc->bge_irq, INTR_MPSAFE, intr_func, sc,
2570 &sc->bge_intrhand, ifp->if_serializer);
2572 ether_ifdetach(ifp);
2573 device_printf(dev, "couldn't set up irq\n");
2577 ifp->if_cpuid = rman_get_cpuid(sc->bge_irq);
2578 KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus);
2587 bge_detach(device_t dev)
2589 struct bge_softc *sc = device_get_softc(dev);
2591 if (device_is_attached(dev)) {
2592 struct ifnet *ifp = &sc->arpcom.ac_if;
2594 lwkt_serialize_enter(ifp->if_serializer);
2597 bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand);
2598 lwkt_serialize_exit(ifp->if_serializer);
2600 ether_ifdetach(ifp);
2603 if (sc->bge_flags & BGE_FLAG_TBI)
2604 ifmedia_removeall(&sc->bge_ifmedia);
2606 device_delete_child(dev, sc->bge_miibus);
2607 bus_generic_detach(dev);
2609 if (sc->bge_irq != NULL) {
2610 bus_release_resource(dev, SYS_RES_IRQ, sc->bge_irq_rid,
2613 if (sc->bge_irq_type == PCI_INTR_TYPE_MSI)
2614 pci_release_msi(dev);
2616 if (sc->bge_res != NULL) {
2617 bus_release_resource(dev, SYS_RES_MEMORY,
2618 BGE_PCI_BAR0, sc->bge_res);
2621 if (sc->bge_sysctl_tree != NULL)
2622 sysctl_ctx_free(&sc->bge_sysctl_ctx);
2630 bge_reset(struct bge_softc *sc)
2633 uint32_t cachesize, command, pcistate, reset;
2634 void (*write_op)(struct bge_softc *, uint32_t, uint32_t);
2639 if (BGE_IS_575X_PLUS(sc) && !BGE_IS_5714_FAMILY(sc) &&
2640 sc->bge_asicrev != BGE_ASICREV_BCM5906) {
2641 if (sc->bge_flags & BGE_FLAG_PCIE)
2642 write_op = bge_writemem_direct;
2644 write_op = bge_writemem_ind;
2646 write_op = bge_writereg_ind;
2649 /* Save some important PCI state. */
2650 cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
2651 command = pci_read_config(dev, BGE_PCI_CMD, 4);
2652 pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);
2654 pci_write_config(dev, BGE_PCI_MISC_CTL,
2655 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2656 BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW|
2657 sc->bge_pci_miscctl, 4);
2659 /* Disable fastboot on controllers that support it. */
2660 if (sc->bge_asicrev == BGE_ASICREV_BCM5752 ||
2661 BGE_IS_5755_PLUS(sc)) {
2663 if_printf(&sc->arpcom.ac_if, "Disabling fastboot\n");
2664 CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0x0);
2668 * Write the magic number to SRAM at offset 0xB50.
2669 * When the firmware finishes its initialization, it will
2670 * write ~BGE_MAGIC_NUMBER to the same location.
2672 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
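/*
 * Handshake sketch: the host seeds 0xB50 with BGE_MAGIC_NUMBER,
 * issues the global reset below, then polls the same location until
 * the bootcode has replaced the value with ~BGE_MAGIC_NUMBER (see
 * the BGE_FIRMWARE_TIMEOUT loop later in this function).
 */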
2674 reset = BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1);
2676 /* XXX: Broadcom Linux driver. */
2677 if (sc->bge_flags & BGE_FLAG_PCIE) {
2678 /* Force PCI-E 1.0a mode */
2679 if (sc->bge_asicrev != BGE_ASICREV_BCM5785 &&
2680 CSR_READ_4(sc, BGE_PCIE_PHY_TSTCTL) ==
2681 (BGE_PCIE_PHY_TSTCTL_PSCRAM |
2682 BGE_PCIE_PHY_TSTCTL_PCIE10)) {
2683 CSR_WRITE_4(sc, BGE_PCIE_PHY_TSTCTL,
2684 BGE_PCIE_PHY_TSTCTL_PSCRAM);
2686 if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
2687 /* Prevent PCIE link training during global reset */
2688 CSR_WRITE_4(sc, BGE_MISC_CFG, (1<<29));
2694 * Set GPHY Power Down Override to leave GPHY
2695 * powered up in D0 uninitialized.
2697 if (BGE_IS_5705_PLUS(sc) && (sc->bge_flags & BGE_FLAG_CPMU) == 0)
2698 reset |= BGE_MISCCFG_GPHY_PD_OVERRIDE;
2700 /* Issue global reset */
2701 write_op(sc, BGE_MISC_CFG, reset);
2703 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
2704 uint32_t status, ctrl;
2706 status = CSR_READ_4(sc, BGE_VCPU_STATUS);
2707 CSR_WRITE_4(sc, BGE_VCPU_STATUS,
2708 status | BGE_VCPU_STATUS_DRV_RESET);
2709 ctrl = CSR_READ_4(sc, BGE_VCPU_EXT_CTRL);
2710 CSR_WRITE_4(sc, BGE_VCPU_EXT_CTRL,
2711 ctrl & ~BGE_VCPU_EXT_CTRL_HALT_CPU);
2716 /* XXX: Broadcom Linux driver. */
2717 if (sc->bge_flags & BGE_FLAG_PCIE) {
2720 if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
2723 DELAY(500000); /* wait for link training to complete */
2724 v = pci_read_config(dev, 0xc4, 4);
2725 pci_write_config(dev, 0xc4, v | (1<<15), 4);
2728 devctl = pci_read_config(dev,
2729 sc->bge_pciecap + PCIER_DEVCTRL, 2);
2731 /* Disable no snoop and disable relaxed ordering. */
2732 devctl &= ~(PCIEM_DEVCTL_RELAX_ORDER | PCIEM_DEVCTL_NOSNOOP);
2734 /* Old PCI-E chips only support a 128-byte Max Payload Size. */
2735 if ((sc->bge_flags & BGE_FLAG_CPMU) == 0) {
2736 devctl &= ~PCIEM_DEVCTL_MAX_PAYLOAD_MASK;
2737 devctl |= PCIEM_DEVCTL_MAX_PAYLOAD_128;
2740 pci_write_config(dev, sc->bge_pciecap + PCIER_DEVCTRL,
2743 /* Clear error status. */
2744 pci_write_config(dev, sc->bge_pciecap + PCIER_DEVSTS,
2745 PCIEM_DEVSTS_CORR_ERR |
2746 PCIEM_DEVSTS_NFATAL_ERR |
2747 PCIEM_DEVSTS_FATAL_ERR |
2748 PCIEM_DEVSTS_UNSUPP_REQ, 2);
2751 /* Reset some of the PCI state that got zapped by reset */
2752 pci_write_config(dev, BGE_PCI_MISC_CTL,
2753 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2754 BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW|
2755 sc->bge_pci_miscctl, 4);
2756 pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
2757 pci_write_config(dev, BGE_PCI_CMD, command, 4);
2758 write_op(sc, BGE_MISC_CFG, (65 << 1));
2761 * Disable PCI-X relaxed ordering to ensure the status block update
2762 * arrives before the packet buffer DMA. Otherwise the driver may
2763 * read a stale status block.
2765 if (sc->bge_flags & BGE_FLAG_PCIX) {
2768 devctl = pci_read_config(dev,
2769 sc->bge_pcixcap + PCIXR_COMMAND, 2);
2770 devctl &= ~PCIXM_COMMAND_ERO;
2771 if (sc->bge_asicrev == BGE_ASICREV_BCM5703) {
2772 devctl &= ~PCIXM_COMMAND_MAX_READ;
2773 devctl |= PCIXM_COMMAND_MAX_READ_2048;
2774 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
2775 devctl &= ~(PCIXM_COMMAND_MAX_SPLITS |
2776 PCIXM_COMMAND_MAX_READ);
2777 devctl |= PCIXM_COMMAND_MAX_READ_2048;
2779 pci_write_config(dev, sc->bge_pcixcap + PCIXR_COMMAND,
2784 * Enable memory arbiter and re-enable MSI if necessary.
2786 if (BGE_IS_5714_FAMILY(sc)) {
2789 if (sc->bge_irq_type == PCI_INTR_TYPE_MSI) {
2791 * Resetting the BCM5714 family will clear the MSI
2792 * enable bit; restore it after resetting.
2794 PCI_SETBIT(sc->bge_dev, sc->bge_msicap + PCIR_MSI_CTRL,
2795 PCIM_MSICTRL_MSI_ENABLE, 2);
2796 BGE_SETBIT(sc, BGE_MSI_MODE, BGE_MSIMODE_ENABLE);
2798 val = CSR_READ_4(sc, BGE_MARB_MODE);
2799 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | val);
2801 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
2804 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
2805 for (i = 0; i < BGE_TIMEOUT; i++) {
2806 val = CSR_READ_4(sc, BGE_VCPU_STATUS);
2807 if (val & BGE_VCPU_STATUS_INIT_DONE)
2811 if (i == BGE_TIMEOUT) {
2812 if_printf(&sc->arpcom.ac_if, "reset timed out\n");
2817 * Poll until we see the 1's complement of the magic number.
2818 * This indicates that the firmware initialization
2819 * is complete.
2821 for (i = 0; i < BGE_FIRMWARE_TIMEOUT; i++) {
2822 val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
2823 if (val == ~BGE_MAGIC_NUMBER)
2827 if (i == BGE_FIRMWARE_TIMEOUT) {
2828 if_printf(&sc->arpcom.ac_if, "firmware handshake "
2829 "timed out, found 0x%08x\n", val);
2834 * XXX Wait for the value of the PCISTATE register to
2835 * return to its original pre-reset state. This is a
2836 * fairly good indicator of reset completion. If we don't
2837 * wait for the reset to fully complete, trying to read
2838 * from the device's non-PCI registers may yield garbage
2839 * results.
2841 for (i = 0; i < BGE_TIMEOUT; i++) {
2842 if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate)
2847 /* Fix up byte swapping */
2848 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS |
2849 BGE_MODECTL_BYTESWAP_DATA);
2851 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
2854 * The 5704 in TBI mode apparently needs some special
2855 * adjustment to ensure the SERDES drive level is set
2856 * to 1.2V.
2859 (sc->bge_flags & BGE_FLAG_TBI)) {
2862 serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG);
2863 serdescfg = (serdescfg & ~0xFFF) | 0x880;
2864 CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg);
2867 /* XXX: Broadcom Linux driver. */
2868 if ((sc->bge_flags & BGE_FLAG_PCIE) &&
2869 sc->bge_chipid != BGE_CHIPID_BCM5750_A0 &&
2870 sc->bge_asicrev != BGE_ASICREV_BCM5785) {
2873 /* Enable Data FIFO protection. */
2874 v = CSR_READ_4(sc, BGE_PCIE_TLDLPL_PORT);
2875 CSR_WRITE_4(sc, BGE_PCIE_TLDLPL_PORT, v | (1 << 25));
2882 * Frame reception handling. This is called if there's a frame
2883 * on the receive return list.
2885 * Note: we have to be able to handle two possibilities here:
2886 * 1) the frame is from the jumbo receive ring
2887 * 2) the frame is from the standard receive ring
2891 bge_rxeof(struct bge_softc *sc, uint16_t rx_prod)
2894 int stdcnt = 0, jumbocnt = 0;
2896 ifp = &sc->arpcom.ac_if;
2898 while (sc->bge_rx_saved_considx != rx_prod) {
2899 struct bge_rx_bd *cur_rx;
2901 struct mbuf *m = NULL;
2902 uint16_t vlan_tag = 0;
2906 &sc->bge_ldata.bge_rx_return_ring[sc->bge_rx_saved_considx];
2908 rxidx = cur_rx->bge_idx;
2909 BGE_INC(sc->bge_rx_saved_considx, sc->bge_return_ring_cnt);
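/*
 * BGE_INC() is assumed to advance a producer/consumer index modulo
 * the ring size, roughly (x) = ((x) + 1) % (y); all ring indices in
 * this driver wrap this way.
 */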
2912 if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
2914 vlan_tag = cur_rx->bge_vlan_tag;
2917 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
2918 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
2921 if (rxidx != sc->bge_jumbo) {
2923 if_printf(ifp, "sw jumbo index(%d) "
2924 "and hw jumbo index(%d) mismatch, drop!\n",
2925 sc->bge_jumbo, rxidx);
2926 bge_setup_rxdesc_jumbo(sc, rxidx);
2930 m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx].bge_mbuf;
2931 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2933 bge_setup_rxdesc_jumbo(sc, sc->bge_jumbo);
2936 if (bge_newbuf_jumbo(sc, sc->bge_jumbo, 0)) {
2938 bge_setup_rxdesc_jumbo(sc, sc->bge_jumbo);
2942 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
2945 if (rxidx != sc->bge_std) {
2947 if_printf(ifp, "sw std index(%d) "
2948 "and hw std index(%d) mismatch, drop!\n",
2949 sc->bge_std, rxidx);
2950 bge_setup_rxdesc_std(sc, rxidx);
2954 m = sc->bge_cdata.bge_rx_std_chain[rxidx].bge_mbuf;
2955 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2957 bge_setup_rxdesc_std(sc, sc->bge_std);
2960 if (bge_newbuf_std(sc, sc->bge_std, 0)) {
2962 bge_setup_rxdesc_std(sc, sc->bge_std);
2968 #if !defined(__i386__) && !defined(__x86_64__)
2970 * The x86 allows unaligned accesses, but for other
2971 * platforms we must make sure the payload is aligned.
2973 if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) {
2974 bcopy(m->m_data, m->m_data + ETHER_ALIGN,
2975 m->m_len);
2976 m->m_data += ETHER_ALIGN;
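/*
 * ETHER_ALIGN is 2, so shifting the frame up by two bytes puts the
 * IP header (which follows the 14-byte Ethernet header) back on a
 * 32-bit boundary.
 */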
2979 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
2980 m->m_pkthdr.rcvif = ifp;
2982 if (ifp->if_capenable & IFCAP_RXCSUM) {
2983 if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
2984 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
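/*
 * The hardware returns a ones'-complement checksum, so a valid IP
 * header checksum reads as 0xffff; XORing with 0xffff is therefore
 * a compact validity test.
 */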
2985 if ((cur_rx->bge_ip_csum ^ 0xffff) == 0)
2986 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2988 if ((cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) &&
2989 m->m_pkthdr.len >= BGE_MIN_FRAMELEN) {
2990 m->m_pkthdr.csum_data =
2991 cur_rx->bge_tcp_udp_csum;
2992 m->m_pkthdr.csum_flags |=
2993 CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2998 * If we received a packet with a vlan tag, pass it
2999 * to vlan_input() instead of ether_input().
3002 m->m_flags |= M_VLANTAG;
3003 m->m_pkthdr.ether_vlantag = vlan_tag;
3004 have_tag = vlan_tag = 0;
3006 ifp->if_input(ifp, m);
3009 bge_writembx(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
3011 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
3013 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
3017 bge_txeof(struct bge_softc *sc, uint16_t tx_cons)
3021 ifp = &sc->arpcom.ac_if;
3024 * Go through our tx ring and free mbufs for those
3025 * frames that have been sent.
3027 while (sc->bge_tx_saved_considx != tx_cons) {
3030 idx = sc->bge_tx_saved_considx;
3031 if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
3033 bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag,
3034 sc->bge_cdata.bge_tx_dmamap[idx]);
3035 m_freem(sc->bge_cdata.bge_tx_chain[idx]);
3036 sc->bge_cdata.bge_tx_chain[idx] = NULL;
3039 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
3043 if ((BGE_TX_RING_CNT - sc->bge_txcnt) >=
3044 (sc->bge_txrsvd + sc->bge_txspare))
3045 ifp->if_flags &= ~IFF_OACTIVE;
3047 if (sc->bge_txcnt == 0)
3050 if (!ifq_is_empty(&ifp->if_snd))
3054 #ifdef IFPOLL_ENABLE
3057 bge_npoll_compat(struct ifnet *ifp, void *arg __unused, int cycles __unused)
3059 struct bge_softc *sc = ifp->if_softc;
3060 struct bge_status_block *sblk = sc->bge_ldata.bge_status_block;
3061 uint16_t rx_prod, tx_cons;
3063 ASSERT_SERIALIZED(ifp->if_serializer);
3065 if (sc->bge_npoll.ifpc_stcount-- == 0) {
3066 sc->bge_npoll.ifpc_stcount = sc->bge_npoll.ifpc_stfrac;
3068 * Process link state changes.
3073 if (sc->bge_flags & BGE_FLAG_STATUS_TAG) {
3074 sc->bge_status_tag = sblk->bge_status_tag;
3076 * Use a load fence to ensure that status_tag
3077 * is saved before rx_prod and tx_cons.
3082 rx_prod = sblk->bge_idx[0].bge_rx_prod_idx;
3083 if (sc->bge_rx_saved_considx != rx_prod)
3084 bge_rxeof(sc, rx_prod);
3086 tx_cons = sblk->bge_idx[0].bge_tx_cons_idx;
3087 if (sc->bge_tx_saved_considx != tx_cons)
3088 bge_txeof(sc, tx_cons);
3090 if (sc->bge_flags & BGE_FLAG_STATUS_TAG)
3091 bge_writembx(sc, BGE_MBX_IRQ0_LO, sc->bge_status_tag << 24);
3093 if (sc->bge_coal_chg)
3094 bge_coal_change(sc);
3098 bge_npoll(struct ifnet *ifp, struct ifpoll_info *info)
3100 struct bge_softc *sc = ifp->if_softc;
3102 ASSERT_SERIALIZED(ifp->if_serializer);
3105 int cpuid = sc->bge_npoll.ifpc_cpuid;
3107 info->ifpi_rx[cpuid].poll_func = bge_npoll_compat;
3108 info->ifpi_rx[cpuid].arg = NULL;
3109 info->ifpi_rx[cpuid].serializer = ifp->if_serializer;
3111 if (ifp->if_flags & IFF_RUNNING)
3112 bge_disable_intr(sc);
3113 ifp->if_npoll_cpuid = cpuid;
3115 if (ifp->if_flags & IFF_RUNNING)
3116 bge_enable_intr(sc);
3117 ifp->if_npoll_cpuid = -1;
3121 #endif /* IFPOLL_ENABLE */
3124 bge_intr_crippled(void *xsc)
3126 struct bge_softc *sc = xsc;
3127 struct ifnet *ifp = &sc->arpcom.ac_if;
3132 * Ack the interrupt by writing something to BGE_MBX_IRQ0_LO. Don't
3133 * disable interrupts by writing nonzero like we used to, since with
3134 * our current organization this just gives complications and
3135 * pessimizations for re-enabling interrupts. We used to have races
3136 * instead of the necessary complications. Disabling interrupts
3137 * would just reduce the chance of a status update while we are
3138 * running (by switching to the interrupt-mode coalescence
3139 * parameters), but this chance is already very low so it is more
3140 * efficient to get another interrupt than prevent it.
3142 * We do the ack first to ensure another interrupt if there is a
3143 * status update after the ack. We don't check for the status
3144 * changing later because it is more efficient to get another
3145 * interrupt than prevent it, not quite as above (not checking is
3146 * a smaller optimization than not toggling the interrupt enable,
3147 * since checking doesn't involve PCI accesses and toggling requires
3148 * the status check). So toggling would probably be a pessimization
3149 * even with MSI. It would only be needed for using a task queue.
3151 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
3154 * Process link state changes.
3158 if (ifp->if_flags & IFF_RUNNING) {
3159 struct bge_status_block *sblk = sc->bge_ldata.bge_status_block;
3160 uint16_t rx_prod, tx_cons;
3162 rx_prod = sblk->bge_idx[0].bge_rx_prod_idx;
3163 if (sc->bge_rx_saved_considx != rx_prod)
3164 bge_rxeof(sc, rx_prod);
3166 tx_cons = sblk->bge_idx[0].bge_tx_cons_idx;
3167 if (sc->bge_tx_saved_considx != tx_cons)
3168 bge_txeof(sc, tx_cons);
3171 if (sc->bge_coal_chg)
3172 bge_coal_change(sc);
3176 bge_intr_legacy(void *xsc)
3178 struct bge_softc *sc = xsc;
3179 struct bge_status_block *sblk = sc->bge_ldata.bge_status_block;
3181 if (sc->bge_status_tag == sblk->bge_status_tag) {
3184 val = pci_read_config(sc->bge_dev, BGE_PCI_PCISTATE, 4);
3185 if (val & BGE_PCISTAT_INTR_NOTACT)
3191 * The interrupt will have to be disabled if tagged status
3192 * is used, else the interrupt will always be asserted on
3193 * certain chips (at least on BCM5750 AX/BX).
3195 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
3203 struct bge_softc *sc = xsc;
3205 /* Disable interrupt first */
3206 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
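/*
 * Mailbox convention used here: writing a nonzero value to
 * BGE_MBX_IRQ0_LO masks further interrupts; bge_intr() re-arms them
 * by writing the saved status tag (status_tag << 24) back to the
 * same mailbox.
 */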
3211 bge_msi_oneshot(void *xsc)
3217 bge_intr(struct bge_softc *sc)
3219 struct ifnet *ifp = &sc->arpcom.ac_if;
3220 struct bge_status_block *sblk = sc->bge_ldata.bge_status_block;
3221 uint16_t rx_prod, tx_cons;
3224 sc->bge_status_tag = sblk->bge_status_tag;
3226 * Use a load fence to ensure that status_tag is saved
3227 * before rx_prod, tx_cons and status.
3231 rx_prod = sblk->bge_idx[0].bge_rx_prod_idx;
3232 tx_cons = sblk->bge_idx[0].bge_tx_cons_idx;
3233 status = sblk->bge_status;
3235 if ((status & BGE_STATFLAG_LINKSTATE_CHANGED) || sc->bge_link_evt)
3238 if (ifp->if_flags & IFF_RUNNING) {
3239 if (sc->bge_rx_saved_considx != rx_prod)
3240 bge_rxeof(sc, rx_prod);
3242 if (sc->bge_tx_saved_considx != tx_cons)
3243 bge_txeof(sc, tx_cons);
3246 bge_writembx(sc, BGE_MBX_IRQ0_LO, sc->bge_status_tag << 24);
3248 if (sc->bge_coal_chg)
3249 bge_coal_change(sc);
3255 struct bge_softc *sc = xsc;
3256 struct ifnet *ifp = &sc->arpcom.ac_if;
3258 lwkt_serialize_enter(ifp->if_serializer);
3260 if (BGE_IS_5705_PLUS(sc))
3261 bge_stats_update_regs(sc);
3263 bge_stats_update(sc);
3265 if (sc->bge_flags & BGE_FLAG_TBI) {
3267 * Since auto-polling can't be used in TBI mode, we should poll
3268 * the link status manually. Here we register a pending link
3269 * event and trigger an interrupt.
3272 if (BGE_IS_CRIPPLED(sc))
3273 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
3275 BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
3276 } else if (!sc->bge_link) {
3277 mii_tick(device_get_softc(sc->bge_miibus));
3280 callout_reset(&sc->bge_stat_timer, hz, bge_tick, sc);
3282 lwkt_serialize_exit(ifp->if_serializer);
3286 bge_stats_update_regs(struct bge_softc *sc)
3288 struct ifnet *ifp = &sc->arpcom.ac_if;
3289 struct bge_mac_stats_regs stats;
3293 s = (uint32_t *)&stats;
3294 for (i = 0; i < sizeof(struct bge_mac_stats_regs); i += 4) {
3295 *s = CSR_READ_4(sc, BGE_RX_STATS + i);
3299 ifp->if_collisions +=
3300 (stats.dot3StatsSingleCollisionFrames +
3301 stats.dot3StatsMultipleCollisionFrames +
3302 stats.dot3StatsExcessiveCollisions +
3303 stats.dot3StatsLateCollisions) -
3308 bge_stats_update(struct bge_softc *sc)
3310 struct ifnet *ifp = &sc->arpcom.ac_if;
3313 stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;
3315 #define READ_STAT(sc, stats, stat) \
3316 CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))
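/*
 * Example expansion: READ_STAT(sc, stats,
 * txstats.dot3StatsLateCollisions.bge_addr_lo) reads the 32-bit low
 * word of that counter straight out of the chip's statistics block
 * at BGE_MEMWIN_START + BGE_STATS_BLOCK.
 */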
3318 ifp->if_collisions +=
3319 (READ_STAT(sc, stats,
3320 txstats.dot3StatsSingleCollisionFrames.bge_addr_lo) +
3321 READ_STAT(sc, stats,
3322 txstats.dot3StatsMultipleCollisionFrames.bge_addr_lo) +
3323 READ_STAT(sc, stats,
3324 txstats.dot3StatsExcessiveCollisions.bge_addr_lo) +
3325 READ_STAT(sc, stats,
3326 txstats.dot3StatsLateCollisions.bge_addr_lo)) -
3332 ifp->if_collisions +=
3333 (sc->bge_rdata->bge_info.bge_stats.dot3StatsSingleCollisionFrames +
3334 sc->bge_rdata->bge_info.bge_stats.dot3StatsMultipleCollisionFrames +
3335 sc->bge_rdata->bge_info.bge_stats.dot3StatsExcessiveCollisions +
3336 sc->bge_rdata->bge_info.bge_stats.dot3StatsLateCollisions) -
3342 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
3343 * pointers to descriptors.
3346 bge_encap(struct bge_softc *sc, struct mbuf **m_head0, uint32_t *txidx)
3348 struct bge_tx_bd *d = NULL, *last_d;
3349 uint16_t csum_flags = 0, mss = 0;
3350 bus_dma_segment_t segs[BGE_NSEG_NEW];
3352 int error, maxsegs, nsegs, idx, i;
3353 struct mbuf *m_head = *m_head0, *m_new;
3355 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
3356 error = bge_setup_tso(sc, m_head0, &mss, &csum_flags);
3360 } else if (m_head->m_pkthdr.csum_flags & BGE_CSUM_FEATURES) {
3361 if (m_head->m_pkthdr.csum_flags & CSUM_IP)
3362 csum_flags |= BGE_TXBDFLAG_IP_CSUM;
3363 if (m_head->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
3364 csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
3365 if (m_head->m_flags & M_LASTFRAG)
3366 csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
3367 else if (m_head->m_flags & M_FRAG)
3368 csum_flags |= BGE_TXBDFLAG_IP_FRAG;
3372 map = sc->bge_cdata.bge_tx_dmamap[idx];
3374 maxsegs = (BGE_TX_RING_CNT - sc->bge_txcnt) - sc->bge_txrsvd;
3375 KASSERT(maxsegs >= sc->bge_txspare,
3376 ("not enough segments %d", maxsegs));
3378 if (maxsegs > BGE_NSEG_NEW)
3379 maxsegs = BGE_NSEG_NEW;
3382 * Pad outbound frame to BGE_MIN_FRAMELEN for an unusual reason.
3383 * The bge hardware will pad out Tx runts to BGE_MIN_FRAMELEN,
3384 * but when such padded frames employ the bge IP/TCP checksum
3385 * offload, the hardware checksum assist gives incorrect results
3386 * (possibly from incorporating its own padding into the UDP/TCP
3387 * checksum; who knows). If we pad such runts with zeros, the
3388 * onboard checksum comes out correct.
3390 if ((csum_flags & BGE_TXBDFLAG_TCP_UDP_CSUM) &&
3391 m_head->m_pkthdr.len < BGE_MIN_FRAMELEN) {
3392 error = m_devpad(m_head, BGE_MIN_FRAMELEN);
3397 if ((sc->bge_flags & BGE_FLAG_SHORTDMA) && m_head->m_next != NULL) {
3398 m_new = bge_defrag_shortdma(m_head);
3399 if (m_new == NULL) {
3403 *m_head0 = m_head = m_new;
3405 if ((m_head->m_pkthdr.csum_flags & CSUM_TSO) == 0 &&
3406 sc->bge_force_defrag && (sc->bge_flags & BGE_FLAG_PCIE) &&
3407 m_head->m_next != NULL) {
3409 * Forcefully defragment the mbuf chain to overcome a hardware
3410 * limitation: these controllers support only a single outstanding
3411 * DMA read operation. If defragmentation fails, keep going with
3412 * the original mbuf chain.
3414 m_new = m_defrag(m_head, MB_DONTWAIT);
3416 *m_head0 = m_head = m_new;
3419 error = bus_dmamap_load_mbuf_defrag(sc->bge_cdata.bge_tx_mtag, map,
3420 m_head0, segs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
3425 bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag, map, BUS_DMASYNC_PREWRITE);
3427 for (i = 0; ; i++) {
3428 d = &sc->bge_ldata.bge_tx_ring[idx];
3430 d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr);
3431 d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr);
3432 d->bge_len = segs[i].ds_len;
3433 d->bge_flags = csum_flags;
3438 BGE_INC(idx, BGE_TX_RING_CNT);
3442 /* Set vlan tag to the first segment of the packet. */
3443 d = &sc->bge_ldata.bge_tx_ring[*txidx];
3444 if (m_head->m_flags & M_VLANTAG) {
3445 d->bge_flags |= BGE_TXBDFLAG_VLAN_TAG;
3446 d->bge_vlan_tag = m_head->m_pkthdr.ether_vlantag;
3448 d->bge_vlan_tag = 0;
3451 /* Mark the last segment as end of packet... */
3452 last_d->bge_flags |= BGE_TXBDFLAG_END;
3455 * Ensure that the map for this transmission is placed at
3456 * the array index of the last descriptor in this chain.
3458 sc->bge_cdata.bge_tx_dmamap[*txidx] = sc->bge_cdata.bge_tx_dmamap[idx];
3459 sc->bge_cdata.bge_tx_dmamap[idx] = map;
3460 sc->bge_cdata.bge_tx_chain[idx] = m_head;
3461 sc->bge_txcnt += nsegs;
3463 BGE_INC(idx, BGE_TX_RING_CNT);
3474 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
3475 * to the mbuf data regions directly in the transmit descriptors.
3478 bge_start(struct ifnet *ifp)
3480 struct bge_softc *sc = ifp->if_softc;
3481 struct mbuf *m_head = NULL;
3485 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
3488 prodidx = sc->bge_tx_prodidx;
3491 while (sc->bge_cdata.bge_tx_chain[prodidx] == NULL) {
3492 m_head = ifq_dequeue(&ifp->if_snd, NULL);
3498 * The code inside the if() block is never reached since we
3499 * must mark CSUM_IP_FRAGS in our if_hwassist to start getting
3500 * requests to checksum TCP/UDP in a fragmented packet.
3503 * Safety overkill. If this is a fragmented packet chain
3504 * with delayed TCP/UDP checksums, then only encapsulate
3505 * it if we have enough descriptors to handle the entire
3506 * chain at once.
3507 * (paranoia -- may not actually be needed)
3509 if ((m_head->m_flags & M_FIRSTFRAG) &&
3510 (m_head->m_pkthdr.csum_flags & CSUM_DELAY_DATA)) {
3511 if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
3512 m_head->m_pkthdr.csum_data + sc->bge_txrsvd) {
3513 ifp->if_flags |= IFF_OACTIVE;
3514 ifq_prepend(&ifp->if_snd, m_head);
3520 * Sanity check: avoid coming within bge_txrsvd
3521 * descriptors of the end of the ring. Also make
3522 * sure there are bge_txspare descriptors for
3523 * jumbo buffers' defragmentation.
3525 if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
3526 (sc->bge_txrsvd + sc->bge_txspare)) {
3527 ifp->if_flags |= IFF_OACTIVE;
3528 ifq_prepend(&ifp->if_snd, m_head);
3533 * Pack the data into the transmit ring. If we
3534 * don't have room, set the OACTIVE flag and wait
3535 * for the NIC to drain the ring.
3537 if (bge_encap(sc, &m_head, &prodidx)) {
3538 ifp->if_flags |= IFF_OACTIVE;
3544 ETHER_BPF_MTAP(ifp, m_head);
3551 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
3552 /* 5700 b2 errata */
3553 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
3554 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
3556 sc->bge_tx_prodidx = prodidx;
3559 * Set a timeout in case the chip goes out to lunch.
3567 struct bge_softc *sc = xsc;
3568 struct ifnet *ifp = &sc->arpcom.ac_if;
3572 ASSERT_SERIALIZED(ifp->if_serializer);
3574 /* Cancel pending I/O and flush buffers. */
3580 * Init the various state machines, ring
3581 * control blocks and firmware.
3583 if (bge_blockinit(sc)) {
3584 if_printf(ifp, "initialization failure\n");
3590 CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
3591 ETHER_HDR_LEN + ETHER_CRC_LEN + EVL_ENCAPLEN);
3593 /* Load our MAC address. */
3594 m = (uint16_t *)&sc->arpcom.ac_enaddr[0];
3595 CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
3596 CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
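/*
 * The station address is programmed as a 16-bit word (ADDR1_LO) plus
 * a 32-bit word (ADDR1_HI); the htons() calls keep the bytes in
 * network order within each register.
 */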
3598 /* Enable or disable promiscuous mode as needed. */
3601 /* Program multicast filter. */
3605 if (bge_init_rx_ring_std(sc)) {
3606 if_printf(ifp, "RX ring initialization failed\n");
3612 * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
3613 * memory to ensure that the chip has in fact read the first
3614 * entry of the ring.
3616 if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
3618 for (i = 0; i < 10; i++) {
3620 v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
3621 if (v == (MCLBYTES - ETHER_ALIGN))
3625 if_printf(ifp, "5705 A0 chip failed to load RX ring\n");
3628 /* Init jumbo RX ring. */
3629 if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN)) {
3630 if (bge_init_rx_ring_jumbo(sc)) {
3631 if_printf(ifp, "Jumbo RX ring initialization failed\n");
3637 /* Init our RX return ring index */
3638 sc->bge_rx_saved_considx = 0;
3641 bge_init_tx_ring(sc);
3643 /* Enable TX MAC state machine lockup fix. */
3644 mode = CSR_READ_4(sc, BGE_TX_MODE);
3645 if (BGE_IS_5755_PLUS(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5906)
3646 mode |= BGE_TXMODE_MBUF_LOCKUP_FIX;
3647 /* Turn on transmitter */
3648 CSR_WRITE_4(sc, BGE_TX_MODE, mode | BGE_TXMODE_ENABLE);
3650 /* Turn on receiver */
3651 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
3654 * Set the number of good frames to receive after RX MBUF
3655 * Low Watermark has been reached. After the RX MAC receives
3656 * this number of frames, it will drop subsequent incoming
3657 * frames until the MBUF High Watermark is reached.
3659 CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 2);
3661 if (sc->bge_irq_type == PCI_INTR_TYPE_MSI) {
3663 if_printf(ifp, "MSI_MODE: %#x\n",
3664 CSR_READ_4(sc, BGE_MSI_MODE));
3669 * Linux driver turns it on for all chips supporting MSI?!
3671 if (sc->bge_flags & BGE_FLAG_ONESHOT_MSI) {
3674 * According to 5722-PG101-R,
3675 * BGE_PCIE_TRANSACT_ONESHOT_MSI applies only to
3676 * the BCM5906.
3678 BGE_SETBIT(sc, BGE_PCIE_TRANSACT,
3679 BGE_PCIE_TRANSACT_ONESHOT_MSI);
3683 /* Tell firmware we're alive. */
3684 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3686 /* Enable host interrupts if polling(4) is not enabled. */
3687 PCI_SETBIT(sc->bge_dev, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA, 4);
3688 #ifdef IFPOLL_ENABLE
3689 if (ifp->if_flags & IFF_NPOLLING)
3690 bge_disable_intr(sc);
3693 bge_enable_intr(sc);
3695 bge_ifmedia_upd(ifp);
3697 ifp->if_flags |= IFF_RUNNING;
3698 ifp->if_flags &= ~IFF_OACTIVE;
3700 callout_reset(&sc->bge_stat_timer, hz, bge_tick, sc);
3704 * Set media options.
3707 bge_ifmedia_upd(struct ifnet *ifp)
3709 struct bge_softc *sc = ifp->if_softc;
3711 /* If this is a 1000baseX NIC, enable the TBI port. */
3712 if (sc->bge_flags & BGE_FLAG_TBI) {
3713 struct ifmedia *ifm = &sc->bge_ifmedia;
3715 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
3718 switch(IFM_SUBTYPE(ifm->ifm_media)) {
3721 * The BCM5704 ASIC appears to have a special
3722 * mechanism for programming the autoneg
3723 * advertisement registers in TBI mode.
3725 if (!bge_fake_autoneg &&
3726 sc->bge_asicrev == BGE_ASICREV_BCM5704) {
3729 CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
3730 sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
3731 sgdig |= BGE_SGDIGCFG_AUTO |
3732 BGE_SGDIGCFG_PAUSE_CAP |
3733 BGE_SGDIGCFG_ASYM_PAUSE;
3734 CSR_WRITE_4(sc, BGE_SGDIG_CFG,
3735 sgdig | BGE_SGDIGCFG_SEND);
3737 CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig);
3741 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
3742 BGE_CLRBIT(sc, BGE_MAC_MODE,
3743 BGE_MACMODE_HALF_DUPLEX);
3745 BGE_SETBIT(sc, BGE_MAC_MODE,
3746 BGE_MACMODE_HALF_DUPLEX);
3753 struct mii_data *mii = device_get_softc(sc->bge_miibus);
3757 if (mii->mii_instance) {
3758 struct mii_softc *miisc;
3760 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
3761 mii_phy_reset(miisc);
3766 * Force an interrupt so that we will call bge_link_upd
3767 * if needed and clear any pending link state attention.
3768 * Without this we would not get any further interrupts
3769 * for link state changes and thus would not bring the link UP
3770 * or be able to send in bge_start. The only way to get
3771 * things working was to receive a packet and get an RX
3772 * interrupt.
3774 * bge_tick should help for fiber cards, and we might not
3775 * need to do this here if BGE_FLAG_TBI is set, but as
3776 * we poll for fiber anyway it should not harm.
3778 if (BGE_IS_CRIPPLED(sc))
3779 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
3781 BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
3787 * Report current media status.
3790 bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
3792 struct bge_softc *sc = ifp->if_softc;
3794 if (sc->bge_flags & BGE_FLAG_TBI) {
3795 ifmr->ifm_status = IFM_AVALID;
3796 ifmr->ifm_active = IFM_ETHER;
3797 if (CSR_READ_4(sc, BGE_MAC_STS) &
3798 BGE_MACSTAT_TBI_PCS_SYNCHED) {
3799 ifmr->ifm_status |= IFM_ACTIVE;
3801 ifmr->ifm_active |= IFM_NONE;
3805 ifmr->ifm_active |= IFM_1000_SX;
3806 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
3807 ifmr->ifm_active |= IFM_HDX;
3809 ifmr->ifm_active |= IFM_FDX;
3811 struct mii_data *mii = device_get_softc(sc->bge_miibus);
3814 ifmr->ifm_active = mii->mii_media_active;
3815 ifmr->ifm_status = mii->mii_media_status;
3820 bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
3822 struct bge_softc *sc = ifp->if_softc;
3823 struct ifreq *ifr = (struct ifreq *)data;
3824 int mask, error = 0;
3826 ASSERT_SERIALIZED(ifp->if_serializer);
3830 if ((!BGE_IS_JUMBO_CAPABLE(sc) && ifr->ifr_mtu > ETHERMTU) ||
3831 (BGE_IS_JUMBO_CAPABLE(sc) &&
3832 ifr->ifr_mtu > BGE_JUMBO_MTU)) {
3834 } else if (ifp->if_mtu != ifr->ifr_mtu) {
3835 ifp->if_mtu = ifr->ifr_mtu;
3836 if (ifp->if_flags & IFF_RUNNING)
3841 if (ifp->if_flags & IFF_UP) {
3842 if (ifp->if_flags & IFF_RUNNING) {
3843 mask = ifp->if_flags ^ sc->bge_if_flags;
3846 * If only the state of the PROMISC flag
3847 * changed, then just use the 'set promisc
3848 * mode' command instead of reinitializing
3849 * the entire NIC. Doing a full re-init
3850 * means reloading the firmware and waiting
3851 * for it to start up, which may take a
3852 * second or two. Similarly for ALLMULTI.
3854 if (mask & IFF_PROMISC)
3856 if (mask & IFF_ALLMULTI)
3861 } else if (ifp->if_flags & IFF_RUNNING) {
3864 sc->bge_if_flags = ifp->if_flags;
3868 if (ifp->if_flags & IFF_RUNNING)
3873 if (sc->bge_flags & BGE_FLAG_TBI) {
3874 error = ifmedia_ioctl(ifp, ifr,
3875 &sc->bge_ifmedia, command);
3877 struct mii_data *mii;
3879 mii = device_get_softc(sc->bge_miibus);
3880 error = ifmedia_ioctl(ifp, ifr,
3881 &mii->mii_media, command);
3885 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
3886 if (mask & IFCAP_HWCSUM) {
3887 ifp->if_capenable ^= (mask & IFCAP_HWCSUM);
3888 if (ifp->if_capenable & IFCAP_TXCSUM)
3889 ifp->if_hwassist |= BGE_CSUM_FEATURES;
3891 ifp->if_hwassist &= ~BGE_CSUM_FEATURES;
3893 if (mask & IFCAP_TSO) {
3894 ifp->if_capenable ^= IFCAP_TSO;
3895 if (ifp->if_capenable & IFCAP_TSO)
3896 ifp->if_hwassist |= CSUM_TSO;
3898 ifp->if_hwassist &= ~CSUM_TSO;
3902 error = ether_ioctl(ifp, command, data);
3909 bge_watchdog(struct ifnet *ifp)
3911 struct bge_softc *sc = ifp->if_softc;
3913 if_printf(ifp, "watchdog timeout -- resetting\n");
3919 if (!ifq_is_empty(&ifp->if_snd))
3924 * Stop the adapter and free any mbufs allocated to the
3925 * RX and TX lists.
3928 bge_stop(struct bge_softc *sc)
3930 struct ifnet *ifp = &sc->arpcom.ac_if;
3932 ASSERT_SERIALIZED(ifp->if_serializer);
3934 callout_stop(&sc->bge_stat_timer);
3937 * Disable all of the receiver blocks
3939 bge_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
3940 bge_stop_block(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
3941 bge_stop_block(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
3942 if (BGE_IS_5700_FAMILY(sc))
3943 bge_stop_block(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
3944 bge_stop_block(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
3945 bge_stop_block(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
3946 bge_stop_block(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
3949 * Disable all of the transmit blocks
3951 bge_stop_block(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
3952 bge_stop_block(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
3953 bge_stop_block(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
3954 bge_stop_block(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
3955 bge_stop_block(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
3956 if (BGE_IS_5700_FAMILY(sc))
3957 bge_stop_block(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
3958 bge_stop_block(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
3961 * Shut down all of the memory managers and related
3962 * state machines.
3964 bge_stop_block(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
3965 bge_stop_block(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
3966 if (BGE_IS_5700_FAMILY(sc))
3967 bge_stop_block(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
3968 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
3969 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
3970 if (!BGE_IS_5705_PLUS(sc)) {
3971 BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
3972 BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
3975 /* Disable host interrupts. */
3976 bge_disable_intr(sc);
3979 * Tell firmware we're shutting down.
3981 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3983 /* Free the RX lists. */
3984 bge_free_rx_ring_std(sc);
3986 /* Free jumbo RX list. */
3987 if (BGE_IS_JUMBO_CAPABLE(sc))
3988 bge_free_rx_ring_jumbo(sc);
3990 /* Free TX buffers. */
3991 bge_free_tx_ring(sc);
3993 sc->bge_status_tag = 0;
3995 sc->bge_coal_chg = 0;
3997 sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
3999 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
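
/*
 * A note on the ordering in bge_stop() above: the RX/TX engines and
 * memory-manager blocks are quiesced before any mbufs are returned to
 * the system, so the chip can no longer DMA into (or out of) a buffer
 * by the time bge_free_rx_ring_std()/bge_free_tx_ring() release it.
 */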
/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static void
bge_shutdown(device_t dev)
{
	struct bge_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	bge_stop(sc);
	bge_reset(sc);
	lwkt_serialize_exit(ifp->if_serializer);
}
static int
bge_suspend(device_t dev)
{
	struct bge_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	bge_stop(sc);
	lwkt_serialize_exit(ifp->if_serializer);

	return 0;
}
static int
bge_resume(device_t dev)
{
	struct bge_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);

	if (ifp->if_flags & IFF_UP) {
		bge_init(sc);

		if (!ifq_is_empty(&ifp->if_snd))
			if_devstart(ifp);
	}

	lwkt_serialize_exit(ifp->if_serializer);

	return 0;
}
static void
bge_setpromisc(struct bge_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;

	if (ifp->if_flags & IFF_PROMISC)
		BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
	else
		BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
}
static void
bge_dma_free(struct bge_softc *sc)
{
	int i;

	/* Destroy RX mbuf DMA resources. */
	if (sc->bge_cdata.bge_rx_mtag != NULL) {
		for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
			bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag,
			    sc->bge_cdata.bge_rx_std_dmamap[i]);
		}
		bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag,
		    sc->bge_cdata.bge_rx_tmpmap);
		bus_dma_tag_destroy(sc->bge_cdata.bge_rx_mtag);
	}

	/* Destroy TX mbuf DMA resources. */
	if (sc->bge_cdata.bge_tx_mtag != NULL) {
		for (i = 0; i < BGE_TX_RING_CNT; i++) {
			bus_dmamap_destroy(sc->bge_cdata.bge_tx_mtag,
			    sc->bge_cdata.bge_tx_dmamap[i]);
		}
		bus_dma_tag_destroy(sc->bge_cdata.bge_tx_mtag);
	}

	/* Destroy the standard RX ring. */
	bge_dma_block_free(sc->bge_cdata.bge_rx_std_ring_tag,
	    sc->bge_cdata.bge_rx_std_ring_map,
	    sc->bge_ldata.bge_rx_std_ring);

	/* Destroy the jumbo buffer pool. */
	if (BGE_IS_JUMBO_CAPABLE(sc))
		bge_free_jumbo_mem(sc);

	/* Destroy the RX return ring. */
	bge_dma_block_free(sc->bge_cdata.bge_rx_return_ring_tag,
	    sc->bge_cdata.bge_rx_return_ring_map,
	    sc->bge_ldata.bge_rx_return_ring);

	/* Destroy the TX ring. */
	bge_dma_block_free(sc->bge_cdata.bge_tx_ring_tag,
	    sc->bge_cdata.bge_tx_ring_map,
	    sc->bge_ldata.bge_tx_ring);

	/* Destroy the status block. */
	bge_dma_block_free(sc->bge_cdata.bge_status_tag,
	    sc->bge_cdata.bge_status_map,
	    sc->bge_ldata.bge_status_block);

	/* Destroy the statistics block. */
	bge_dma_block_free(sc->bge_cdata.bge_stats_tag,
	    sc->bge_cdata.bge_stats_map,
	    sc->bge_ldata.bge_stats);

	/* Destroy the parent tag. */
	if (sc->bge_cdata.bge_parent_tag != NULL)
		bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag);
}
static int
bge_dma_alloc(struct bge_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int i, error;
	bus_addr_t lowaddr;
	bus_size_t txmaxsz;

	lowaddr = BUS_SPACE_MAXADDR;
	if (sc->bge_flags & BGE_FLAG_MAXADDR_40BIT)
		lowaddr = BGE_DMA_MAXADDR_40BIT;

	/*
	 * Allocate the parent bus DMA tag appropriate for PCI.
	 *
	 * All of the NetExtreme/NetLink controllers have a 4GB boundary
	 * DMA bug: whenever a DMA address crosses a multiple of 4GB
	 * (4GB, 8GB, 12GB, etc.) and makes the transition from
	 * 0xX_FFFF_FFFF to 0x(X+1)_0000_0000, an internal DMA state
	 * machine will lock up and cause the device to hang.
	 */
	error = bus_dma_tag_create(NULL, 1, BGE_DMA_BOUNDARY_4G,
	    lowaddr, BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    BUS_SPACE_MAXSIZE_32BIT, 0,
	    BUS_SPACE_MAXSIZE_32BIT,
	    0, &sc->bge_cdata.bge_parent_tag);
	if (error) {
		if_printf(ifp, "could not allocate parent dma tag\n");
		return error;
	}
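
#if 0
	/*
	 * A worked example of the 4GB-boundary erratum described above
	 * (sketch, not part of the original driver; seg_addr/seg_len are
	 * hypothetical locals): a 0x2000-byte transfer starting at
	 * 0x1_FFFF_F000 would step from 0x1_FFFF_FFFF to 0x2_0000_0000
	 * and trip the bug.  Passing BGE_DMA_BOUNDARY_4G above makes
	 * busdma split such requests; a manual crossing check would be:
	 */
	if (((seg_addr ^ (seg_addr + seg_len - 1)) &
	    ~(bus_addr_t)0xFFFFFFFFUL) != 0)
		panic("DMA segment crosses a 4GB multiple");
#endif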

	/*
	 * Create DMA tag and maps for RX mbufs.
	 */
	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
	    NULL, NULL, MCLBYTES, 1, MCLBYTES,
	    BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK,
	    &sc->bge_cdata.bge_rx_mtag);
	if (error) {
		if_printf(ifp, "could not allocate RX mbuf dma tag\n");
		return error;
	}

	error = bus_dmamap_create(sc->bge_cdata.bge_rx_mtag,
	    BUS_DMA_WAITOK, &sc->bge_cdata.bge_rx_tmpmap);
	if (error) {
		bus_dma_tag_destroy(sc->bge_cdata.bge_rx_mtag);
		sc->bge_cdata.bge_rx_mtag = NULL;
		return error;
	}

	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
		error = bus_dmamap_create(sc->bge_cdata.bge_rx_mtag,
		    BUS_DMA_WAITOK,
		    &sc->bge_cdata.bge_rx_std_dmamap[i]);
		if (error) {
			int j;

			for (j = 0; j < i; ++j) {
				bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag,
				    sc->bge_cdata.bge_rx_std_dmamap[j]);
			}
			bus_dma_tag_destroy(sc->bge_cdata.bge_rx_mtag);
			sc->bge_cdata.bge_rx_mtag = NULL;

			if_printf(ifp, "could not create DMA map for RX\n");
			return error;
		}
	}

	/*
	 * Create DMA tag and maps for TX mbufs.
	 */
	if (sc->bge_flags & BGE_FLAG_TSO)
		txmaxsz = IP_MAXPACKET + sizeof(struct ether_vlan_header);
	else
		txmaxsz = BGE_JUMBO_FRAMELEN;
	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    txmaxsz, BGE_NSEG_NEW, PAGE_SIZE,
	    BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK |
	    BUS_DMA_ONEBPAGE,
	    &sc->bge_cdata.bge_tx_mtag);
	if (error) {
		if_printf(ifp, "could not allocate TX mbuf dma tag\n");
		return error;
	}

	for (i = 0; i < BGE_TX_RING_CNT; i++) {
		error = bus_dmamap_create(sc->bge_cdata.bge_tx_mtag,
		    BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
		    &sc->bge_cdata.bge_tx_dmamap[i]);
		if (error) {
			int j;

			for (j = 0; j < i; ++j) {
				bus_dmamap_destroy(sc->bge_cdata.bge_tx_mtag,
				    sc->bge_cdata.bge_tx_dmamap[j]);
			}
			bus_dma_tag_destroy(sc->bge_cdata.bge_tx_mtag);
			sc->bge_cdata.bge_tx_mtag = NULL;

			if_printf(ifp, "could not create DMA map for TX\n");
			return error;
		}
	}

	/*
	 * Create DMA resources for the standard RX ring.
	 */
	error = bge_dma_block_alloc(sc, BGE_STD_RX_RING_SZ,
	    &sc->bge_cdata.bge_rx_std_ring_tag,
	    &sc->bge_cdata.bge_rx_std_ring_map,
	    (void *)&sc->bge_ldata.bge_rx_std_ring,
	    &sc->bge_ldata.bge_rx_std_ring_paddr);
	if (error) {
		if_printf(ifp, "could not create std RX ring\n");
		return error;
	}

	/*
	 * Create the jumbo buffer pool.
	 */
	if (BGE_IS_JUMBO_CAPABLE(sc)) {
		error = bge_alloc_jumbo_mem(sc);
		if (error) {
			if_printf(ifp, "could not create jumbo buffer pool\n");
			return error;
		}
	}

	/*
	 * Create DMA resources for the RX return ring.
	 */
	error = bge_dma_block_alloc(sc,
	    BGE_RX_RTN_RING_SZ(sc->bge_return_ring_cnt),
	    &sc->bge_cdata.bge_rx_return_ring_tag,
	    &sc->bge_cdata.bge_rx_return_ring_map,
	    (void *)&sc->bge_ldata.bge_rx_return_ring,
	    &sc->bge_ldata.bge_rx_return_ring_paddr);
	if (error) {
		if_printf(ifp, "could not create RX ret ring\n");
		return error;
	}

	/*
	 * Create DMA resources for the TX ring.
	 */
	error = bge_dma_block_alloc(sc, BGE_TX_RING_SZ,
	    &sc->bge_cdata.bge_tx_ring_tag,
	    &sc->bge_cdata.bge_tx_ring_map,
	    (void *)&sc->bge_ldata.bge_tx_ring,
	    &sc->bge_ldata.bge_tx_ring_paddr);
	if (error) {
		if_printf(ifp, "could not create TX ring\n");
		return error;
	}

	/*
	 * Create DMA resources for the status block.
	 */
	error = bge_dma_block_alloc(sc, BGE_STATUS_BLK_SZ,
	    &sc->bge_cdata.bge_status_tag,
	    &sc->bge_cdata.bge_status_map,
	    (void *)&sc->bge_ldata.bge_status_block,
	    &sc->bge_ldata.bge_status_block_paddr);
	if (error) {
		if_printf(ifp, "could not create status block\n");
		return error;
	}

	/*
	 * Create DMA resources for the statistics block.
	 */
	error = bge_dma_block_alloc(sc, BGE_STATS_SZ,
	    &sc->bge_cdata.bge_stats_tag,
	    &sc->bge_cdata.bge_stats_map,
	    (void *)&sc->bge_ldata.bge_stats,
	    &sc->bge_ldata.bge_stats_paddr);
	if (error) {
		if_printf(ifp, "could not create stats block\n");
		return error;
	}
	return 0;
}
static int
bge_dma_block_alloc(struct bge_softc *sc, bus_size_t size, bus_dma_tag_t *tag,
    bus_dmamap_t *map, void **addr, bus_addr_t *paddr)
{
	bus_dmamem_t dmem;
	int error;

	error = bus_dmamem_coherent(sc->bge_cdata.bge_parent_tag, PAGE_SIZE, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
	    size, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
	if (error)
		return error;

	*tag = dmem.dmem_tag;
	*map = dmem.dmem_map;
	*addr = dmem.dmem_addr;
	*paddr = dmem.dmem_busaddr;

	return 0;
}
static void
bge_dma_block_free(bus_dma_tag_t tag, bus_dmamap_t map, void *addr)
{
	bus_dmamap_unload(tag, map);
	bus_dmamem_free(tag, addr, map);
	bus_dma_tag_destroy(tag);
}
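
/*
 * Note: bge_dma_block_free() releases the pieces in the reverse of the
 * order in which bus_dmamem_coherent() produced them in
 * bge_dma_block_alloc(): unload the map, free the memory, then destroy
 * the tag.
 */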
/*
 * Grrr.  The link status word in the status block does
 * not work correctly on the BCM5700 rev AX and BX chips,
 * according to all available information.  Hence, we have
 * to enable MII interrupts in order to properly obtain
 * async link changes.  Unfortunately, this also means that
 * we have to read the MAC status register to detect link
 * changes, thereby adding an additional register access to
 * the interrupt handler.
 *
 * XXX: perhaps the link state detection procedure used for
 * BGE_CHIPID_BCM5700_B2 can be used for other BCM5700 revisions.
 */
static void
bge_bcm5700_link_upd(struct bge_softc *sc, uint32_t status __unused)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii = device_get_softc(sc->bge_miibus);

	mii_pollstat(mii);

	if (!sc->bge_link &&
	    (mii->mii_media_status & IFM_ACTIVE) &&
	    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
		sc->bge_link++;
		if (bootverbose)
			if_printf(ifp, "link UP\n");
	} else if (sc->bge_link &&
	    (!(mii->mii_media_status & IFM_ACTIVE) ||
	    IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
		sc->bge_link = 0;
		if (bootverbose)
			if_printf(ifp, "link DOWN\n");
	}

	/* Clear the interrupt. */
	CSR_WRITE_4(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_MI_INTERRUPT);
	bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR);
	bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR, BRGPHY_INTRS);
}
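
/*
 * The three-step "clear the interrupt" sequence above: re-enable the
 * MI interrupt event, read the PHY's interrupt status register (on
 * Broadcom PHYs the ISR is typically clear-on-read, which drops the
 * latched attention), then rewrite the interrupt mask to re-arm the
 * sources brgphy cares about.
 */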
static void
bge_tbi_link_upd(struct bge_softc *sc, uint32_t status)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;

#define PCS_ENCODE_ERR	(BGE_MACSTAT_PORT_DECODE_ERROR|BGE_MACSTAT_MI_COMPLETE)

	/*
	 * Sometimes PCS encoding errors are detected in
	 * TBI mode (on fiber NICs), and for some reason
	 * the chip will signal them as link changes.
	 * If we get a link change event, but the 'PCS
	 * encoding error' bit in the MAC status register
	 * is set, don't bother doing a link check.
	 * This avoids spurious "gigabit link up" messages
	 * that sometimes appear on fiber NICs during
	 * periods of heavy traffic.
	 */
	if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
		if (!sc->bge_link) {
			sc->bge_link++;
			if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
				BGE_CLRBIT(sc, BGE_MAC_MODE,
				    BGE_MACMODE_TBI_SEND_CFGS);
			}
			CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);

			if (bootverbose)
				if_printf(ifp, "link UP\n");

			ifp->if_link_state = LINK_STATE_UP;
			if_link_state_change(ifp);
		}
	} else if ((status & PCS_ENCODE_ERR) != PCS_ENCODE_ERR) {
		if (sc->bge_link) {
			sc->bge_link = 0;

			if (bootverbose)
				if_printf(ifp, "link DOWN\n");

			ifp->if_link_state = LINK_STATE_DOWN;
			if_link_state_change(ifp);
		}
	}

#undef PCS_ENCODE_ERR

	/* Clear the attention. */
	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
	    BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
	    BGE_MACSTAT_LINK_CHANGED);
}
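
/*
 * A worked example of the PCS_ENCODE_ERR test above: the link-down
 * path runs only while at least one of the two bits is clear.  When
 * BGE_MACSTAT_PORT_DECODE_ERROR and BGE_MACSTAT_MI_COMPLETE are both
 * set, (status & PCS_ENCODE_ERR) == PCS_ENCODE_ERR, so the "link
 * change" is dismissed as an encoding-error artifact instead of being
 * reported as a genuine loss of sync.
 */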
static void
bge_copper_link_upd(struct bge_softc *sc, uint32_t status __unused)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii = device_get_softc(sc->bge_miibus);

	mii_pollstat(mii);
	bge_miibus_statchg(sc->bge_dev);

	if (bootverbose) {
		if (sc->bge_link)
			if_printf(ifp, "link UP\n");
		else
			if_printf(ifp, "link DOWN\n");
	}

	/* Clear the attention. */
	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
	    BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
	    BGE_MACSTAT_LINK_CHANGED);
}
static void
bge_autopoll_link_upd(struct bge_softc *sc, uint32_t status __unused)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii = device_get_softc(sc->bge_miibus);

	mii_pollstat(mii);

	if (!sc->bge_link &&
	    (mii->mii_media_status & IFM_ACTIVE) &&
	    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
		sc->bge_link++;
		if (bootverbose)
			if_printf(ifp, "link UP\n");
	} else if (sc->bge_link &&
	    (!(mii->mii_media_status & IFM_ACTIVE) ||
	    IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
		sc->bge_link = 0;
		if (bootverbose)
			if_printf(ifp, "link DOWN\n");
	}

	/* Clear the attention. */
	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
	    BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
	    BGE_MACSTAT_LINK_CHANGED);
}
static int
bge_sysctl_rx_coal_ticks(SYSCTL_HANDLER_ARGS)
{
	struct bge_softc *sc = arg1;

	return bge_sysctl_coal_chg(oidp, arg1, arg2, req,
	    &sc->bge_rx_coal_ticks,
	    BGE_RX_COAL_TICKS_MIN, BGE_RX_COAL_TICKS_MAX,
	    BGE_RX_COAL_TICKS_CHG);
}

static int
bge_sysctl_tx_coal_ticks(SYSCTL_HANDLER_ARGS)
{
	struct bge_softc *sc = arg1;

	return bge_sysctl_coal_chg(oidp, arg1, arg2, req,
	    &sc->bge_tx_coal_ticks,
	    BGE_TX_COAL_TICKS_MIN, BGE_TX_COAL_TICKS_MAX,
	    BGE_TX_COAL_TICKS_CHG);
}

static int
bge_sysctl_rx_coal_bds(SYSCTL_HANDLER_ARGS)
{
	struct bge_softc *sc = arg1;

	return bge_sysctl_coal_chg(oidp, arg1, arg2, req,
	    &sc->bge_rx_coal_bds,
	    BGE_RX_COAL_BDS_MIN, BGE_RX_COAL_BDS_MAX,
	    BGE_RX_COAL_BDS_CHG);
}

static int
bge_sysctl_tx_coal_bds(SYSCTL_HANDLER_ARGS)
{
	struct bge_softc *sc = arg1;

	return bge_sysctl_coal_chg(oidp, arg1, arg2, req,
	    &sc->bge_tx_coal_bds,
	    BGE_TX_COAL_BDS_MIN, BGE_TX_COAL_BDS_MAX,
	    BGE_TX_COAL_BDS_CHG);
}

static int
bge_sysctl_rx_coal_ticks_int(SYSCTL_HANDLER_ARGS)
{
	struct bge_softc *sc = arg1;

	return bge_sysctl_coal_chg(oidp, arg1, arg2, req,
	    &sc->bge_rx_coal_ticks_int,
	    BGE_RX_COAL_TICKS_MIN, BGE_RX_COAL_TICKS_MAX,
	    BGE_RX_COAL_TICKS_INT_CHG);
}

static int
bge_sysctl_tx_coal_ticks_int(SYSCTL_HANDLER_ARGS)
{
	struct bge_softc *sc = arg1;

	return bge_sysctl_coal_chg(oidp, arg1, arg2, req,
	    &sc->bge_tx_coal_ticks_int,
	    BGE_TX_COAL_TICKS_MIN, BGE_TX_COAL_TICKS_MAX,
	    BGE_TX_COAL_TICKS_INT_CHG);
}

static int
bge_sysctl_rx_coal_bds_int(SYSCTL_HANDLER_ARGS)
{
	struct bge_softc *sc = arg1;

	return bge_sysctl_coal_chg(oidp, arg1, arg2, req,
	    &sc->bge_rx_coal_bds_int,
	    BGE_RX_COAL_BDS_MIN, BGE_RX_COAL_BDS_MAX,
	    BGE_RX_COAL_BDS_INT_CHG);
}

static int
bge_sysctl_tx_coal_bds_int(SYSCTL_HANDLER_ARGS)
{
	struct bge_softc *sc = arg1;

	return bge_sysctl_coal_chg(oidp, arg1, arg2, req,
	    &sc->bge_tx_coal_bds_int,
	    BGE_TX_COAL_BDS_MIN, BGE_TX_COAL_BDS_MAX,
	    BGE_TX_COAL_BDS_INT_CHG);
}
static int
bge_sysctl_coal_chg(SYSCTL_HANDLER_ARGS, uint32_t *coal,
    int coal_min, int coal_max, uint32_t coal_chg_mask)
{
	struct bge_softc *sc = arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error = 0, v;

	lwkt_serialize_enter(ifp->if_serializer);

	v = *coal;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (!error && req->newptr != NULL) {
		if (v < coal_min || v > coal_max) {
			error = EINVAL;
		} else {
			*coal = v;
			sc->bge_coal_chg |= coal_chg_mask;
		}
	}

	lwkt_serialize_exit(ifp->if_serializer);
	return error;
}
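
/*
 * Usage sketch (the exact sysctl node name is illustrative and depends
 * on how the driver registers its tree at attach time):
 *
 *	# sysctl hw.bge0.rx_coal_ticks=150
 *
 * The handler range-checks the value, stores it and sets the matching
 * BGE_*_CHG bit in sc->bge_coal_chg; the hardware registers are only
 * rewritten the next time bge_coal_change() runs with the serializer
 * held.
 */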
static void
bge_coal_change(struct bge_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t val;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if (sc->bge_coal_chg & BGE_RX_COAL_TICKS_CHG) {
		CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS,
		    sc->bge_rx_coal_ticks);
		DELAY(10);
		val = CSR_READ_4(sc, BGE_HCC_RX_COAL_TICKS);
		if (bootverbose) {
			if_printf(ifp, "rx_coal_ticks -> %u\n",
			    sc->bge_rx_coal_ticks);
		}
	}

	if (sc->bge_coal_chg & BGE_TX_COAL_TICKS_CHG) {
		CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS,
		    sc->bge_tx_coal_ticks);
		DELAY(10);
		val = CSR_READ_4(sc, BGE_HCC_TX_COAL_TICKS);
		if (bootverbose) {
			if_printf(ifp, "tx_coal_ticks -> %u\n",
			    sc->bge_tx_coal_ticks);
		}
	}

	if (sc->bge_coal_chg & BGE_RX_COAL_BDS_CHG) {
		CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS,
		    sc->bge_rx_coal_bds);
		DELAY(10);
		val = CSR_READ_4(sc, BGE_HCC_RX_MAX_COAL_BDS);
		if (bootverbose) {
			if_printf(ifp, "rx_coal_bds -> %u\n",
			    sc->bge_rx_coal_bds);
		}
	}

	if (sc->bge_coal_chg & BGE_TX_COAL_BDS_CHG) {
		CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS,
		    sc->bge_tx_coal_bds);
		DELAY(10);
		val = CSR_READ_4(sc, BGE_HCC_TX_MAX_COAL_BDS);
		if (bootverbose) {
			if_printf(ifp, "tx_max_coal_bds -> %u\n",
			    sc->bge_tx_coal_bds);
		}
	}

	if (sc->bge_coal_chg & BGE_RX_COAL_TICKS_INT_CHG) {
		CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT,
		    sc->bge_rx_coal_ticks_int);
		DELAY(10);
		val = CSR_READ_4(sc, BGE_HCC_RX_COAL_TICKS_INT);
		if (bootverbose) {
			if_printf(ifp, "rx_coal_ticks_int -> %u\n",
			    sc->bge_rx_coal_ticks_int);
		}
	}

	if (sc->bge_coal_chg & BGE_TX_COAL_TICKS_INT_CHG) {
		CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT,
		    sc->bge_tx_coal_ticks_int);
		DELAY(10);
		val = CSR_READ_4(sc, BGE_HCC_TX_COAL_TICKS_INT);
		if (bootverbose) {
			if_printf(ifp, "tx_coal_ticks_int -> %u\n",
			    sc->bge_tx_coal_ticks_int);
		}
	}

	if (sc->bge_coal_chg & BGE_RX_COAL_BDS_INT_CHG) {
		CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT,
		    sc->bge_rx_coal_bds_int);
		DELAY(10);
		val = CSR_READ_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT);
		if (bootverbose) {
			if_printf(ifp, "rx_coal_bds_int -> %u\n",
			    sc->bge_rx_coal_bds_int);
		}
	}

	if (sc->bge_coal_chg & BGE_TX_COAL_BDS_INT_CHG) {
		CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT,
		    sc->bge_tx_coal_bds_int);
		DELAY(10);
		val = CSR_READ_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT);
		if (bootverbose) {
			if_printf(ifp, "tx_coal_bds_int -> %u\n",
			    sc->bge_tx_coal_bds_int);
		}
	}

	sc->bge_coal_chg = 0;
}
static void
bge_enable_intr(struct bge_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_handler_enable(ifp->if_serializer);

	/*
	 * Enable the interrupt.
	 */
	bge_writembx(sc, BGE_MBX_IRQ0_LO, sc->bge_status_tag << 24);
	if (sc->bge_flags & BGE_FLAG_ONESHOT_MSI) {
		/* XXX Linux driver */
		bge_writembx(sc, BGE_MBX_IRQ0_LO, sc->bge_status_tag << 24);
	}

	/*
	 * Unmask the interrupt when we stop polling.
	 */
	PCI_CLRBIT(sc->bge_dev, BGE_PCI_MISC_CTL,
	    BGE_PCIMISCCTL_MASK_PCI_INTR, 4);

	/*
	 * Trigger another interrupt, since the above write to
	 * interrupt mailbox 0 may acknowledge a pending interrupt.
	 */
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
}
static void
bge_disable_intr(struct bge_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;

	/*
	 * Mask the interrupt when we start polling.
	 */
	PCI_SETBIT(sc->bge_dev, BGE_PCI_MISC_CTL,
	    BGE_PCIMISCCTL_MASK_PCI_INTR, 4);

	/*
	 * Acknowledge a possibly asserted interrupt.
	 */
	bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);

	sc->bge_npoll.ifpc_stcount = 0;

	lwkt_serialize_handler_disable(ifp->if_serializer);
}
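
/*
 * Note on the mailbox handshake used by the two routines above:
 * writing a non-zero value (1) to BGE_MBX_IRQ0_LO tells the chip the
 * host is busy and keeps further interrupts deasserted, while writing
 * the current status tag (shifted into the high byte) acknowledges the
 * last status-block update and re-enables delivery.  The extra
 * BGE_MLC_INTR_SET kick in bge_enable_intr() covers the window where
 * the mailbox write itself acknowledged a pending interrupt.
 */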
static int
bge_get_eaddr_mem(struct bge_softc *sc, uint8_t ether_addr[])
{
	uint32_t mac_addr;
	int ret = 1;

	mac_addr = bge_readmem_ind(sc, 0x0c14);
	if ((mac_addr >> 16) == 0x484b) {
		ether_addr[0] = (uint8_t)(mac_addr >> 8);
		ether_addr[1] = (uint8_t)mac_addr;
		mac_addr = bge_readmem_ind(sc, 0x0c18);
		ether_addr[2] = (uint8_t)(mac_addr >> 24);
		ether_addr[3] = (uint8_t)(mac_addr >> 16);
		ether_addr[4] = (uint8_t)(mac_addr >> 8);
		ether_addr[5] = (uint8_t)mac_addr;
		ret = 0;
	}
	return ret;
}
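
/*
 * Worked example with hypothetical SRAM contents: if the word at
 * 0x0c14 reads 0x484b0011 (0x484b happens to be ASCII "HK", used as
 * the validity signature) and the word at 0x0c18 reads 0x22334455,
 * the check passes and the recovered station address is
 * 00:11:22:33:44:55.
 */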
static int
bge_get_eaddr_nvram(struct bge_softc *sc, uint8_t ether_addr[])
{
	int mac_offset = BGE_EE_MAC_OFFSET;

	if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
		mac_offset = BGE_EE_MAC_OFFSET_5906;

	return bge_read_nvram(sc, ether_addr, mac_offset + 2, ETHER_ADDR_LEN);
}
static int
bge_get_eaddr_eeprom(struct bge_softc *sc, uint8_t ether_addr[])
{
	if (sc->bge_flags & BGE_FLAG_NO_EEPROM)
		return 1;

	return bge_read_eeprom(sc, ether_addr, BGE_EE_MAC_OFFSET + 2,
	    ETHER_ADDR_LEN);
}
static int
bge_get_eaddr(struct bge_softc *sc, uint8_t eaddr[])
{
	static const bge_eaddr_fcn_t bge_eaddr_funcs[] = {
		/* NOTE: Order is critical */
		bge_get_eaddr_mem,
		bge_get_eaddr_nvram,
		bge_get_eaddr_eeprom,
		NULL
	};
	const bge_eaddr_fcn_t *func;

	for (func = bge_eaddr_funcs; *func != NULL; ++func) {
		if ((*func)(sc, eaddr) == 0)
			break;
	}
	return (*func == NULL ? ENXIO : 0);
}
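
/*
 * On the "Order is critical" note above: bge_get_eaddr() simply walks
 * the NULL-terminated table and the first function to return 0 wins,
 * so device memory is consulted before NVRAM, and NVRAM before the
 * EEPROM; presumably an address deposited in SRAM by bootcode should
 * override the raw NVRAM/EEPROM contents (an assumption, the original
 * note does not say why).
 */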
/*
 * NOTE: 'm' is not freed upon failure
 */
static struct mbuf *
bge_defrag_shortdma(struct mbuf *m)
{
	struct mbuf *n;
	int found;

	/*
	 * If the device receives two back-to-back send BDs with less
	 * than or equal to 8 total bytes then the device may hang.
	 * The two back-to-back send BDs must be in the same frame for
	 * this failure to occur.  Scan the mbuf chain and see whether
	 * two such back-to-back send BDs exist.  If so, allocate a new
	 * mbuf and copy the frame to work around the silicon bug.
	 */
	for (n = m, found = 0; n != NULL; n = n->m_next) {
		if (n->m_len < 8) {
			found++;
			if (found > 1)
				break;
			continue;
		}
		found = 0;
	}

	if (found > 1)
		n = m_defrag(m, MB_DONTWAIT);
	else
		n = m;
	return n;
}
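
#if 0
	/*
	 * Caller-side sketch (hypothetical; mirrors the NOTE above that
	 * 'm' is not freed upon failure).  For example, a chain of a
	 * 14-byte mbuf -> 4-byte mbuf -> 3-byte mbuf has two
	 * back-to-back short BDs and would be collapsed here.
	 */
	struct mbuf *n;

	n = bge_defrag_shortdma(m);
	if (n == NULL) {
		m_freem(m);	/* our job, not bge_defrag_shortdma()'s */
		return ENOBUFS;
	}
	m = n;
#endif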
static void
bge_stop_block(struct bge_softc *sc, bus_size_t reg, uint32_t bit)
{
	int i;

	BGE_CLRBIT(sc, reg, bit);
	for (i = 0; i < BGE_TIMEOUT; i++) {
		if ((CSR_READ_4(sc, reg) & bit) == 0)
			return;
		DELAY(100);
	}
}
static void
bge_link_poll(struct bge_softc *sc)
{
	uint32_t status;

	status = CSR_READ_4(sc, BGE_MAC_STS);
	if ((status & sc->bge_link_chg) || sc->bge_link_evt) {
		sc->bge_link_evt = 0;
		sc->bge_link_upd(sc, status);
	}
}
static void
bge_enable_msi(struct bge_softc *sc)
{
	uint32_t msi_mode;

	msi_mode = CSR_READ_4(sc, BGE_MSI_MODE);
	msi_mode |= BGE_MSIMODE_ENABLE;
	if (sc->bge_flags & BGE_FLAG_ONESHOT_MSI) {
		/*
		 * According to all of the datasheets that are publicly
		 * available, bit 5 of the MSI_MODE register is defined
		 * to be "MSI FIFO Underrun Attn" for BCM5755+ and
		 * BCM5906, on which "oneshot MSI" is enabled.  However,
		 * it is always safe to clear it here.
		 */
		msi_mode &= ~BGE_MSIMODE_ONESHOT_DISABLE;
	}
	CSR_WRITE_4(sc, BGE_MSI_MODE, msi_mode);
}
static int
bge_setup_tso(struct bge_softc *sc, struct mbuf **mp,
    uint16_t *mss0, uint16_t *flags0)
{
	struct mbuf *m = *mp;
	struct ip *ip;
	struct tcphdr *th;
	int thoff, iphlen, hoff, hlen;
	uint16_t flags, mss;

	KASSERT(M_WRITABLE(m), ("TSO mbuf not writable"));

	hoff = m->m_pkthdr.csum_lhlen;
	iphlen = m->m_pkthdr.csum_iphlen;
	thoff = m->m_pkthdr.csum_thlen;

	KASSERT(hoff > 0, ("invalid ether header len"));
	KASSERT(iphlen > 0, ("invalid ip header len"));
	KASSERT(thoff > 0, ("invalid tcp header len"));

	if (__predict_false(m->m_len < hoff + iphlen + thoff)) {
		m = m_pullup(m, hoff + iphlen + thoff);
		if (m == NULL) {
			*mp = NULL;
			return ENOBUFS;
		}
		*mp = m;
	}
	ip = mtodoff(m, struct ip *, hoff);
	th = mtodoff(m, struct tcphdr *, hoff + iphlen);

	mss = m->m_pkthdr.tso_segsz;
	flags = BGE_TXBDFLAG_CPU_PRE_DMA | BGE_TXBDFLAG_CPU_POST_DMA;

	ip->ip_len = htons(mss + iphlen + thoff);
	th->th_sum = 0;

	hlen = (iphlen + thoff) >> 2;
	mss |= (hlen << 11);

	*mss0 = mss;
	*flags0 = flags;

	return 0;
}
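
/*
 * Note on the encoding above (as this path implements it): the
 * controller's TSO descriptor reuses the 16-bit MSS field to also
 * carry the combined IP + TCP header length.  hlen is that length in
 * 32-bit words (at most (60 + 60) / 4 = 30, so it fits in 5 bits) and
 * is packed into bits 11..15, leaving bits 0..10 for the real segment
 * size, e.g. a standard 1460-byte MSS.
 */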