network code: Convert if_multiaddrs from LIST to TAILQ.
[dragonfly.git] / sys / dev / netif / bge / if_bge.c
CommitLineData
984263bc
MD
1/*
2 * Copyright (c) 2001 Wind River Systems
3 * Copyright (c) 1997, 1998, 1999, 2001
4 * Bill Paul <wpaul@windriver.com>. All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 * must display the following acknowledgement:
16 * This product includes software developed by Bill Paul.
17 * 4. Neither the name of the author nor the names of any co-contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31 * THE POSSIBILITY OF SUCH DAMAGE.
32 *
011c0f93 33 * $FreeBSD: src/sys/dev/bge/if_bge.c,v 1.3.2.39 2005/07/03 03:41:18 silby Exp $
591dfc77 34 * $DragonFly: src/sys/dev/netif/bge/if_bge.c,v 1.111 2008/10/22 14:24:24 sephe Exp $
1de703da 35 *
984263bc
MD
36 */
37
38/*
39 * Broadcom BCM570x family gigabit ethernet driver for FreeBSD.
40 *
41 * Written by Bill Paul <wpaul@windriver.com>
42 * Senior Engineer, Wind River Systems
43 */
44
45/*
46 * The Broadcom BCM5700 is based on technology originally developed by
47 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
48 * MAC chips. The BCM5700, sometimes refered to as the Tigon III, has
49 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
50 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
51 * frames, highly configurable RX filtering, and 16 RX and TX queues
52 * (which, along with RX filter rules, can be used for QOS applications).
53 * Other features, such as TCP segmentation, may be available as part
54 * of value-added firmware updates. Unlike the Tigon I and Tigon II,
55 * firmware images can be stored in hardware and need not be compiled
56 * into the driver.
57 *
58 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
59 * function in a 32-bit/64-bit 33/66Mhz bus, or a 64-bit/133Mhz bus.
60 *
61 * The BCM5701 is a single-chip solution incorporating both the BCM5700
62 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
63 * does not support external SSRAM.
64 *
65 * Broadcom also produces a variation of the BCM5700 under the "Altima"
66 * brand name, which is functionally similar but lacks PCI-X support.
67 *
68 * Without external SSRAM, you can only have at most 4 TX rings,
69 * and the use of the mini RX ring is disabled. This seems to imply
70 * that these features are simply not available on the BCM5701. As a
71 * result, this driver does not implement any support for the mini RX
72 * ring.
73 */
74
315fe0ee 75#include "opt_polling.h"
a7db2caa 76
984263bc 77#include <sys/param.h>
62be1357 78#include <sys/bus.h>
20c9a969 79#include <sys/endian.h>
62be1357 80#include <sys/kernel.h>
6b880771 81#include <sys/ktr.h>
9db4b353 82#include <sys/interrupt.h>
984263bc
MD
83#include <sys/mbuf.h>
84#include <sys/malloc.h>
984263bc 85#include <sys/queue.h>
62be1357 86#include <sys/rman.h>
16dca0df 87#include <sys/serialize.h>
62be1357
SZ
88#include <sys/socket.h>
89#include <sys/sockio.h>
055d06f0 90#include <sys/sysctl.h>
984263bc 91
62be1357
SZ
92#include <net/bpf.h>
93#include <net/ethernet.h>
984263bc
MD
94#include <net/if.h>
95#include <net/if_arp.h>
984263bc
MD
96#include <net/if_dl.h>
97#include <net/if_media.h>
984263bc 98#include <net/if_types.h>
62be1357 99#include <net/ifq_var.h>
1f2de5d4 100#include <net/vlan/if_vlan_var.h>
b637f170 101#include <net/vlan/if_vlan_ether.h>
984263bc 102
1f2de5d4
MD
103#include <dev/netif/mii_layer/mii.h>
104#include <dev/netif/mii_layer/miivar.h>
1f2de5d4 105#include <dev/netif/mii_layer/brgphyreg.h>
984263bc 106
f952ab63 107#include <bus/pci/pcidevs.h>
1f2de5d4
MD
108#include <bus/pci/pcireg.h>
109#include <bus/pci/pcivar.h>
984263bc 110
62be1357
SZ
111#include <dev/netif/bge/if_bgereg.h>
112
113/* "device miibus" required. See GENERIC if you get errors here. */
114#include "miibus_if.h"
984263bc
MD
115
116#define BGE_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP)
cb623c48 117#define BGE_MIN_FRAME 60
984263bc 118
/*
 * Table of PCI vendor/product IDs this driver attaches to, along with
 * the description string reported at probe time.  Terminated by an
 * all-zero sentinel entry.
 */
static const struct bge_type bge_devs[] = {
	{ PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3C996,
		"3COM 3C996 Gigabit Ethernet" },

	{ PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_BCM5700,
		"Alteon BCM5700 Gigabit Ethernet" },
	{ PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_BCM5701,
		"Alteon BCM5701 Gigabit Ethernet" },

	/* NOTE(review): AC1001 maps to an "AC1002" string — matches upstream;
	 * verify intentional. */
	{ PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1000,
		"Altima AC1000 Gigabit Ethernet" },
	{ PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1001,
		"Altima AC1002 Gigabit Ethernet" },
	{ PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC9100,
		"Altima AC9100 Gigabit Ethernet" },

	{ PCI_VENDOR_APPLE, PCI_PRODUCT_APPLE_BCM5701,
		"Apple BCM5701 Gigabit Ethernet" },

	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5700,
		"Broadcom BCM5700 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5701,
		"Broadcom BCM5701 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702,
		"Broadcom BCM5702 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702X,
		"Broadcom BCM5702X Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702_ALT,
		"Broadcom BCM5702 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703,
		"Broadcom BCM5703 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703X,
		"Broadcom BCM5703X Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703A3,
		"Broadcom BCM5703 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704C,
		"Broadcom BCM5704C Dual Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704S,
		"Broadcom BCM5704S Dual Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704S_ALT,
		"Broadcom BCM5704S Dual Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705,
		"Broadcom BCM5705 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705F,
		"Broadcom BCM5705F Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705K,
		"Broadcom BCM5705K Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705M,
		"Broadcom BCM5705M Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705M_ALT,
		"Broadcom BCM5705M Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5714,
		"Broadcom BCM5714C Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5714S,
		"Broadcom BCM5714S Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5715,
		"Broadcom BCM5715 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5715S,
		"Broadcom BCM5715S Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5720,
		"Broadcom BCM5720 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5721,
		"Broadcom BCM5721 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5722,
		"Broadcom BCM5722 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5750,
		"Broadcom BCM5750 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5750M,
		"Broadcom BCM5750M Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751,
		"Broadcom BCM5751 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751F,
		"Broadcom BCM5751F Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751M,
		"Broadcom BCM5751M Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5752,
		"Broadcom BCM5752 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5752M,
		"Broadcom BCM5752M Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5753,
		"Broadcom BCM5753 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5753F,
		"Broadcom BCM5753F Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5753M,
		"Broadcom BCM5753M Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5754,
		"Broadcom BCM5754 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5754M,
		"Broadcom BCM5754M Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5755,
		"Broadcom BCM5755 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5755M,
		"Broadcom BCM5755M Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5756,
		"Broadcom BCM5756 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5780,
		"Broadcom BCM5780 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5780S,
		"Broadcom BCM5780S Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5781,
		"Broadcom BCM5781 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5782,
		"Broadcom BCM5782 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5786,
		"Broadcom BCM5786 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5787,
		"Broadcom BCM5787 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5787F,
		"Broadcom BCM5787F Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5787M,
		"Broadcom BCM5787M Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5788,
		"Broadcom BCM5788 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5789,
		"Broadcom BCM5789 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5901,
		"Broadcom BCM5901 Fast Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5901A2,
		"Broadcom BCM5901A2 Fast Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5903M,
		"Broadcom BCM5903M Fast Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5906,
		"Broadcom BCM5906 Fast Ethernet"},
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5906M,
		"Broadcom BCM5906M Fast Ethernet"},

	{ PCI_VENDOR_SCHNEIDERKOCH, PCI_PRODUCT_SCHNEIDERKOCH_SK_9DX1,
		"SysKonnect Gigabit Ethernet" },

	{ 0, 0, NULL }
};
250
0ecb11d7
SZ
251#define BGE_IS_JUMBO_CAPABLE(sc) ((sc)->bge_flags & BGE_FLAG_JUMBO)
252#define BGE_IS_5700_FAMILY(sc) ((sc)->bge_flags & BGE_FLAG_5700_FAMILY)
253#define BGE_IS_5705_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_5705_PLUS)
254#define BGE_IS_5714_FAMILY(sc) ((sc)->bge_flags & BGE_FLAG_5714_FAMILY)
255#define BGE_IS_575X_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_575X_PLUS)
256
591dfc77
SZ
257typedef int (*bge_eaddr_fcn_t)(struct bge_softc *, uint8_t[]);
258
33c39a69
JS
259static int bge_probe(device_t);
260static int bge_attach(device_t);
261static int bge_detach(device_t);
33c39a69
JS
262static void bge_txeof(struct bge_softc *);
263static void bge_rxeof(struct bge_softc *);
264
265static void bge_tick(void *);
266static void bge_stats_update(struct bge_softc *);
267static void bge_stats_update_regs(struct bge_softc *);
4a607ed6 268static int bge_encap(struct bge_softc *, struct mbuf **, uint32_t *);
33c39a69 269
315fe0ee
MD
270#ifdef DEVICE_POLLING
271static void bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count);
272#endif
33c39a69 273static void bge_intr(void *);
ba39cc82
SZ
274static void bge_enable_intr(struct bge_softc *);
275static void bge_disable_intr(struct bge_softc *);
33c39a69
JS
276static void bge_start(struct ifnet *);
277static int bge_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
278static void bge_init(void *);
279static void bge_stop(struct bge_softc *);
280static void bge_watchdog(struct ifnet *);
281static void bge_shutdown(device_t);
aa65409c
SZ
282static int bge_suspend(device_t);
283static int bge_resume(device_t);
33c39a69
JS
284static int bge_ifmedia_upd(struct ifnet *);
285static void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
286
591dfc77
SZ
287static uint8_t bge_nvram_getbyte(struct bge_softc *, int, uint8_t *);
288static int bge_read_nvram(struct bge_softc *, caddr_t, int, int);
289
33c39a69
JS
290static uint8_t bge_eeprom_getbyte(struct bge_softc *, uint32_t, uint8_t *);
291static int bge_read_eeprom(struct bge_softc *, caddr_t, uint32_t, size_t);
292
33c39a69 293static void bge_setmulti(struct bge_softc *);
6439b28a 294static void bge_setpromisc(struct bge_softc *);
33c39a69 295
33c39a69
JS
296static int bge_alloc_jumbo_mem(struct bge_softc *);
297static void bge_free_jumbo_mem(struct bge_softc *);
2aa9b12f
JS
298static struct bge_jslot
299 *bge_jalloc(struct bge_softc *);
300static void bge_jfree(void *);
301static void bge_jref(void *);
1436f9a0
SZ
302static int bge_newbuf_std(struct bge_softc *, int, int);
303static int bge_newbuf_jumbo(struct bge_softc *, int, int);
304static void bge_setup_rxdesc_std(struct bge_softc *, int);
305static void bge_setup_rxdesc_jumbo(struct bge_softc *, int);
33c39a69
JS
306static int bge_init_rx_ring_std(struct bge_softc *);
307static void bge_free_rx_ring_std(struct bge_softc *);
308static int bge_init_rx_ring_jumbo(struct bge_softc *);
309static void bge_free_rx_ring_jumbo(struct bge_softc *);
310static void bge_free_tx_ring(struct bge_softc *);
311static int bge_init_tx_ring(struct bge_softc *);
312
313static int bge_chipinit(struct bge_softc *);
314static int bge_blockinit(struct bge_softc *);
984263bc 315
33c39a69
JS
316static uint32_t bge_readmem_ind(struct bge_softc *, uint32_t);
317static void bge_writemem_ind(struct bge_softc *, uint32_t, uint32_t);
984263bc 318#ifdef notdef
33c39a69 319static uint32_t bge_readreg_ind(struct bge_softc *, uint32_t);
984263bc 320#endif
33c39a69 321static void bge_writereg_ind(struct bge_softc *, uint32_t, uint32_t);
0ecb11d7 322static void bge_writemem_direct(struct bge_softc *, uint32_t, uint32_t);
591dfc77 323static void bge_writembx(struct bge_softc *, int, int);
984263bc 324
33c39a69
JS
325static int bge_miibus_readreg(device_t, int, int);
326static int bge_miibus_writereg(device_t, int, int, int);
327static void bge_miibus_statchg(device_t);
db861466
SZ
328static void bge_bcm5700_link_upd(struct bge_softc *, uint32_t);
329static void bge_tbi_link_upd(struct bge_softc *, uint32_t);
330static void bge_copper_link_upd(struct bge_softc *, uint32_t);
984263bc 331
33c39a69 332static void bge_reset(struct bge_softc *);
984263bc 333
20c9a969
SZ
334static int bge_dma_alloc(struct bge_softc *);
335static void bge_dma_free(struct bge_softc *);
336static int bge_dma_block_alloc(struct bge_softc *, bus_size_t,
337 bus_dma_tag_t *, bus_dmamap_t *,
338 void **, bus_addr_t *);
339static void bge_dma_block_free(bus_dma_tag_t, bus_dmamap_t, void *);
340
591dfc77
SZ
341static int bge_get_eaddr_mem(struct bge_softc *, uint8_t[]);
342static int bge_get_eaddr_nvram(struct bge_softc *, uint8_t[]);
343static int bge_get_eaddr_eeprom(struct bge_softc *, uint8_t[]);
344static int bge_get_eaddr(struct bge_softc *, uint8_t[]);
345
055d06f0
SZ
346static void bge_coal_change(struct bge_softc *);
347static int bge_sysctl_rx_coal_ticks(SYSCTL_HANDLER_ARGS);
348static int bge_sysctl_tx_coal_ticks(SYSCTL_HANDLER_ARGS);
349static int bge_sysctl_rx_max_coal_bds(SYSCTL_HANDLER_ARGS);
350static int bge_sysctl_tx_max_coal_bds(SYSCTL_HANDLER_ARGS);
351static int bge_sysctl_coal_chg(SYSCTL_HANDLER_ARGS, uint32_t *, uint32_t);
352
5c56d5d8
SZ
/*
 * Set following tunable to 1 for some IBM blade servers with the DNLK
 * switch module. Auto negotiation is broken for those configurations.
 */
static int	bge_fake_autoneg = 0;
TUNABLE_INT("hw.bge.fake_autoneg", &bge_fake_autoneg);

/* Interrupt moderation control variables. */
static int	bge_rx_coal_ticks = 100;	/* usec */
static int	bge_tx_coal_ticks = 1023;	/* usec */
static int	bge_rx_max_coal_bds = 80;	/* buffer descriptors per RX intr */
static int	bge_tx_max_coal_bds = 128;	/* buffer descriptors per TX intr */

/* Allow the coalescing defaults above to be overridden at boot time. */
TUNABLE_INT("hw.bge.rx_coal_ticks", &bge_rx_coal_ticks);
TUNABLE_INT("hw.bge.tx_coal_ticks", &bge_tx_coal_ticks);
TUNABLE_INT("hw.bge.rx_max_coal_bds", &bge_rx_max_coal_bds);
TUNABLE_INT("hw.bge.tx_max_coal_bds", &bge_tx_max_coal_bds);

/* KTR event tracing for the interrupt, RX and TX paths. */
#if !defined(KTR_IF_BGE)
#define KTR_IF_BGE	KTR_ALL
#endif
KTR_INFO_MASTER(if_bge);
KTR_INFO(KTR_IF_BGE, if_bge, intr, 0, "intr", 0);
KTR_INFO(KTR_IF_BGE, if_bge, rx_pkt, 1, "rx_pkt", 0);
KTR_INFO(KTR_IF_BGE, if_bge, tx_pkt, 2, "tx_pkt", 0);
#define logif(name)	KTR_LOG(if_bge_ ## name)
379
984263bc
MD
/* Newbus method table wiring this driver's entry points. */
static device_method_t bge_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		bge_probe),
	DEVMETHOD(device_attach,	bge_attach),
	DEVMETHOD(device_detach,	bge_detach),
	DEVMETHOD(device_shutdown,	bge_shutdown),
	DEVMETHOD(device_suspend,	bge_suspend),
	DEVMETHOD(device_resume,	bge_resume),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	bge_miibus_readreg),
	DEVMETHOD(miibus_writereg,	bge_miibus_writereg),
	DEVMETHOD(miibus_statchg,	bge_miibus_statchg),

	{ 0, 0 }
};

static DEFINE_CLASS_0(bge, bge_driver, bge_methods, sizeof(struct bge_softc));
static devclass_t bge_devclass;

/* Register on the PCI bus; miibus attaches as a child of this driver. */
DECLARE_DUMMY_MODULE(if_bge);
DRIVER_MODULE(if_bge, pci, bge_driver, bge_devclass, 0, 0);
DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0);
407
33c39a69
JS
408static uint32_t
409bge_readmem_ind(struct bge_softc *sc, uint32_t off)
984263bc 410{
33c39a69 411 device_t dev = sc->bge_dev;
0ecb11d7 412 uint32_t val;
984263bc
MD
413
414 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
0ecb11d7
SZ
415 val = pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4);
416 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
417 return (val);
984263bc
MD
418}
419
420static void
33c39a69 421bge_writemem_ind(struct bge_softc *sc, uint32_t off, uint32_t val)
984263bc 422{
33c39a69 423 device_t dev = sc->bge_dev;
984263bc
MD
424
425 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
426 pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
0ecb11d7 427 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
984263bc
MD
428}
429
#ifdef notdef
/*
 * Indirect register access: read a chip register through the PCI
 * config-space register base/data window.  Currently compiled out.
 *
 * Fix: the parameter type was misspelled "uin32_t", which would fail
 * to compile if this block were ever enabled.
 */
static uint32_t
bge_readreg_ind(struct bge_softc *sc, uint32_t off)
{
	device_t dev = sc->bge_dev;

	pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
	return(pci_read_config(dev, BGE_PCI_REG_DATA, 4));
}
#endif
440
441static void
33c39a69 442bge_writereg_ind(struct bge_softc *sc, uint32_t off, uint32_t val)
984263bc 443{
33c39a69 444 device_t dev = sc->bge_dev;
984263bc
MD
445
446 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
447 pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);
984263bc
MD
448}
449
0ecb11d7
SZ
/*
 * Write a chip register directly through the memory-mapped register
 * space (no config-space window indirection).
 */
static void
bge_writemem_direct(struct bge_softc *sc, uint32_t off, uint32_t val)
{
	CSR_WRITE_4(sc, off, val);
}
455
591dfc77
SZ
/*
 * Write a mailbox register.  The BCM5906 exposes its mailboxes at the
 * low-power mailbox offsets, so translate the address on that chip.
 */
static void
bge_writembx(struct bge_softc *sc, int off, int val)
{
	if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
		off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI;

	CSR_WRITE_4(sc, off, val);
}
464
/*
 * Read one byte from the BCM5906 NVRAM.
 *
 * Acquires the NVRAM software arbitration lock, enables NVRAM access,
 * issues a 32-bit-aligned read and extracts the requested byte, then
 * restores the access register and releases the lock.
 *
 * Returns 0 on success (byte stored in *dest), 1 on lock or read
 * timeout.
 */
static uint8_t
bge_nvram_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
{
	uint32_t access, byte = 0;
	int i;

	/* Lock. */
	CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
	for (i = 0; i < 8000; i++) {
		if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1)
			break;
		DELAY(20);
	}
	if (i == 8000)
		return (1);

	/* Enable access. */
	access = CSR_READ_4(sc, BGE_NVRAM_ACCESS);
	CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access | BGE_NVRAMACC_ENABLE);

	/* Word-aligned read; byte selection happens below. */
	CSR_WRITE_4(sc, BGE_NVRAM_ADDR, addr & 0xfffffffc);
	CSR_WRITE_4(sc, BGE_NVRAM_CMD, BGE_NVRAM_READCMD);
	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
		DELAY(10);
		if (CSR_READ_4(sc, BGE_NVRAM_CMD) & BGE_NVRAMCMD_DONE) {
			DELAY(10);
			break;
		}
	}

	/*
	 * NOTE(review): on this timeout path the access register and the
	 * arbitration lock are left held — verify against upstream whether
	 * that is intentional.
	 */
	if (i == BGE_TIMEOUT * 10) {
		if_printf(&sc->arpcom.ac_if, "nvram read timed out\n");
		return (1);
	}

	/* Get result. */
	byte = CSR_READ_4(sc, BGE_NVRAM_RDDATA);

	/* Data is big-endian in NVRAM; swap then pick the byte lane. */
	*dest = (bswap32(byte) >> ((addr % 4) * 8)) & 0xFF;

	/* Disable access. */
	CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access);

	/* Unlock. */
	CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_CLR1);
	CSR_READ_4(sc, BGE_NVRAM_SWARB);

	return (0);
}
514
515/*
516 * Read a sequence of bytes from NVRAM.
517 */
518static int
519bge_read_nvram(struct bge_softc *sc, caddr_t dest, int off, int cnt)
520{
521 int err = 0, i;
522 uint8_t byte = 0;
523
524 if (sc->bge_asicrev != BGE_ASICREV_BCM5906)
525 return (1);
526
527 for (i = 0; i < cnt; i++) {
528 err = bge_nvram_getbyte(sc, off + i, &byte);
529 if (err)
530 break;
531 *(dest + i) = byte;
532 }
533
534 return (err ? 1 : 0);
535}
536
984263bc
MD
537/*
538 * Read a byte of data stored in the EEPROM at address 'addr.' The
539 * BCM570x supports both the traditional bitbang interface and an
540 * auto access interface for reading the EEPROM. We use the auto
541 * access method.
542 */
33c39a69
JS
543static uint8_t
544bge_eeprom_getbyte(struct bge_softc *sc, uint32_t addr, uint8_t *dest)
984263bc
MD
545{
546 int i;
33c39a69 547 uint32_t byte = 0;
984263bc
MD
548
549 /*
550 * Enable use of auto EEPROM access so we can avoid
551 * having to use the bitbang method.
552 */
553 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);
554
555 /* Reset the EEPROM, load the clock period. */
556 CSR_WRITE_4(sc, BGE_EE_ADDR,
557 BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
558 DELAY(20);
559
560 /* Issue the read EEPROM command. */
561 CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);
562
563 /* Wait for completion */
564 for(i = 0; i < BGE_TIMEOUT * 10; i++) {
565 DELAY(10);
566 if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
567 break;
568 }
569
570 if (i == BGE_TIMEOUT) {
c6fd6f3b 571 if_printf(&sc->arpcom.ac_if, "eeprom read timed out\n");
7b47d9c2 572 return(1);
984263bc
MD
573 }
574
575 /* Get result. */
576 byte = CSR_READ_4(sc, BGE_EE_DATA);
577
578 *dest = (byte >> ((addr % 4) * 8)) & 0xFF;
579
580 return(0);
581}
582
583/*
584 * Read a sequence of bytes from the EEPROM.
585 */
586static int
33c39a69 587bge_read_eeprom(struct bge_softc *sc, caddr_t dest, uint32_t off, size_t len)
984263bc 588{
33c39a69
JS
589 size_t i;
590 int err;
591 uint8_t byte;
984263bc 592
33c39a69 593 for (byte = 0, err = 0, i = 0; i < len; i++) {
984263bc
MD
594 err = bge_eeprom_getbyte(sc, off + i, &byte);
595 if (err)
596 break;
597 *(dest + i) = byte;
598 }
599
600 return(err ? 1 : 0);
601}
602
/*
 * MII read callback.  Temporarily disables PHY autopolling around the
 * access (reading with autopolling on may trigger PCI errors), polls
 * the MI communication register until it goes idle, and returns the
 * 16-bit register value, or 0 on timeout/read failure.
 */
static int
bge_miibus_readreg(device_t dev, int phy, int reg)
{
	struct bge_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t val, autopoll;
	int i;

	/*
	 * Broadcom's own driver always assumes the internal
	 * PHY is at GMII address 1. On some chips, the PHY responds
	 * to accesses at all addresses, which could cause us to
	 * bogusly attach the PHY 32 times at probe time. Always
	 * restricting the lookup to address 1 is simpler than
	 * trying to figure out which chips revisions should be
	 * special-cased.
	 */
	if (phy != 1)
		return(0);

	/* Reading with autopolling on may trigger PCI errors */
	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY|
	    BGE_MIPHY(phy)|BGE_MIREG(reg));

	/* Poll until the BUSY bit clears. */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		DELAY(10);
		val = CSR_READ_4(sc, BGE_MI_COMM);
		if (!(val & BGE_MICOMM_BUSY))
			break;
	}

	if (i == BGE_TIMEOUT) {
		if_printf(ifp, "PHY read timed out "
			  "(phy %d, reg %d, val 0x%08x)\n", phy, reg, val);
		val = 0;
		goto done;
	}

	DELAY(5);
	val = CSR_READ_4(sc, BGE_MI_COMM);

done:
	/* Restore autopolling if we turned it off above. */
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	if (val & BGE_MICOMM_READFAIL)
		return(0);

	return(val & 0xFFFF);
}
661
/*
 * MII write callback.  Mirrors bge_miibus_readreg(): restricts access
 * to PHY address 1, disables autopolling around the access, and polls
 * for completion.  Always returns 0 (MII write convention).
 */
static int
bge_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct bge_softc *sc = device_get_softc(dev);
	uint32_t autopoll;
	int i;

	/*
	 * See the related comment in bge_miibus_readreg()
	 */
	if (phy != 1)
		return(0);

	/* The BCM5906 does not accept writes to these PHY registers. */
	if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
	    (reg == BRGPHY_MII_1000CTL || reg == BRGPHY_MII_AUXCTL))
		return(0);

	/* Reading with autopolling on may trigger PCI errors */
	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY|
	    BGE_MIPHY(phy)|BGE_MIREG(reg)|val);

	/* Poll until the BUSY bit clears. */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) {
			DELAY(5);
			CSR_READ_4(sc, BGE_MI_COMM); /* dummy read */
			break;
		}
	}

	/* Restore autopolling if we turned it off above. */
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	if (i == BGE_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if, "PHY write timed out "
			  "(phy %d, reg %d, val %d)\n", phy, reg, val);
		return(0);
	}

	return(0);
}
711
712static void
33c39a69 713bge_miibus_statchg(device_t dev)
984263bc
MD
714{
715 struct bge_softc *sc;
716 struct mii_data *mii;
717
718 sc = device_get_softc(dev);
719 mii = device_get_softc(sc->bge_miibus);
720
721 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
7f259627 722 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
984263bc
MD
723 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
724 } else {
725 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
726 }
727
728 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
729 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
730 } else {
731 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
732 }
984263bc
MD
733}
734
984263bc
MD
/*
 * Memory management for jumbo frames.
 */

/*
 * Allocate the jumbo RX ring and the jumbo buffer pool, then carve the
 * pool into BGE_JSLOTS fixed-size slots threaded onto the free list.
 * Returns 0 on success or the bge_dma_block_alloc() error code.
 */
static int
bge_alloc_jumbo_mem(struct bge_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct bge_jslot *entry;
	uint8_t *ptr;
	bus_addr_t paddr;
	int i, error;

	/*
	 * Create tag for jumbo mbufs.
	 * This is really a bit of a kludge. We allocate a special
	 * jumbo buffer pool which (thanks to the way our DMA
	 * memory allocation works) will consist of contiguous
	 * pages. This means that even though a jumbo buffer might
	 * be larger than a page size, we don't really need to
	 * map it into more than one DMA segment. However, the
	 * default mbuf tag will result in multi-segment mappings,
	 * so we have to create a special jumbo mbuf tag that
	 * lets us get away with mapping the jumbo buffers as
	 * a single segment. I think eventually the driver should
	 * be changed so that it uses ordinary mbufs and cluster
	 * buffers, i.e. jumbo frames can span multiple DMA
	 * descriptors. But that's a project for another day.
	 */

	/*
	 * Create DMA stuffs for jumbo RX ring.
	 */
	error = bge_dma_block_alloc(sc, BGE_JUMBO_RX_RING_SZ,
				    &sc->bge_cdata.bge_rx_jumbo_ring_tag,
				    &sc->bge_cdata.bge_rx_jumbo_ring_map,
				    (void **)&sc->bge_ldata.bge_rx_jumbo_ring,
				    &sc->bge_ldata.bge_rx_jumbo_ring_paddr);
	if (error) {
		if_printf(ifp, "could not create jumbo RX ring\n");
		return error;
	}

	/*
	 * Create DMA stuffs for jumbo buffer block.
	 */
	error = bge_dma_block_alloc(sc, BGE_JMEM,
				    &sc->bge_cdata.bge_jumbo_tag,
				    &sc->bge_cdata.bge_jumbo_map,
				    (void **)&sc->bge_ldata.bge_jumbo_buf,
				    &paddr);
	if (error) {
		if_printf(ifp, "could not create jumbo buffer\n");
		return error;
	}

	SLIST_INIT(&sc->bge_jfree_listhead);

	/*
	 * Now divide it up into 9K pieces and save the addresses
	 * in an array. Note that we play an evil trick here by using
	 * the first few bytes in the buffer to hold the address
	 * of the softc structure for this interface. This is because
	 * bge_jfree() needs it, but it is called by the mbuf management
	 * code which will not pass it to us explicitly.
	 */
	for (i = 0, ptr = sc->bge_ldata.bge_jumbo_buf; i < BGE_JSLOTS; i++) {
		entry = &sc->bge_cdata.bge_jslots[i];
		entry->bge_sc = sc;
		entry->bge_buf = ptr;
		entry->bge_paddr = paddr;
		entry->bge_inuse = 0;
		entry->bge_slot = i;
		SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, entry, jslot_link);

		/* Virtual and physical cursors advance in lock step. */
		ptr += BGE_JLEN;
		paddr += BGE_JLEN;
	}
	return 0;
}
814
/*
 * Tear down the DMA blocks created by bge_alloc_jumbo_mem(): the jumbo
 * RX ring and the jumbo buffer pool.
 */
static void
bge_free_jumbo_mem(struct bge_softc *sc)
{
	/* Destroy jumbo RX ring. */
	bge_dma_block_free(sc->bge_cdata.bge_rx_jumbo_ring_tag,
			   sc->bge_cdata.bge_rx_jumbo_ring_map,
			   sc->bge_ldata.bge_rx_jumbo_ring);

	/* Destroy jumbo buffer block. */
	bge_dma_block_free(sc->bge_cdata.bge_jumbo_tag,
			   sc->bge_cdata.bge_jumbo_map,
			   sc->bge_ldata.bge_jumbo_buf);
}
828
829/*
830 * Allocate a jumbo buffer.
831 */
2aa9b12f 832static struct bge_jslot *
33c39a69 833bge_jalloc(struct bge_softc *sc)
984263bc 834{
2aa9b12f 835 struct bge_jslot *entry;
33c39a69 836
16dca0df 837 lwkt_serialize_enter(&sc->bge_jslot_serializer);
984263bc 838 entry = SLIST_FIRST(&sc->bge_jfree_listhead);
16dca0df
MD
839 if (entry) {
840 SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jslot_link);
841 entry->bge_inuse = 1;
842 } else {
c6fd6f3b 843 if_printf(&sc->arpcom.ac_if, "no free jumbo buffers\n");
984263bc 844 }
16dca0df 845 lwkt_serialize_exit(&sc->bge_jslot_serializer);
2aa9b12f 846 return(entry);
984263bc
MD
847}
848
849/*
850 * Adjust usage count on a jumbo buffer.
851 */
852static void
2aa9b12f 853bge_jref(void *arg)
984263bc 854{
2aa9b12f
JS
855 struct bge_jslot *entry = (struct bge_jslot *)arg;
856 struct bge_softc *sc = entry->bge_sc;
984263bc
MD
857
858 if (sc == NULL)
859 panic("bge_jref: can't find softc pointer!");
860
16dca0df 861 if (&sc->bge_cdata.bge_jslots[entry->bge_slot] != entry) {
984263bc
MD
862 panic("bge_jref: asked to reference buffer "
863 "that we don't manage!");
16dca0df 864 } else if (entry->bge_inuse == 0) {
984263bc 865 panic("bge_jref: buffer already free!");
16dca0df
MD
866 } else {
867 atomic_add_int(&entry->bge_inuse, 1);
868 }
984263bc
MD
869}
870
/*
 * Release a jumbo buffer.
 *
 * Called by the mbuf layer as the external-buffer free routine
 * (installed as m_ext.ext_free by bge_newbuf_jumbo()).  Drops one
 * reference; when the count reaches zero the slot is returned to
 * the softc's free list.
 */
static void
bge_jfree(void *arg)
{
	struct bge_jslot *entry = (struct bge_jslot *)arg;
	struct bge_softc *sc = entry->bge_sc;

	if (sc == NULL)
		panic("bge_jfree: can't find softc pointer!");

	/* Verify the slot belongs to this softc and is actually in use. */
	if (&sc->bge_cdata.bge_jslots[entry->bge_slot] != entry) {
		panic("bge_jfree: asked to free buffer that we don't manage!");
	} else if (entry->bge_inuse == 0) {
		panic("bge_jfree: buffer already free!");
	} else {
		/*
		 * Possible MP race to 0, use the serializer.  The atomic insn
		 * is still needed for races against bge_jref().
		 */
		lwkt_serialize_enter(&sc->bge_jslot_serializer);
		atomic_subtract_int(&entry->bge_inuse, 1);
		if (entry->bge_inuse == 0) {
			/* Last reference gone: recycle the slot. */
			SLIST_INSERT_HEAD(&sc->bge_jfree_listhead,
					  entry, jslot_link);
		}
		lwkt_serialize_exit(&sc->bge_jslot_serializer);
	}
}
901
902
/*
 * Initialize a standard receive ring descriptor.
 *
 * Allocates a cluster mbuf, DMA-loads it through the spare map, and
 * installs it in standard RX slot 'i'.  'init' selects blocking
 * allocation (ring setup time) vs. non-blocking (RX refill path).
 * Returns 0 on success or an errno; on failure the old mbuf in the
 * slot (if any) is left untouched.
 */
static int
bge_newbuf_std(struct bge_softc *sc, int i, int init)
{
	struct mbuf *m_new = NULL;
	bus_dma_segment_t seg;
	bus_dmamap_t map;
	int error, nsegs;

	m_new = m_getcl(init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m_new == NULL)
		return ENOBUFS;
	m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;

	/* Align the payload unless the chip has the RX alignment bug. */
	if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
		m_adj(m_new, ETHER_ALIGN);

	/*
	 * Load the new mbuf into the temporary map first so the slot's
	 * current mapping stays valid if the load fails.
	 */
	error = bus_dmamap_load_mbuf_segment(sc->bge_cdata.bge_rx_mtag,
			sc->bge_cdata.bge_rx_tmpmap, m_new,
			&seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m_new);
		return error;
	}

	/* On refill, sync and unload the previous mbuf's mapping. */
	if (!init) {
		bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
				sc->bge_cdata.bge_rx_std_dmamap[i],
				BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag,
				sc->bge_cdata.bge_rx_std_dmamap[i]);
	}

	/* Swap the freshly-loaded temp map into the slot. */
	map = sc->bge_cdata.bge_rx_tmpmap;
	sc->bge_cdata.bge_rx_tmpmap = sc->bge_cdata.bge_rx_std_dmamap[i];
	sc->bge_cdata.bge_rx_std_dmamap[i] = map;

	sc->bge_cdata.bge_rx_std_chain[i].bge_mbuf = m_new;
	sc->bge_cdata.bge_rx_std_chain[i].bge_paddr = seg.ds_addr;

	/* Publish the new buffer to the hardware descriptor. */
	bge_setup_rxdesc_std(sc, i);
	return 0;
}
948
1436f9a0
SZ
949static void
950bge_setup_rxdesc_std(struct bge_softc *sc, int i)
951{
952 struct bge_rxchain *rc;
953 struct bge_rx_bd *r;
954
955 rc = &sc->bge_cdata.bge_rx_std_chain[i];
956 r = &sc->bge_ldata.bge_rx_std_ring[i];
957
958 r->bge_addr.bge_addr_lo = BGE_ADDR_LO(rc->bge_paddr);
959 r->bge_addr.bge_addr_hi = BGE_ADDR_HI(rc->bge_paddr);
960 r->bge_len = rc->bge_mbuf->m_len;
961 r->bge_idx = i;
962 r->bge_flags = BGE_RXBDFLAG_END;
963}
964
984263bc
MD
/*
 * Initialize a jumbo receive ring descriptor. This allocates
 * a jumbo buffer from the pool managed internally by the driver.
 *
 * 'init' selects blocking vs. non-blocking mbuf allocation.
 * Returns 0 on success or ENOBUFS when either the mbuf header or a
 * jumbo pool slot cannot be obtained.
 */
static int
bge_newbuf_jumbo(struct bge_softc *sc, int i, int init)
{
	struct mbuf *m_new = NULL;
	struct bge_jslot *buf;
	bus_addr_t paddr;

	/* Allocate the mbuf. */
	MGETHDR(m_new, init ? MB_WAIT : MB_DONTWAIT, MT_DATA);
	if (m_new == NULL)
		return ENOBUFS;

	/* Allocate the jumbo buffer */
	buf = bge_jalloc(sc);
	if (buf == NULL) {
		m_freem(m_new);
		return ENOBUFS;
	}

	/*
	 * Attach the buffer to the mbuf as external storage; bge_jfree/
	 * bge_jref manage the slot's reference count from the mbuf layer.
	 */
	m_new->m_ext.ext_arg = buf;
	m_new->m_ext.ext_buf = buf->bge_buf;
	m_new->m_ext.ext_free = bge_jfree;
	m_new->m_ext.ext_ref = bge_jref;
	m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN;

	m_new->m_flags |= M_EXT;

	m_new->m_data = m_new->m_ext.ext_buf;
	m_new->m_len = m_new->m_pkthdr.len = m_new->m_ext.ext_size;

	/* Keep the physical address in step with any alignment fixup. */
	paddr = buf->bge_paddr;
	if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0) {
		m_adj(m_new, ETHER_ALIGN);
		paddr += ETHER_ALIGN;
	}

	/* Save necessary information */
	sc->bge_cdata.bge_rx_jumbo_chain[i].bge_mbuf = m_new;
	sc->bge_cdata.bge_rx_jumbo_chain[i].bge_paddr = paddr;

	/* Set up the descriptor. */
	bge_setup_rxdesc_jumbo(sc, i);
	return 0;
}
1014
1015static void
1016bge_setup_rxdesc_jumbo(struct bge_softc *sc, int i)
1017{
1018 struct bge_rx_bd *r;
1019 struct bge_rxchain *rc;
20c9a969
SZ
1020
1021 r = &sc->bge_ldata.bge_rx_jumbo_ring[i];
1436f9a0 1022 rc = &sc->bge_cdata.bge_rx_jumbo_chain[i];
984263bc 1023
1436f9a0
SZ
1024 r->bge_addr.bge_addr_lo = BGE_ADDR_LO(rc->bge_paddr);
1025 r->bge_addr.bge_addr_hi = BGE_ADDR_HI(rc->bge_paddr);
1026 r->bge_len = rc->bge_mbuf->m_len;
1027 r->bge_idx = i;
1028 r->bge_flags = BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING;
984263bc
MD
1029}
1030
984263bc 1031static int
33c39a69 1032bge_init_rx_ring_std(struct bge_softc *sc)
984263bc 1033{
1436f9a0 1034 int i, error;
984263bc 1035
1436f9a0
SZ
1036 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1037 error = bge_newbuf_std(sc, i, 1);
1038 if (error)
1039 return error;
984263bc
MD
1040 };
1041
1436f9a0 1042 sc->bge_std = BGE_STD_RX_RING_CNT - 1;
591dfc77 1043 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
984263bc
MD
1044
1045 return(0);
1046}
1047
1048static void
33c39a69 1049bge_free_rx_ring_std(struct bge_softc *sc)
984263bc
MD
1050{
1051 int i;
1052
1053 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1436f9a0
SZ
1054 struct bge_rxchain *rc = &sc->bge_cdata.bge_rx_std_chain[i];
1055
1056 if (rc->bge_mbuf != NULL) {
ddca511d 1057 bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag,
20c9a969 1058 sc->bge_cdata.bge_rx_std_dmamap[i]);
1436f9a0
SZ
1059 m_freem(rc->bge_mbuf);
1060 rc->bge_mbuf = NULL;
984263bc 1061 }
20c9a969 1062 bzero(&sc->bge_ldata.bge_rx_std_ring[i],
984263bc
MD
1063 sizeof(struct bge_rx_bd));
1064 }
984263bc
MD
1065}
1066
1067static int
33c39a69 1068bge_init_rx_ring_jumbo(struct bge_softc *sc)
984263bc 1069{
984263bc 1070 struct bge_rcb *rcb;
1436f9a0 1071 int i, error;
984263bc
MD
1072
1073 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1436f9a0
SZ
1074 error = bge_newbuf_jumbo(sc, i, 1);
1075 if (error)
1076 return error;
984263bc
MD
1077 };
1078
1436f9a0 1079 sc->bge_jumbo = BGE_JUMBO_RX_RING_CNT - 1;
984263bc 1080
20c9a969 1081 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
984263bc
MD
1082 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 0);
1083 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1084
591dfc77 1085 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
984263bc
MD
1086
1087 return(0);
1088}
1089
1090static void
33c39a69 1091bge_free_rx_ring_jumbo(struct bge_softc *sc)
984263bc
MD
1092{
1093 int i;
1094
1095 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1436f9a0
SZ
1096 struct bge_rxchain *rc = &sc->bge_cdata.bge_rx_jumbo_chain[i];
1097
1098 if (rc->bge_mbuf != NULL) {
1099 m_freem(rc->bge_mbuf);
1100 rc->bge_mbuf = NULL;
984263bc 1101 }
20c9a969 1102 bzero(&sc->bge_ldata.bge_rx_jumbo_ring[i],
984263bc
MD
1103 sizeof(struct bge_rx_bd));
1104 }
984263bc
MD
1105}
1106
1107static void
33c39a69 1108bge_free_tx_ring(struct bge_softc *sc)
984263bc
MD
1109{
1110 int i;
1111
984263bc
MD
1112 for (i = 0; i < BGE_TX_RING_CNT; i++) {
1113 if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
ddca511d 1114 bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag,
20c9a969 1115 sc->bge_cdata.bge_tx_dmamap[i]);
984263bc
MD
1116 m_freem(sc->bge_cdata.bge_tx_chain[i]);
1117 sc->bge_cdata.bge_tx_chain[i] = NULL;
1118 }
20c9a969 1119 bzero(&sc->bge_ldata.bge_tx_ring[i],
984263bc
MD
1120 sizeof(struct bge_tx_bd));
1121 }
984263bc
MD
1122}
1123
/*
 * Reset the software TX state and program the chip's TX producer
 * mailboxes to zero.  The duplicated mailbox writes are a required
 * workaround for the BCM5700 Bx errata.  Always returns 0.
 */
static int
bge_init_tx_ring(struct bge_softc *sc)
{
	sc->bge_txcnt = 0;
	sc->bge_tx_saved_considx = 0;
	sc->bge_tx_prodidx = 0;

	/* Initialize transmit producer index for host-memory send ring. */
	bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);

	/* 5700 b2 errata */
	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
		bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);

	bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
	/* 5700 b2 errata */
	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
		bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);

	return(0);
}
1145
984263bc 1146static void
33c39a69 1147bge_setmulti(struct bge_softc *sc)
984263bc
MD
1148{
1149 struct ifnet *ifp;
1150 struct ifmultiaddr *ifma;
33c39a69 1151 uint32_t hashes[4] = { 0, 0, 0, 0 };
984263bc
MD
1152 int h, i;
1153
1154 ifp = &sc->arpcom.ac_if;
1155
1156 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
1157 for (i = 0; i < 4; i++)
1158 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
1159 return;
1160 }
1161
1162 /* First, zot all the existing filters. */
1163 for (i = 0; i < 4; i++)
1164 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);
1165
1166 /* Now program new ones. */
441d34b2 1167 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
984263bc
MD
1168 if (ifma->ifma_addr->sa_family != AF_LINK)
1169 continue;
3b4ec5b8
JS
1170 h = ether_crc32_le(
1171 LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
1172 ETHER_ADDR_LEN) & 0x7f;
984263bc
MD
1173 hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
1174 }
1175
1176 for (i = 0; i < 4; i++)
1177 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
984263bc
MD
1178}
1179
/*
 * Do endian, PCI and DMA initialization.
 * (NOTE(review): the historical comment also claimed an on-board ROM
 * self-test check, but no such check is visible in this version.)
 *
 * Must run before any other non-PCI register access: the first write
 * establishes the chip's endian/byte-swap configuration.  The DMA
 * read/write control value is chosen per bus type (PCIe, PCI-X,
 * conventional PCI) and per-ASIC errata.  Always returns 0.
 */
static int
bge_chipinit(struct bge_softc *sc)
{
	int i;
	uint32_t dma_rw_ctl;

	/* Set endian type before we access any non-PCI registers. */
	pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, BGE_INIT, 4);

	/* Clear the MAC control register */
	CSR_WRITE_4(sc, BGE_MAC_MODE, 0);

	/*
	 * Clear the MAC statistics block in the NIC's
	 * internal memory.
	 */
	for (i = BGE_STATS_BLOCK;
	    i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t))
		BGE_MEMWIN_WRITE(sc, i, 0);

	/* Likewise clear the status block region. */
	for (i = BGE_STATUS_BLOCK;
	    i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t))
		BGE_MEMWIN_WRITE(sc, i, 0);

	/* Set up the PCI DMA control register. */
	if (sc->bge_flags & BGE_FLAG_PCIE) {
		/* PCI Express */
		dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
		    (0xf << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
		    (0x2 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
	} else if (sc->bge_flags & BGE_FLAG_PCIX) {
		/* PCI-X bus */
		if (BGE_IS_5714_FAMILY(sc)) {
			dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD;
			dma_rw_ctl &= ~BGE_PCIDMARWCTL_ONEDMA_ATONCE; /* XXX */
			/* XXX magic values, Broadcom-supplied Linux driver */
			if (sc->bge_asicrev == BGE_ASICREV_BCM5780) {
				dma_rw_ctl |= (1 << 20) | (1 << 18) |
				    BGE_PCIDMARWCTL_ONEDMA_ATONCE;
			} else {
				dma_rw_ctl |= (1 << 20) | (1 << 18) | (1 << 15);
			}
		} else if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
			/*
			 * The 5704 uses a different encoding of read/write
			 * watermarks.
			 */
			dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
			    (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
			    (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
		} else {
			dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
			    (0x3 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
			    (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
			    (0x0F);
		}

		/*
		 * 5703 and 5704 need ONEDMA_AT_ONCE as a workaround
		 * for hardware bugs.
		 */
		if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
		    sc->bge_asicrev == BGE_ASICREV_BCM5704) {
			uint32_t tmp;

			tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1f;
			if (tmp == 0x6 || tmp == 0x7)
				dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE;
		}
	} else {
		/* Conventional PCI bus */
		dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
		    (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
		    (0x7 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
		    (0x0F);
	}

	if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5704 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5705)
		dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
	pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);

	/*
	 * Set up general mode register.
	 */
	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS|
	    BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS|
	    BGE_MODECTL_TX_NO_PHDR_CSUM);

	/*
	 * Disable memory write invalidate. Apparently it is not supported
	 * properly by these devices.
	 */
	PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD, PCIM_CMD_MWIEN, 4);

	/* Set the timer prescaler (always 66Mhz) */
	CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/);

	if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
		DELAY(40);	/* XXX */

		/* Put PHY into ready state */
		BGE_CLRBIT(sc, BGE_MISC_CFG, BGE_MISCCFG_EPHY_IDDQ);
		CSR_READ_4(sc, BGE_MISC_CFG); /* Flush */
		DELAY(40);
	}

	return(0);
}
1294
/*
 * Program every functional block of the controller: buffer manager,
 * ring control blocks (RCBs), host coalescing engine, DMA engines and
 * the various send/receive state machines, then enable link-change
 * attentions.  Called during chip bring-up after bge_chipinit().
 * Returns 0 on success or ENXIO if a hardware block fails to start
 * or stop within BGE_TIMEOUT polls.
 */
static int
bge_blockinit(struct bge_softc *sc)
{
	struct bge_rcb *rcb;
	bus_size_t vrcb;
	bge_hostaddr taddr;
	uint32_t val;
	int i;

	/*
	 * Initialize the memory window pointer register so that
	 * we can access the first 32K of internal NIC RAM. This will
	 * allow us to set up the TX send ring RCBs and the RX return
	 * ring RCBs, plus other things which live in NIC memory.
	 */
	CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);

	/* Note: the BCM5704 has a smaller mbuf space than other chips. */

	if (!BGE_IS_5705_PLUS(sc)) {
		/* Configure mbuf memory pool */
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_BUFFPOOL_1);
		if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
		else
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);

		/* Configure DMA resource pool */
		CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
		    BGE_DMA_DESCRIPTORS);
		CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
	}

	/* Configure mbuf pool watermarks (per chip family) */
	if (!BGE_IS_5705_PLUS(sc)) {
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
	} else if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x04);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x10);
	} else {
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
	}

	/* Configure DMA resource watermarks */
	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);

	/* Enable buffer manager */
	if (!BGE_IS_5705_PLUS(sc)) {
		CSR_WRITE_4(sc, BGE_BMAN_MODE,
		    BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN);

		/* Poll for buffer manager start indication */
		for (i = 0; i < BGE_TIMEOUT; i++) {
			if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
				break;
			DELAY(10);
		}

		if (i == BGE_TIMEOUT) {
			if_printf(&sc->arpcom.ac_if,
				  "buffer manager failed to start\n");
			return(ENXIO);
		}
	}

	/* Enable flow-through queues */
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);

	/* Wait until queue initialization is complete */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
			break;
		DELAY(10);
	}

	if (i == BGE_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if,
			  "flow-through queue init failed\n");
		return(ENXIO);
	}

	/* Initialize the standard RX ring control block */
	rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb;
	rcb->bge_hostaddr.bge_addr_lo =
	    BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr);
	rcb->bge_hostaddr.bge_addr_hi =
	    BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr);
	if (BGE_IS_5705_PLUS(sc))
		rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
	else
		rcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
	rcb->bge_nicaddr = BGE_STD_RX_RINGS;
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);

	/*
	 * Initialize the jumbo RX ring control block
	 * We set the 'ring disabled' bit in the flags
	 * field until we're actually ready to start
	 * using this ring (i.e. once we set the MTU
	 * high enough to require it).
	 */
	if (BGE_IS_JUMBO_CAPABLE(sc)) {
		rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;

		rcb->bge_hostaddr.bge_addr_lo =
		    BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
		rcb->bge_hostaddr.bge_addr_hi =
		    BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
		rcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN,
		    BGE_RCB_FLAG_RING_DISABLED);
		rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
		    rcb->bge_hostaddr.bge_addr_hi);
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
		    rcb->bge_hostaddr.bge_addr_lo);
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
		    rcb->bge_maxlen_flags);
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);

		/* Set up dummy disabled mini ring RCB */
		rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb;
		rcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
		CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
		    rcb->bge_maxlen_flags);
	}

	/*
	 * Set the BD ring replenish thresholds. The recommended
	 * values are 1/8th the number of descriptors allocated to
	 * each ring.
	 */
	if (BGE_IS_5705_PLUS(sc))
		val = 8;
	else
		val = BGE_STD_RX_RING_CNT / 8;
	CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, val);
	CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8);

	/*
	 * Disable all unused send rings by setting the 'ring disabled'
	 * bit in the flags field of all the TX send ring control blocks.
	 * These are located in NIC memory.
	 */
	vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
	for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) {
		RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
		RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
		vrcb += sizeof(struct bge_rcb);
	}

	/* Configure TX RCB 0 (we use only the first ring) */
	vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
	BGE_HOSTADDR(taddr, sc->bge_ldata.bge_tx_ring_paddr);
	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
	RCB_WRITE_4(sc, vrcb, bge_nicaddr,
	    BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
	if (!BGE_IS_5705_PLUS(sc)) {
		RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
		    BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
	}

	/* Disable all unused RX return rings */
	vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
	for (i = 0; i < BGE_RX_RINGS_MAX; i++) {
		RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0);
		RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0);
		RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
		    BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt,
		    BGE_RCB_FLAG_RING_DISABLED));
		RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
		bge_writembx(sc, BGE_MBX_RX_CONS0_LO +
		    (i * (sizeof(uint64_t))), 0);
		vrcb += sizeof(struct bge_rcb);
	}

	/* Initialize RX ring indexes */
	bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0);
	bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
	bge_writembx(sc, BGE_MBX_RX_MINI_PROD_LO, 0);

	/*
	 * Set up RX return ring 0
	 * Note that the NIC address for RX return rings is 0x00000000.
	 * The return rings live entirely within the host, so the
	 * nicaddr field in the RCB isn't used.
	 */
	vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
	BGE_HOSTADDR(taddr, sc->bge_ldata.bge_rx_return_ring_paddr);
	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
	RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0x00000000);
	RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
	    BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));

	/* Set random backoff seed for TX */
	CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
	    sc->arpcom.ac_enaddr[0] + sc->arpcom.ac_enaddr[1] +
	    sc->arpcom.ac_enaddr[2] + sc->arpcom.ac_enaddr[3] +
	    sc->arpcom.ac_enaddr[4] + sc->arpcom.ac_enaddr[5] +
	    BGE_TX_BACKOFF_SEED_MASK);

	/* Set inter-packet gap */
	CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);

	/*
	 * Specify which ring to use for packets that don't match
	 * any RX rules.
	 */
	CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);

	/*
	 * Configure number of RX lists. One interrupt distribution
	 * list, sixteen active lists, one bad frames class.
	 */
	CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);

	/* Initialize RX list placement stats mask. */
	CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
	CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);

	/* Disable host coalescing until we get it set up */
	CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);

	/* Poll to make sure it's shut down. */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
			break;
		DELAY(10);
	}

	if (i == BGE_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if,
			  "host coalescing engine failed to idle\n");
		return(ENXIO);
	}

	/* Set up host coalescing defaults */
	CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
	CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
	if (!BGE_IS_5705_PLUS(sc)) {
		CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
		CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
	}
	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 1);
	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 1);

	/* Set up address of statistics block */
	if (!BGE_IS_5705_PLUS(sc)) {
		CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI,
		    BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr));
		CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
		    BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr));

		CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
		CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
		CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
	}

	/* Set up address of status block */
	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI,
	    BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr));
	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
	    BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr));
	sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx = 0;
	sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx = 0;

	/* Turn on host coalescing state machine */
	CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);

	/* Turn on RX BD completion state machine and enable attentions */
	CSR_WRITE_4(sc, BGE_RBDC_MODE,
	    BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);

	/* Turn on RX list placement state machine */
	CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);

	/* Turn on RX list selector state machine. */
	if (!BGE_IS_5705_PLUS(sc))
		CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);

	/* Turn on DMA, clear stats */
	CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB|
	    BGE_MACMODE_RXDMA_ENB|BGE_MACMODE_RX_STATS_CLEAR|
	    BGE_MACMODE_TX_STATS_CLEAR|BGE_MACMODE_RX_STATS_ENB|
	    BGE_MACMODE_TX_STATS_ENB|BGE_MACMODE_FRMHDR_DMA_ENB|
	    ((sc->bge_flags & BGE_FLAG_TBI) ?
	     BGE_PORTMODE_TBI : BGE_PORTMODE_MII));

	/* Set misc. local control, enable interrupts on attentions */
	CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);

#ifdef notdef
	/* Assert GPIO pins for PHY reset */
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
	    BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
	    BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
#endif

	/* Turn on DMA completion state machine */
	if (!BGE_IS_5705_PLUS(sc))
		CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);

	/* Turn on write DMA state machine */
	val = BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS;
	if (sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5787)
		val |= (1 << 29);	/* Enable host coalescing bug fix. */
	CSR_WRITE_4(sc, BGE_WDMA_MODE, val);
	DELAY(40);

	/* Turn on read DMA state machine */
	val = BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS;
	if (sc->bge_flags & BGE_FLAG_PCIE)
		val |= BGE_RDMAMODE_FIFO_LONG_BURST;
	CSR_WRITE_4(sc, BGE_RDMA_MODE, val);
	DELAY(40);

	/* Turn on RX data completion state machine */
	CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);

	/* Turn on RX BD initiator state machine */
	CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);

	/* Turn on RX data and RX BD initiator state machine */
	CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);

	/* Turn on Mbuf cluster free state machine */
	if (!BGE_IS_5705_PLUS(sc))
		CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);

	/* Turn on send BD completion state machine */
	CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);

	/* Turn on send data completion state machine */
	CSR_WRITE_4(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);

	/* Turn on send data initiator state machine */
	CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);

	/* Turn on send BD initiator state machine */
	CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);

	/* Turn on send BD selector state machine */
	CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);

	CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
	CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
	    BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);

	/* ack/clear link change events */
	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
	    BGE_MACSTAT_LINK_CHANGED);
	CSR_WRITE_4(sc, BGE_MI_STS, 0);

	/* Enable PHY auto polling (for MII/GMII only) */
	if (sc->bge_flags & BGE_FLAG_TBI) {
		CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
	} else {
		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL|10<<16);
		if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
		    sc->bge_chipid != BGE_CHIPID_BCM5700_B2) {
			CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
			    BGE_EVTENB_MI_INTERRUPT);
		}
	}

	/*
	 * Clear any pending link state attention.
	 * Otherwise some link state change events may be lost until attention
	 * is cleared by bge_intr() -> bge_softc.bge_link_upd() sequence.
	 * It's not necessary on newer BCM chips - perhaps enabling link
	 * state change attentions implies clearing pending attention.
	 */
	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
	    BGE_MACSTAT_LINK_CHANGED);

	/* Enable link state change attentions. */
	BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);

	return(0);
}
1696
1697/*
1698 * Probe for a Broadcom chip. Check the PCI vendor and device IDs
1699 * against our list and return its name if we find a match. Note
1700 * that since the Broadcom controller contains VPD support, we
1701 * can get the device name string from the controller itself instead
1702 * of the compiled-in string. This is a little slow, but it guarantees
1703 * we'll always announce the right product name.
1704 */
1705static int
33c39a69 1706bge_probe(device_t dev)
984263bc 1707{
d265721a 1708 const struct bge_type *t;
33c39a69
JS
1709 uint16_t product, vendor;
1710
1711 product = pci_get_device(dev);
1712 vendor = pci_get_vendor(dev);
1713
1714 for (t = bge_devs; t->bge_name != NULL; t++) {
1715 if (vendor == t->bge_vid && product == t->bge_did)
1716 break;
1717 }
33c39a69
JS
1718 if (t->bge_name == NULL)
1719 return(ENXIO);
984263bc 1720
d265721a
SZ
1721 device_set_desc(dev, t->bge_name);
1722 if (pci_get_subvendor(dev) == PCI_VENDOR_DELL) {
1723 struct bge_softc *sc = device_get_softc(dev);
0ecb11d7 1724 sc->bge_flags |= BGE_FLAG_NO_3LED;
d265721a 1725 }
33c39a69 1726 return(0);
984263bc
MD
1727}
1728
/*
 * Attach routine: map the register BAR, identify the chip and record
 * per-revision quirk flags, allocate the interrupt and DMA resources,
 * determine the media type (TBI fiber vs. copper PHY), create sysctl
 * tuning nodes and finally hook the interface into the network stack.
 * Every failure path funnels through `fail:', which calls bge_detach()
 * to unwind whatever was set up so far.
 */
static int
bge_attach(device_t dev)
{
        struct ifnet *ifp;
        struct bge_softc *sc;
        uint32_t hwcfg = 0;
        int error = 0, rid;
        uint8_t ether_addr[ETHER_ADDR_LEN];

        sc = device_get_softc(dev);
        sc->bge_dev = dev;
        callout_init(&sc->bge_stat_timer);
        lwkt_serialize_init(&sc->bge_jslot_serializer);

#ifndef BURN_BRIDGES
        /*
         * If the chip was left in a low-power state, save the config
         * registers that a power-state change clobbers, switch to D0,
         * then restore them.
         */
        if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
                uint32_t irq, mem;

                irq = pci_read_config(dev, PCIR_INTLINE, 4);
                mem = pci_read_config(dev, BGE_PCI_BAR0, 4);

                device_printf(dev, "chip is in D%d power mode "
                    "-- setting to D0\n", pci_get_powerstate(dev));

                pci_set_powerstate(dev, PCI_POWERSTATE_D0);

                pci_write_config(dev, PCIR_INTLINE, irq, 4);
                pci_write_config(dev, BGE_PCI_BAR0, mem, 4);
        }
#endif  /* !BURN_BRIDGES */

        /*
         * Map control/status registers.
         */
        pci_enable_busmaster(dev);

        rid = BGE_PCI_BAR0;
        sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
            RF_ACTIVE);

        if (sc->bge_res == NULL) {
                device_printf(dev, "couldn't map memory\n");
                return ENXIO;
        }

        sc->bge_btag = rman_get_bustag(sc->bge_res);
        sc->bge_bhandle = rman_get_bushandle(sc->bge_res);

        /* Save various chip information */
        sc->bge_chipid =
            pci_read_config(dev, BGE_PCI_MISC_CTL, 4) &
            BGE_PCIMISCCTL_ASICREV;
        sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid);
        sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid);

        /*
         * Save chipset family.  Note the deliberate fallthroughs:
         * 5714-family chips are also 575X-plus, and all 575X-plus
         * chips (plus the 5705) are 5705-plus.
         */
        switch (sc->bge_asicrev) {
        case BGE_ASICREV_BCM5700:
        case BGE_ASICREV_BCM5701:
        case BGE_ASICREV_BCM5703:
        case BGE_ASICREV_BCM5704:
                sc->bge_flags |= BGE_FLAG_5700_FAMILY | BGE_FLAG_JUMBO;
                break;

        case BGE_ASICREV_BCM5714_A0:
        case BGE_ASICREV_BCM5780:
        case BGE_ASICREV_BCM5714:
                sc->bge_flags |= BGE_FLAG_5714_FAMILY;
                /* Fall through */

        case BGE_ASICREV_BCM5750:
        case BGE_ASICREV_BCM5752:
        case BGE_ASICREV_BCM5755:
        case BGE_ASICREV_BCM5787:
        case BGE_ASICREV_BCM5906:
                sc->bge_flags |= BGE_FLAG_575X_PLUS;
                /* Fall through */

        case BGE_ASICREV_BCM5705:
                sc->bge_flags |= BGE_FLAG_5705_PLUS;
                break;
        }

        if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
                sc->bge_flags |= BGE_FLAG_NO_EEPROM;

        /*
         * Set various quirk flags.
         */

        /* Ethernet@WireSpeed is assumed present except on these chips. */
        sc->bge_flags |= BGE_FLAG_ETH_WIRESPEED;
        if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
            (sc->bge_asicrev == BGE_ASICREV_BCM5705 &&
             (sc->bge_chipid != BGE_CHIPID_BCM5705_A0 &&
              sc->bge_chipid != BGE_CHIPID_BCM5705_A1)) ||
            sc->bge_asicrev == BGE_ASICREV_BCM5906)
                sc->bge_flags &= ~BGE_FLAG_ETH_WIRESPEED;

        if (sc->bge_chipid == BGE_CHIPID_BCM5701_A0 ||
            sc->bge_chipid == BGE_CHIPID_BCM5701_B0)
                sc->bge_flags |= BGE_FLAG_CRC_BUG;

        if (sc->bge_chiprev == BGE_CHIPREV_5703_AX ||
            sc->bge_chiprev == BGE_CHIPREV_5704_AX)
                sc->bge_flags |= BGE_FLAG_ADC_BUG;

        if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0)
                sc->bge_flags |= BGE_FLAG_5704_A0_BUG;

        if (BGE_IS_5705_PLUS(sc)) {
                if (sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
                    sc->bge_asicrev == BGE_ASICREV_BCM5787) {
                        uint32_t product = pci_get_device(dev);

                        if (product != PCI_PRODUCT_BROADCOM_BCM5722 &&
                            product != PCI_PRODUCT_BROADCOM_BCM5756)
                                sc->bge_flags |= BGE_FLAG_JITTER_BUG;
                        if (product == PCI_PRODUCT_BROADCOM_BCM5755M)
                                sc->bge_flags |= BGE_FLAG_ADJUST_TRIM;
                } else if (sc->bge_asicrev != BGE_ASICREV_BCM5906) {
                        sc->bge_flags |= BGE_FLAG_BER_BUG;
                }
        }

        /* Allocate interrupt */
        rid = 0;

        sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
            RF_SHAREABLE | RF_ACTIVE);

        if (sc->bge_irq == NULL) {
                device_printf(dev, "couldn't map interrupt\n");
                error = ENXIO;
                goto fail;
        }

        /*
         * Check if this is a PCI-X or PCI Express device.
         */
        if (BGE_IS_5705_PLUS(sc)) {
                if (pci_is_pcie(dev)) {
                        sc->bge_flags |= BGE_FLAG_PCIE;
                        pcie_set_max_readrq(dev, PCIEM_DEVCTL_MAX_READRQ_4096);
                }
        } else {
                /*
                 * Check if the device is in PCI-X Mode.
                 * (This bit is not valid on PCI Express controllers.)
                 */
                if ((pci_read_config(sc->bge_dev, BGE_PCI_PCISTATE, 4) &
                    BGE_PCISTATE_PCI_BUSMODE) == 0)
                        sc->bge_flags |= BGE_FLAG_PCIX;
        }

        device_printf(dev, "CHIP ID 0x%08x; "
                      "ASIC REV 0x%02x; CHIP REV 0x%02x; %s\n",
                      sc->bge_chipid, sc->bge_asicrev, sc->bge_chiprev,
                      (sc->bge_flags & BGE_FLAG_PCIX) ? "PCI-X"
                      : ((sc->bge_flags & BGE_FLAG_PCIE) ?
                        "PCI-E" : "PCI"));

        ifp = &sc->arpcom.ac_if;
        if_initname(ifp, device_get_name(dev), device_get_unit(dev));

        /* Try to reset the chip. */
        bge_reset(sc);

        if (bge_chipinit(sc)) {
                device_printf(dev, "chip initialization failed\n");
                error = ENXIO;
                goto fail;
        }

        /*
         * Get station address
         */
        error = bge_get_eaddr(sc, ether_addr);
        if (error) {
                device_printf(dev, "failed to read station address\n");
                goto fail;
        }

        /* 5705/5750 limits RX return ring to 512 entries. */
        if (BGE_IS_5705_PLUS(sc))
                sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
        else
                sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;

        error = bge_dma_alloc(sc);
        if (error)
                goto fail;

        /* Set default tuneable values. */
        sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
        sc->bge_rx_coal_ticks = bge_rx_coal_ticks;
        sc->bge_tx_coal_ticks = bge_tx_coal_ticks;
        sc->bge_rx_max_coal_bds = bge_rx_max_coal_bds;
        sc->bge_tx_max_coal_bds = bge_tx_max_coal_bds;

        /* Set up ifnet structure */
        ifp->if_softc = sc;
        ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
        ifp->if_ioctl = bge_ioctl;
        ifp->if_start = bge_start;
#ifdef DEVICE_POLLING
        ifp->if_poll = bge_poll;
#endif
        ifp->if_watchdog = bge_watchdog;
        ifp->if_init = bge_init;
        ifp->if_mtu = ETHERMTU;
        ifp->if_capabilities = IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
        ifq_set_maxlen(&ifp->if_snd, BGE_TX_RING_CNT - 1);
        ifq_set_ready(&ifp->if_snd);

        /*
         * 5700 B0 chips do not support checksumming correctly due
         * to hardware bugs.
         */
        if (sc->bge_chipid != BGE_CHIPID_BCM5700_B0) {
                ifp->if_capabilities |= IFCAP_HWCSUM;
                ifp->if_hwassist = BGE_CSUM_FEATURES;
        }
        ifp->if_capenable = ifp->if_capabilities;

        /*
         * Figure out what sort of media we have by checking the
         * hardware config word in the first 32k of NIC internal memory,
         * or fall back to examining the EEPROM if necessary.
         * Note: on some BCM5700 cards, this value appears to be unset.
         * If that's the case, we have to rely on identifying the NIC
         * by its PCI subsystem ID, as we do below for the SysKonnect
         * SK-9D41.
         */
        if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER)
                hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
        else {
                if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
                    sizeof(hwcfg))) {
                        device_printf(dev, "failed to read EEPROM\n");
                        error = ENXIO;
                        goto fail;
                }
                hwcfg = ntohl(hwcfg);
        }

        if ((hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER)
                sc->bge_flags |= BGE_FLAG_TBI;

        /* The SysKonnect SK-9D41 is a 1000baseSX card. */
        if (pci_get_subvendor(dev) == PCI_PRODUCT_SCHNEIDERKOCH_SK_9D41)
                sc->bge_flags |= BGE_FLAG_TBI;

        if (sc->bge_flags & BGE_FLAG_TBI) {
                /* Fiber: manage media ourselves, no MII PHY involved. */
                ifmedia_init(&sc->bge_ifmedia, IFM_IMASK,
                    bge_ifmedia_upd, bge_ifmedia_sts);
                ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
                ifmedia_add(&sc->bge_ifmedia,
                    IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL);
                ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
                ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO);
                sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
        } else {
                /*
                 * Do transceiver setup.
                 */
                if (mii_phy_probe(dev, &sc->bge_miibus,
                    bge_ifmedia_upd, bge_ifmedia_sts)) {
                        device_printf(dev, "MII without any PHY!\n");
                        error = ENXIO;
                        goto fail;
                }
        }

        /*
         * When using the BCM5701 in PCI-X mode, data corruption has
         * been observed in the first few bytes of some received packets.
         * Aligning the packet buffer in memory eliminates the corruption.
         * Unfortunately, this misaligns the packet payloads.  On platforms
         * which do not support unaligned accesses, we will realign the
         * payloads by copying the received packets.
         */
        if (sc->bge_asicrev == BGE_ASICREV_BCM5701 &&
            (sc->bge_flags & BGE_FLAG_PCIX))
                sc->bge_flags |= BGE_FLAG_RX_ALIGNBUG;

        /* Select the link-state-update handler for this chip/media. */
        if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
            sc->bge_chipid != BGE_CHIPID_BCM5700_B2) {
                sc->bge_link_upd = bge_bcm5700_link_upd;
                sc->bge_link_chg = BGE_MACSTAT_MI_INTERRUPT;
        } else if (sc->bge_flags & BGE_FLAG_TBI) {
                sc->bge_link_upd = bge_tbi_link_upd;
                sc->bge_link_chg = BGE_MACSTAT_LINK_CHANGED;
        } else {
                sc->bge_link_upd = bge_copper_link_upd;
                sc->bge_link_chg = BGE_MACSTAT_LINK_CHANGED;
        }

        /*
         * Create sysctl nodes.
         */
        sysctl_ctx_init(&sc->bge_sysctl_ctx);
        sc->bge_sysctl_tree = SYSCTL_ADD_NODE(&sc->bge_sysctl_ctx,
                                              SYSCTL_STATIC_CHILDREN(_hw),
                                              OID_AUTO,
                                              device_get_nameunit(dev),
                                              CTLFLAG_RD, 0, "");
        if (sc->bge_sysctl_tree == NULL) {
                device_printf(dev, "can't add sysctl node\n");
                error = ENXIO;
                goto fail;
        }

        SYSCTL_ADD_PROC(&sc->bge_sysctl_ctx,
                        SYSCTL_CHILDREN(sc->bge_sysctl_tree),
                        OID_AUTO, "rx_coal_ticks",
                        CTLTYPE_INT | CTLFLAG_RW,
                        sc, 0, bge_sysctl_rx_coal_ticks, "I",
                        "Receive coalescing ticks (usec).");
        SYSCTL_ADD_PROC(&sc->bge_sysctl_ctx,
                        SYSCTL_CHILDREN(sc->bge_sysctl_tree),
                        OID_AUTO, "tx_coal_ticks",
                        CTLTYPE_INT | CTLFLAG_RW,
                        sc, 0, bge_sysctl_tx_coal_ticks, "I",
                        "Transmit coalescing ticks (usec).");
        SYSCTL_ADD_PROC(&sc->bge_sysctl_ctx,
                        SYSCTL_CHILDREN(sc->bge_sysctl_tree),
                        OID_AUTO, "rx_max_coal_bds",
                        CTLTYPE_INT | CTLFLAG_RW,
                        sc, 0, bge_sysctl_rx_max_coal_bds, "I",
                        "Receive max coalesced BD count.");
        SYSCTL_ADD_PROC(&sc->bge_sysctl_ctx,
                        SYSCTL_CHILDREN(sc->bge_sysctl_tree),
                        OID_AUTO, "tx_max_coal_bds",
                        CTLTYPE_INT | CTLFLAG_RW,
                        sc, 0, bge_sysctl_tx_max_coal_bds, "I",
                        "Transmit max coalesced BD count.");

        /*
         * Call MI attach routine.
         */
        ether_ifattach(ifp, ether_addr, NULL);

        error = bus_setup_intr(dev, sc->bge_irq, INTR_MPSAFE,
                               bge_intr, sc, &sc->bge_intrhand,
                               ifp->if_serializer);
        if (error) {
                ether_ifdetach(ifp);
                device_printf(dev, "couldn't set up irq\n");
                goto fail;
        }

        ifp->if_cpuid = ithread_cpuid(rman_get_start(sc->bge_irq));
        KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus);

        return(0);
fail:
        bge_detach(dev);
        return(error);
}
2088
/*
 * Detach routine.  Also used by bge_attach()'s failure path, so every
 * release below is guarded: resources that were never allocated are
 * simply skipped.  If the device is fully attached, the interface is
 * quiesced (under the serializer) before any resources are torn down.
 */
static int
bge_detach(device_t dev)
{
        struct bge_softc *sc = device_get_softc(dev);

        if (device_is_attached(dev)) {
                struct ifnet *ifp = &sc->arpcom.ac_if;

                /* Stop the chip and unhook the interrupt first. */
                lwkt_serialize_enter(ifp->if_serializer);
                bge_stop(sc);
                bge_reset(sc);
                bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand);
                lwkt_serialize_exit(ifp->if_serializer);

                ether_ifdetach(ifp);
        }

        /* Media cleanup: ifmedia for TBI fiber, miibus child for copper. */
        if (sc->bge_flags & BGE_FLAG_TBI)
                ifmedia_removeall(&sc->bge_ifmedia);
        if (sc->bge_miibus)
                device_delete_child(dev, sc->bge_miibus);
        bus_generic_detach(dev);

        if (sc->bge_irq != NULL)
                bus_release_resource(dev, SYS_RES_IRQ, 0, sc->bge_irq);

        if (sc->bge_res != NULL)
                bus_release_resource(dev, SYS_RES_MEMORY,
                    BGE_PCI_BAR0, sc->bge_res);

        if (sc->bge_sysctl_tree != NULL)
                sysctl_ctx_free(&sc->bge_sysctl_ctx);

        bge_dma_free(sc);

        return 0;
}
2126
/*
 * Perform a global chip reset and wait for the on-chip firmware (or
 * the 5906's VCPU) to come back up.  The sequence is order-critical:
 * PCI config state is saved, the reset is issued through a chip-
 * appropriate register-write routine, and then we poll for the
 * firmware handshake and for the PCISTATE register to settle before
 * restoring byte-swap and MAC mode settings.
 */
static void
bge_reset(struct bge_softc *sc)
{
        device_t dev;
        uint32_t cachesize, command, pcistate, reset;
        /* Register writes must bypass the chip differently per bus type. */
        void (*write_op)(struct bge_softc *, uint32_t, uint32_t);
        int i, val = 0;

        dev = sc->bge_dev;

        if (BGE_IS_575X_PLUS(sc) && !BGE_IS_5714_FAMILY(sc) &&
            sc->bge_asicrev != BGE_ASICREV_BCM5906) {
                if (sc->bge_flags & BGE_FLAG_PCIE)
                        write_op = bge_writemem_direct;
                else
                        write_op = bge_writemem_ind;
        } else {
                write_op = bge_writereg_ind;
        }

        /* Save some important PCI state. */
        cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
        command = pci_read_config(dev, BGE_PCI_CMD, 4);
        pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);

        pci_write_config(dev, BGE_PCI_MISC_CTL,
            BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
            BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW, 4);

        /* Disable fastboot on controllers that support it. */
        if (sc->bge_asicrev == BGE_ASICREV_BCM5752 ||
            sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
            sc->bge_asicrev == BGE_ASICREV_BCM5787) {
                if (bootverbose)
                        if_printf(&sc->arpcom.ac_if, "Disabling fastboot\n");
                CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0x0);
        }

        /*
         * Write the magic number to SRAM at offset 0xB50.
         * When firmware finishes its initialization it will
         * write ~BGE_MAGIC_NUMBER to the same location.
         */
        bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);

        reset = BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1);

        /* XXX: Broadcom Linux driver. */
        if (sc->bge_flags & BGE_FLAG_PCIE) {
                if (CSR_READ_4(sc, 0x7e2c) == 0x60)     /* PCIE 1.0 */
                        CSR_WRITE_4(sc, 0x7e2c, 0x20);
                if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
                        /* Prevent PCIE link training during global reset */
                        CSR_WRITE_4(sc, BGE_MISC_CFG, (1<<29));
                        reset |= (1<<29);
                }
        }

        /*
         * Set GPHY Power Down Override to leave GPHY
         * powered up in D0 uninitialized.
         */
        if (BGE_IS_5705_PLUS(sc))
                reset |= 0x04000000;

        /* Issue global reset */
        write_op(sc, BGE_MISC_CFG, reset);

        if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
                /* 5906: kick the VCPU out of halt after the reset. */
                uint32_t status, ctrl;

                status = CSR_READ_4(sc, BGE_VCPU_STATUS);
                CSR_WRITE_4(sc, BGE_VCPU_STATUS,
                    status | BGE_VCPU_STATUS_DRV_RESET);
                ctrl = CSR_READ_4(sc, BGE_VCPU_EXT_CTRL);
                CSR_WRITE_4(sc, BGE_VCPU_EXT_CTRL,
                    ctrl & ~BGE_VCPU_EXT_CTRL_HALT_CPU);
        }

        DELAY(1000);

        /* XXX: Broadcom Linux driver. */
        if (sc->bge_flags & BGE_FLAG_PCIE) {
                if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
                        uint32_t v;

                        DELAY(500000); /* wait for link training to complete */
                        v = pci_read_config(dev, 0xc4, 4);
                        pci_write_config(dev, 0xc4, v | (1<<15), 4);
                }
                /*
                 * Set PCIE max payload size to 128 bytes and
                 * clear error status.
                 */
                pci_write_config(dev, 0xd8, 0xf5000, 4);
        }

        /* Reset some of the PCI state that got zapped by reset */
        pci_write_config(dev, BGE_PCI_MISC_CTL,
            BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
            BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW, 4);
        pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
        pci_write_config(dev, BGE_PCI_CMD, command, 4);
        write_op(sc, BGE_MISC_CFG, (65 << 1));

        /* Enable memory arbiter. */
        if (BGE_IS_5714_FAMILY(sc)) {
                uint32_t val;

                val = CSR_READ_4(sc, BGE_MARB_MODE);
                CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | val);
        } else {
                CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
        }

        if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
                /* 5906 has no firmware handshake; poll the VCPU instead. */
                for (i = 0; i < BGE_TIMEOUT; i++) {
                        val = CSR_READ_4(sc, BGE_VCPU_STATUS);
                        if (val & BGE_VCPU_STATUS_INIT_DONE)
                                break;
                        DELAY(100);
                }
                if (i == BGE_TIMEOUT) {
                        if_printf(&sc->arpcom.ac_if, "reset timed out\n");
                        return;
                }
        } else {
                /*
                 * Poll until we see the 1's complement of the magic number.
                 * This indicates that the firmware initialization
                 * is complete.
                 */
                for (i = 0; i < BGE_FIRMWARE_TIMEOUT; i++) {
                        val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
                        if (val == ~BGE_MAGIC_NUMBER)
                                break;
                        DELAY(10);
                }
                if (i == BGE_FIRMWARE_TIMEOUT) {
                        if_printf(&sc->arpcom.ac_if, "firmware handshake "
                                  "timed out, found 0x%08x\n", val);
                        return;
                }
        }

        /*
         * XXX Wait for the value of the PCISTATE register to
         * return to its original pre-reset state. This is a
         * fairly good indicator of reset completion. If we don't
         * wait for the reset to fully complete, trying to read
         * from the device's non-PCI registers may yield garbage
         * results.
         */
        for (i = 0; i < BGE_TIMEOUT; i++) {
                if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate)
                        break;
                DELAY(10);
        }

        if (sc->bge_flags & BGE_FLAG_PCIE) {
                reset = bge_readmem_ind(sc, 0x7c00);
                bge_writemem_ind(sc, 0x7c00, reset | (1 << 25));
        }

        /* Fix up byte swapping */
        CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS |
            BGE_MODECTL_BYTESWAP_DATA);

        CSR_WRITE_4(sc, BGE_MAC_MODE, 0);

        /*
         * The 5704 in TBI mode apparently needs some special
         * adjustment to insure the SERDES drive level is set
         * to 1.2V.
         */
        if (sc->bge_asicrev == BGE_ASICREV_BCM5704 &&
            (sc->bge_flags & BGE_FLAG_TBI)) {
                uint32_t serdescfg;

                serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG);
                serdescfg = (serdescfg & ~0xFFF) | 0x880;
                CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg);
        }

        /* XXX: Broadcom Linux driver. */
        if ((sc->bge_flags & BGE_FLAG_PCIE) &&
            sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
                uint32_t v;

                v = CSR_READ_4(sc, 0x7c00);
                CSR_WRITE_4(sc, 0x7c00, v | (1<<25));
        }

        DELAY(10000);
}
2322
/*
 * Frame reception handling. This is called if there's a frame
 * on the receive return list.
 *
 * Note: we have to be able to handle two possibilities here:
 * 1) the frame is from the jumbo recieve ring
 * 2) the frame is from the standard receive ring
 */

static void
bge_rxeof(struct bge_softc *sc)
{
        struct ifnet *ifp;
        int stdcnt = 0, jumbocnt = 0;
        struct mbuf_chain chain[MAXCPU];

        /* Fast path: nothing new on the return ring. */
        if (sc->bge_rx_saved_considx ==
            sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx)
                return;

        ether_input_chain_init(chain);

        ifp = &sc->arpcom.ac_if;

        /* Walk the return ring until we catch up with the hw producer. */
        while (sc->bge_rx_saved_considx !=
               sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx) {
                struct bge_rx_bd *cur_rx;
                uint32_t rxidx;
                struct mbuf *m = NULL;
                uint16_t vlan_tag = 0;
                int have_tag = 0;

                cur_rx =
            &sc->bge_ldata.bge_rx_return_ring[sc->bge_rx_saved_considx];

                rxidx = cur_rx->bge_idx;
                BGE_INC(sc->bge_rx_saved_considx, sc->bge_return_ring_cnt);
                logif(rx_pkt);

                if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
                        have_tag = 1;
                        vlan_tag = cur_rx->bge_vlan_tag;
                }

                if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
                        BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
                        jumbocnt++;

                        /* Hardware and software indices must stay in step. */
                        if (rxidx != sc->bge_jumbo) {
                                ifp->if_ierrors++;
                                if_printf(ifp, "sw jumbo index(%d) "
                                    "and hw jumbo index(%d) mismatch, drop!\n",
                                    sc->bge_jumbo, rxidx);
                                bge_setup_rxdesc_jumbo(sc, rxidx);
                                continue;
                        }

                        m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx].bge_mbuf;
                        if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
                                ifp->if_ierrors++;
                                bge_setup_rxdesc_jumbo(sc, sc->bge_jumbo);
                                continue;
                        }
                        /* Refill the slot; on failure recycle the old mbuf. */
                        if (bge_newbuf_jumbo(sc, sc->bge_jumbo, 0)) {
                                ifp->if_ierrors++;
                                bge_setup_rxdesc_jumbo(sc, sc->bge_jumbo);
                                continue;
                        }
                } else {
                        BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
                        stdcnt++;

                        /* Hardware and software indices must stay in step. */
                        if (rxidx != sc->bge_std) {
                                ifp->if_ierrors++;
                                if_printf(ifp, "sw std index(%d) "
                                    "and hw std index(%d) mismatch, drop!\n",
                                    sc->bge_std, rxidx);
                                bge_setup_rxdesc_std(sc, rxidx);
                                continue;
                        }

                        m = sc->bge_cdata.bge_rx_std_chain[rxidx].bge_mbuf;
                        if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
                                ifp->if_ierrors++;
                                bge_setup_rxdesc_std(sc, sc->bge_std);
                                continue;
                        }
                        /* Refill the slot; on failure recycle the old mbuf. */
                        if (bge_newbuf_std(sc, sc->bge_std, 0)) {
                                ifp->if_ierrors++;
                                bge_setup_rxdesc_std(sc, sc->bge_std);
                                continue;
                        }
                }

                ifp->if_ipackets++;
#ifndef __i386__
                /*
                 * The i386 allows unaligned accesses, but for other
                 * platforms we must make sure the payload is aligned.
                 */
                if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) {
                        bcopy(m->m_data, m->m_data + ETHER_ALIGN,
                            cur_rx->bge_len);
                        m->m_data += ETHER_ALIGN;
                }
#endif
                m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
                m->m_pkthdr.rcvif = ifp;

                if (ifp->if_capenable & IFCAP_RXCSUM) {
                        if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
                                m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
                                if ((cur_rx->bge_ip_csum ^ 0xffff) == 0)
                                        m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
                        }
                        if ((cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) &&
                            m->m_pkthdr.len >= BGE_MIN_FRAME) {
                                m->m_pkthdr.csum_data =
                                    cur_rx->bge_tcp_udp_csum;
                                m->m_pkthdr.csum_flags |=
                                    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
                        }
                }

                /*
                 * If we received a packet with a vlan tag, pass it
                 * to vlan_input() instead of ether_input().
                 */
                if (have_tag) {
                        m->m_flags |= M_VLANTAG;
                        m->m_pkthdr.ether_vlantag = vlan_tag;
                        have_tag = vlan_tag = 0;
                }
                ether_input_chain(ifp, m, NULL, chain);
        }

        ether_input_dispatch(chain);

        /* Tell the chip how far we've consumed / refilled the rings. */
        bge_writembx(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
        if (stdcnt)
                bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
        if (jumbocnt)
                bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
}
2467
/*
 * Transmit completion: reclaim mbufs and DMA maps for descriptors the
 * chip has finished with, then re-enable queueing / restart the output
 * path if we freed enough ring space.
 */
static void
bge_txeof(struct bge_softc *sc)
{
        struct bge_tx_bd *cur_tx = NULL;
        struct ifnet *ifp;

        /* Fast path: nothing completed since last time. */
        if (sc->bge_tx_saved_considx ==
            sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx)
                return;

        ifp = &sc->arpcom.ac_if;

        /*
         * Go through our tx ring and free mbufs for those
         * frames that have been sent.
         */
        while (sc->bge_tx_saved_considx !=
               sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx) {
                uint32_t idx = 0;

                idx = sc->bge_tx_saved_considx;
                cur_tx = &sc->bge_ldata.bge_tx_ring[idx];
                /* Only the last descriptor of a frame counts as a packet. */
                if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
                        ifp->if_opackets++;
                if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
                        bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag,
                            sc->bge_cdata.bge_tx_dmamap[idx]);
                        m_freem(sc->bge_cdata.bge_tx_chain[idx]);
                        sc->bge_cdata.bge_tx_chain[idx] = NULL;
                }
                sc->bge_txcnt--;
                BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
                logif(tx_pkt);
        }

        /* Clear OACTIVE once enough descriptors are free again. */
        if (cur_tx != NULL &&
            (BGE_TX_RING_CNT - sc->bge_txcnt) >=
            (BGE_NSEG_RSVD + BGE_NSEG_SPARE))
                ifp->if_flags &= ~IFF_OACTIVE;

        /* Ring fully drained: cancel the watchdog. */
        if (sc->bge_txcnt == 0)
                ifp->if_timer = 0;

        if (!ifq_is_empty(&ifp->if_snd))
                if_devstart(ifp);
}
2514
315fe0ee
MD
2515#ifdef DEVICE_POLLING
2516
/*
 * DEVICE_POLLING handler.  REGISTER/DEREGISTER toggle the hardware
 * interrupt; AND_CHECK_STATUS additionally samples link state before
 * falling through to the normal RX/TX processing done for POLL_ONLY.
 */
static void
bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
        struct bge_softc *sc = ifp->if_softc;
        uint32_t status;

        switch(cmd) {
        case POLL_REGISTER:
                /* Polling takes over: mask the hardware interrupt. */
                bge_disable_intr(sc);
                break;
        case POLL_DEREGISTER:
                bge_enable_intr(sc);
                break;
        case POLL_AND_CHECK_STATUS:
                /*
                 * Process link state changes.
                 */
                status = CSR_READ_4(sc, BGE_MAC_STS);
                if ((status & sc->bge_link_chg) || sc->bge_link_evt) {
                        sc->bge_link_evt = 0;
                        sc->bge_link_upd(sc, status);
                }
                /* fall through */
        case POLL_ONLY:
                if (ifp->if_flags & IFF_RUNNING) {
                        bge_rxeof(sc);
                        bge_txeof(sc);
                }
                break;
        }
}
2548
2549#endif
2550
984263bc 2551static void
33c39a69 2552bge_intr(void *xsc)
984263bc 2553{
bf522c7f 2554 struct bge_softc *sc = xsc;
33c39a69 2555 struct ifnet *ifp = &sc->arpcom.ac_if;
6b880771
SZ
2556 uint32_t status;
2557
2558 logif(intr);
0029ccf6 2559
142ca760
SZ
2560 /*
2561 * Ack the interrupt by writing something to BGE_MBX_IRQ0_LO. Don't
2562 * disable interrupts by writing nonzero like we used to, since with
2563 * our current organization this just gives complications and
2564 * pessimizations for re-enabling interrupts. We used to have races
2565 * instead of the necessary complications. Disabling interrupts
2566 * would just reduce the chance of a status update while we are
2567 * running (by switching to the interrupt-mode coalescence
2568 * parameters), but this chance is already very low so it is more
2569 * efficient to get another interrupt than prevent it.
2570 *
2571 * We do the ack first to ensure another interrupt if there is a
2572 * status update after the ack. We don't check for the status
2573 * changing later because it is more efficient to get another
2574 * interrupt than prevent it, not quite as above (not checking is
2575 * a smaller optimization than not toggling the interrupt enable,
2576 * since checking doesn't involve PCI accesses and toggling require
2577 * the status check). So toggling would probably be a pessimization
2578 * even with MSI. It would only be needed for using a task queue.
2579 */
591dfc77 2580 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
142ca760 2581
984263bc
MD
2582 /*
2583 * Process link state changes.
984263bc 2584 */
db861466
SZ
2585 status = CSR_READ_4(sc, BGE_MAC_STS);
2586 if ((status & sc->bge_link_chg) || sc->bge_link_evt) {
2587 sc->bge_link_evt = 0;
2588 sc->bge_link_upd(sc, status);
984263bc
MD
2589 }
2590
2591 if (ifp->if_flags & IFF_RUNNING) {
2592 /* Check RX return ring producer/consumer */
2593 bge_rxeof(sc);
2594
2595 /* Check TX ring producer/consumer */
2596 bge_txeof(sc);
2597 }
055d06f0
SZ
2598
2599 if (sc->bge_coal_chg)
2600 bge_coal_change(sc);
984263bc
MD
2601}
2602
2603static void
33c39a69 2604bge_tick(void *xsc)
78195a76
MD
2605{
2606 struct bge_softc *sc = xsc;
2607 struct ifnet *ifp = &sc->arpcom.ac_if;
2608
2609 lwkt_serialize_enter(ifp->if_serializer);
984263bc 2610
0ecb11d7 2611 if (BGE_IS_5705_PLUS(sc))
7e40b8c5
HP
2612 bge_stats_update_regs(sc);
2613 else
2614 bge_stats_update(sc);
9a717c15 2615
0ecb11d7 2616 if (sc->bge_flags & BGE_FLAG_TBI) {
db861466
SZ
2617 /*
2618 * Since in TBI mode auto-polling can't be used we should poll
2619 * link status manually. Here we register pending link event
2620 * and trigger interrupt.
2621 */
2622 sc->bge_link_evt++;
2623 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
3f82ed83 2624 } else if (!sc->bge_link) {
db861466 2625 mii_tick(device_get_softc(sc->bge_miibus));
984263bc
MD
2626 }
2627
db861466
SZ
2628 callout_reset(&sc->bge_stat_timer, hz, bge_tick, sc);
2629
2630 lwkt_serialize_exit(ifp->if_serializer);
984263bc
MD
2631}
2632
7e40b8c5 2633static void
33c39a69 2634bge_stats_update_regs(struct bge_softc *sc)
7e40b8c5 2635{
33c39a69 2636 struct ifnet *ifp = &sc->arpcom.ac_if;
7e40b8c5 2637 struct bge_mac_stats_regs stats;
33c39a69 2638 uint32_t *s;
7e40b8c5
HP
2639 int i;
2640
33c39a69 2641 s = (uint32_t *)&stats;
7e40b8c5
HP
2642 for (i = 0; i < sizeof(struct bge_mac_stats_regs); i += 4) {
2643 *s = CSR_READ_4(sc, BGE_RX_STATS + i);
2644 s++;
2645 }
2646
2647 ifp->if_collisions +=
2648 (stats.dot3StatsSingleCollisionFrames +
2649 stats.dot3StatsMultipleCollisionFrames +
2650 stats.dot3StatsExcessiveCollisions +
2651 stats.dot3StatsLateCollisions) -
2652 ifp->if_collisions;
7e40b8c5
HP
2653}
2654
984263bc 2655static void
33c39a69 2656bge_stats_update(struct bge_softc *sc)
984263bc 2657{
33c39a69 2658 struct ifnet *ifp = &sc->arpcom.ac_if;
20c9a969
SZ
2659 bus_size_t stats;
2660
2661 stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;
984263bc 2662
20c9a969
SZ
2663#define READ_STAT(sc, stats, stat) \
2664 CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))
984263bc
MD
2665
2666 ifp->if_collisions +=
20c9a969
SZ
2667 (READ_STAT(sc, stats,
2668 txstats.dot3StatsSingleCollisionFrames.bge_addr_lo) +
2669 READ_STAT(sc, stats,
2670 txstats.dot3StatsMultipleCollisionFrames.bge_addr_lo) +
2671 READ_STAT(sc, stats,
2672 txstats.dot3StatsExcessiveCollisions.bge_addr_lo) +
2673 READ_STAT(sc, stats,
2674 txstats.dot3StatsLateCollisions.bge_addr_lo)) -
984263bc
MD
2675 ifp->if_collisions;
2676
20c9a969
SZ
2677#undef READ_STAT
2678
984263bc
MD
2679#ifdef notdef
2680 ifp->if_collisions +=
2681 (sc->bge_rdata->bge_info.bge_stats.dot3StatsSingleCollisionFrames +
2682 sc->bge_rdata->bge_info.bge_stats.dot3StatsMultipleCollisionFrames +
2683 sc->bge_rdata->bge_info.bge_stats.dot3StatsExcessiveCollisions +
2684 sc->bge_rdata->bge_info.bge_stats.dot3StatsLateCollisions) -
2685 ifp->if_collisions;
2686#endif
984263bc
MD
2687}
2688
/*
 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
 * pointers to descriptors.
 *
 * On entry *txidx is the first free descriptor slot; on successful
 * return it has been advanced past the descriptors consumed.  On
 * failure the mbuf chain is freed, *m_head0 is set to NULL and the
 * bus_dma error code is returned.
 */
static int
bge_encap(struct bge_softc *sc, struct mbuf **m_head0, uint32_t *txidx)
{
	struct bge_tx_bd *d = NULL;
	uint16_t csum_flags = 0;
	bus_dma_segment_t segs[BGE_NSEG_NEW];
	bus_dmamap_t map;
	int error, maxsegs, nsegs, idx, i;
	struct mbuf *m_head = *m_head0;

	/* Translate mbuf checksum-offload requests into TX BD flags. */
	if (m_head->m_pkthdr.csum_flags) {
		if (m_head->m_pkthdr.csum_flags & CSUM_IP)
			csum_flags |= BGE_TXBDFLAG_IP_CSUM;
		if (m_head->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
			csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
		if (m_head->m_flags & M_LASTFRAG)
			csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
		else if (m_head->m_flags & M_FRAG)
			csum_flags |= BGE_TXBDFLAG_IP_FRAG;
	}

	idx = *txidx;
	map = sc->bge_cdata.bge_tx_dmamap[idx];

	/*
	 * Limit the DMA segment count to what is actually free in the
	 * ring, minus the reserved slots; the caller (bge_start) has
	 * already guaranteed at least BGE_NSEG_SPARE remain.
	 */
	maxsegs = (BGE_TX_RING_CNT - sc->bge_txcnt) - BGE_NSEG_RSVD;
	KASSERT(maxsegs >= BGE_NSEG_SPARE,
		("not enough segments %d\n", maxsegs));

	if (maxsegs > BGE_NSEG_NEW)
		maxsegs = BGE_NSEG_NEW;

	/*
	 * Pad outbound frame to BGE_MIN_FRAME for an unusual reason.
	 * The bge hardware will pad out Tx runts to BGE_MIN_FRAME,
	 * but when such padded frames employ the bge IP/TCP checksum
	 * offload, the hardware checksum assist gives incorrect results
	 * (possibly from incorporating its own padding into the UDP/TCP
	 * checksum; who knows). If we pad such runts with zeros, the
	 * onboard checksum comes out correct.
	 */
	if ((csum_flags & BGE_TXBDFLAG_TCP_UDP_CSUM) &&
	    m_head->m_pkthdr.len < BGE_MIN_FRAME) {
		error = m_devpad(m_head, BGE_MIN_FRAME);
		if (error)
			goto back;
	}

	/* Map the mbuf chain for DMA, defragmenting it if necessary. */
	error = bus_dmamap_load_mbuf_defrag(sc->bge_cdata.bge_tx_mtag, map,
			m_head0, segs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
	if (error)
		goto back;

	/* The chain may have been replaced by the defrag above. */
	m_head = *m_head0;
	bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag, map, BUS_DMASYNC_PREWRITE);

	/* Fill one TX descriptor per DMA segment. */
	for (i = 0; ; i++) {
		d = &sc->bge_ldata.bge_tx_ring[idx];

		d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr);
		d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr);
		d->bge_len = segs[i].ds_len;
		d->bge_flags = csum_flags;

		if (i == nsegs - 1)
			break;
		BGE_INC(idx, BGE_TX_RING_CNT);
	}
	/* Mark the last segment as end of packet... */
	d->bge_flags |= BGE_TXBDFLAG_END;

	/* Set vlan tag to the first segment of the packet. */
	d = &sc->bge_ldata.bge_tx_ring[*txidx];
	if (m_head->m_flags & M_VLANTAG) {
		d->bge_flags |= BGE_TXBDFLAG_VLAN_TAG;
		d->bge_vlan_tag = m_head->m_pkthdr.ether_vlantag;
	} else {
		d->bge_vlan_tag = 0;
	}

	/*
	 * Insure that the map for this transmission is placed at
	 * the array index of the last descriptor in this chain.
	 */
	sc->bge_cdata.bge_tx_dmamap[*txidx] = sc->bge_cdata.bge_tx_dmamap[idx];
	sc->bge_cdata.bge_tx_dmamap[idx] = map;
	sc->bge_cdata.bge_tx_chain[idx] = m_head;
	sc->bge_txcnt += nsegs;

	BGE_INC(idx, BGE_TX_RING_CNT);
	*txidx = idx;
back:
	if (error) {
		m_freem(*m_head0);
		*m_head0 = NULL;
	}
	return error;
}
2790
/*
 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit descriptors.
 *
 * Called with ifp->if_serializer held (standard DragonFly ifnet entry).
 * Dequeues packets until the ring fills or the queue drains, then kicks
 * the hardware once with the final producer index.
 */
static void
bge_start(struct ifnet *ifp)
{
	struct bge_softc *sc = ifp->if_softc;
	struct mbuf *m_head = NULL;
	uint32_t prodidx;
	int need_trans;

	/* Bail unless the interface is up and not already stalled. */
	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	prodidx = sc->bge_tx_prodidx;

	need_trans = 0;
	/* A non-NULL chain slot means the ring is full at prodidx. */
	while (sc->bge_cdata.bge_tx_chain[prodidx] == NULL) {
		m_head = ifq_dequeue(&ifp->if_snd, NULL);
		if (m_head == NULL)
			break;

		/*
		 * XXX
		 * The code inside the if() block is never reached since we
		 * must mark CSUM_IP_FRAGS in our if_hwassist to start getting
		 * requests to checksum TCP/UDP in a fragmented packet.
		 *
		 * XXX
		 * safety overkill. If this is a fragmented packet chain
		 * with delayed TCP/UDP checksums, then only encapsulate
		 * it if we have enough descriptors to handle the entire
		 * chain at once.
		 * (paranoia -- may not actually be needed)
		 */
		if ((m_head->m_flags & M_FIRSTFRAG) &&
		    (m_head->m_pkthdr.csum_flags & CSUM_DELAY_DATA)) {
			if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
			    m_head->m_pkthdr.csum_data + BGE_NSEG_RSVD) {
				ifp->if_flags |= IFF_OACTIVE;
				ifq_prepend(&ifp->if_snd, m_head);
				break;
			}
		}

		/*
		 * Sanity check: avoid coming within BGE_NSEG_RSVD
		 * descriptors of the end of the ring. Also make
		 * sure there are BGE_NSEG_SPARE descriptors for
		 * jumbo buffers' defragmentation.
		 */
		if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
		    (BGE_NSEG_RSVD + BGE_NSEG_SPARE)) {
			ifp->if_flags |= IFF_OACTIVE;
			ifq_prepend(&ifp->if_snd, m_head);
			break;
		}

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (bge_encap(sc, &m_head, &prodidx)) {
			/* bge_encap already freed the mbuf on failure. */
			ifp->if_flags |= IFF_OACTIVE;
			ifp->if_oerrors++;
			break;
		}
		need_trans = 1;

		ETHER_BPF_MTAP(ifp, m_head);
	}

	/* Nothing was queued to the ring; don't touch the hardware. */
	if (!need_trans)
		return;

	/* Transmit */
	bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
	/* 5700 b2 errata */
	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
		bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);

	sc->bge_tx_prodidx = prodidx;

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
}
2881
/*
 * Initialize the hardware and mark the interface running.
 *
 * Performs a full stop/reset/reinit sequence: chip reset, block init,
 * MTU and MAC address programming, RX/TX ring setup, and interrupt
 * enable.  Must be called with ifp->if_serializer held; a no-op if
 * the interface is already IFF_RUNNING.
 */
static void
bge_init(void *xsc)
{
	struct bge_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint16_t *m;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if (ifp->if_flags & IFF_RUNNING)
		return;

	/* Cancel pending I/O and flush buffers. */
	bge_stop(sc);
	bge_reset(sc);
	bge_chipinit(sc);

	/*
	 * Init the various state machines, ring
	 * control blocks and firmware.
	 */
	if (bge_blockinit(sc)) {
		if_printf(ifp, "initialization failure\n");
		bge_stop(sc);
		return;
	}

	/* Specify MTU. */
	CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
	    ETHER_HDR_LEN + ETHER_CRC_LEN + EVL_ENCAPLEN);

	/* Load our MAC address (stored as three big-endian 16-bit words). */
	m = (uint16_t *)&sc->arpcom.ac_enaddr[0];
	CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
	CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));

	/* Enable or disable promiscuous mode as needed. */
	bge_setpromisc(sc);

	/* Program multicast filter. */
	bge_setmulti(sc);

	/* Init RX ring. */
	if (bge_init_rx_ring_std(sc)) {
		if_printf(ifp, "RX ring initialization failed\n");
		bge_stop(sc);
		return;
	}

	/*
	 * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
	 * memory to insure that the chip has in fact read the first
	 * entry of the ring.
	 */
	if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
		uint32_t		v, i;
		for (i = 0; i < 10; i++) {
			DELAY(20);
			v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
			if (v == (MCLBYTES - ETHER_ALIGN))
				break;
		}
		if (i == 10)
			if_printf(ifp, "5705 A0 chip failed to load RX ring\n");
	}

	/* Init jumbo RX ring; only needed when the MTU exceeds standard. */
	if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN)) {
		if (bge_init_rx_ring_jumbo(sc)) {
			if_printf(ifp, "Jumbo RX ring initialization failed\n");
			bge_stop(sc);
			return;
		}
	}

	/* Init our RX return ring index */
	sc->bge_rx_saved_considx = 0;

	/* Init TX ring. */
	bge_init_tx_ring(sc);

	/* Turn on transmitter */
	BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE);

	/* Turn on receiver */
	BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);

	/* Tell firmware we're alive. */
	BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);

	/* Enable host interrupts if polling(4) is not enabled. */
	BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
#ifdef DEVICE_POLLING
	if (ifp->if_flags & IFF_POLLING)
		bge_disable_intr(sc);
	else
#endif
	bge_enable_intr(sc);

	/* Kick off autonegotiation / media selection. */
	bge_ifmedia_upd(ifp);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/* Start the once-a-second stats/link tick. */
	callout_reset(&sc->bge_stat_timer, hz, bge_tick, sc);
}
2988
/*
 * Set media options.
 *
 * TBI (1000baseX fiber) cards are programmed directly through MAC
 * registers; copper cards defer to the MII PHY layer.  Returns 0 on
 * success or EINVAL for an unsupported media request.
 */
static int
bge_ifmedia_upd(struct ifnet *ifp)
{
	struct bge_softc *sc = ifp->if_softc;

	/* If this is a 1000baseX NIC, enable the TBI port. */
	if (sc->bge_flags & BGE_FLAG_TBI) {
		struct ifmedia *ifm = &sc->bge_ifmedia;

		if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
			return(EINVAL);

		switch(IFM_SUBTYPE(ifm->ifm_media)) {
		case IFM_AUTO:
			/*
			 * The BCM5704 ASIC appears to have a special
			 * mechanism for programming the autoneg
			 * advertisement registers in TBI mode.
			 */
			if (!bge_fake_autoneg &&
			    sc->bge_asicrev == BGE_ASICREV_BCM5704) {
				uint32_t sgdig;

				CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
				sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
				sgdig |= BGE_SGDIGCFG_AUTO |
					 BGE_SGDIGCFG_PAUSE_CAP |
					 BGE_SGDIGCFG_ASYM_PAUSE;
				/* Latch the config, then clear the SEND bit. */
				CSR_WRITE_4(sc, BGE_SGDIG_CFG,
					    sgdig | BGE_SGDIGCFG_SEND);
				DELAY(5);
				CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig);
			}
			break;
		case IFM_1000_SX:
			/* Select duplex by toggling the half-duplex MAC bit. */
			if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
				BGE_CLRBIT(sc, BGE_MAC_MODE,
				    BGE_MACMODE_HALF_DUPLEX);
			} else {
				BGE_SETBIT(sc, BGE_MAC_MODE,
				    BGE_MACMODE_HALF_DUPLEX);
			}
			break;
		default:
			return(EINVAL);
		}
	} else {
		struct mii_data *mii = device_get_softc(sc->bge_miibus);

		/*
		 * Register a pending link event and clear the cached link
		 * state so the next status check re-evaluates it.
		 */
		sc->bge_link_evt++;
		sc->bge_link = 0;
		if (mii->mii_instance) {
			struct mii_softc *miisc;

			/* Reset every attached PHY before changing media. */
			LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
				mii_phy_reset(miisc);
		}
		mii_mediachg(mii);
	}
	return(0);
}
3053
/*
 * Report current media status.
 *
 * For TBI (fiber) cards the link state is read straight from the MAC
 * status register; copper cards query the MII PHY layer instead.
 */
static void
bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct bge_softc *sc = ifp->if_softc;

	if (sc->bge_flags & BGE_FLAG_TBI) {
		ifmr->ifm_status = IFM_AVALID;
		ifmr->ifm_active = IFM_ETHER;
		if (CSR_READ_4(sc, BGE_MAC_STS) &
		    BGE_MACSTAT_TBI_PCS_SYNCHED) {
			ifmr->ifm_status |= IFM_ACTIVE;
		} else {
			/* No PCS sync: report no media and stop here. */
			ifmr->ifm_active |= IFM_NONE;
			return;
		}

		/* TBI is always 1000baseSX; report the MAC's duplex bit. */
		ifmr->ifm_active |= IFM_1000_SX;
		if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
			ifmr->ifm_active |= IFM_HDX;
		else
			ifmr->ifm_active |= IFM_FDX;
	} else {
		struct mii_data *mii = device_get_softc(sc->bge_miibus);

		/* Copper: let the PHY layer report media and status. */
		mii_pollstat(mii);
		ifmr->ifm_active = mii->mii_media_active;
		ifmr->ifm_status = mii->mii_media_status;
	}
}
3086
/*
 * Handle interface ioctls: MTU changes, up/down and promiscuous /
 * multicast flag changes, media selection, and checksum-offload
 * capability toggling.  Called with ifp->if_serializer held.
 * Returns 0 or an errno value.
 */
static int
bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
{
	struct bge_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int mask, error = 0;

	ASSERT_SERIALIZED(ifp->if_serializer);

	switch (command) {
	case SIOCSIFMTU:
		/* Reject MTUs beyond what the chip variant supports. */
		if ((!BGE_IS_JUMBO_CAPABLE(sc) && ifr->ifr_mtu > ETHERMTU) ||
		    (BGE_IS_JUMBO_CAPABLE(sc) &&
		     ifr->ifr_mtu > BGE_JUMBO_MTU)) {
			error = EINVAL;
		} else if (ifp->if_mtu != ifr->ifr_mtu) {
			/* Force a reinit so the new MTU is programmed. */
			ifp->if_mtu = ifr->ifr_mtu;
			ifp->if_flags &= ~IFF_RUNNING;
			bge_init(sc);
		}
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING) {
				mask = ifp->if_flags ^ sc->bge_if_flags;

				/*
				 * If only the state of the PROMISC flag
				 * changed, then just use the 'set promisc
				 * mode' command instead of reinitializing
				 * the entire NIC. Doing a full re-init
				 * means reloading the firmware and waiting
				 * for it to start up, which may take a
				 * second or two. Similarly for ALLMULTI.
				 */
				if (mask & IFF_PROMISC)
					bge_setpromisc(sc);
				if (mask & IFF_ALLMULTI)
					bge_setmulti(sc);
			} else {
				bge_init(sc);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				bge_stop(sc);
		}
		/* Remember the flags so the next delta can be computed. */
		sc->bge_if_flags = ifp->if_flags;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING)
			bge_setmulti(sc);
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		/* TBI cards own their ifmedia; copper defers to the PHY. */
		if (sc->bge_flags & BGE_FLAG_TBI) {
			error = ifmedia_ioctl(ifp, ifr,
			    &sc->bge_ifmedia, command);
		} else {
			struct mii_data *mii;

			mii = device_get_softc(sc->bge_miibus);
			error = ifmedia_ioctl(ifp, ifr,
					      &mii->mii_media, command);
		}
		break;
	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_HWCSUM) {
			/* Keep if_hwassist in sync with the capability bit. */
			ifp->if_capenable ^= (mask & IFCAP_HWCSUM);
			if (IFCAP_HWCSUM & ifp->if_capenable)
				ifp->if_hwassist = BGE_CSUM_FEATURES;
			else
				ifp->if_hwassist = 0;
		}
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}
	return error;
}
3169
/*
 * Transmit watchdog: fires when a queued transmission has not
 * completed within the if_timer window set by bge_start().  Recover
 * by fully reinitializing the chip and restarting the send queue.
 */
static void
bge_watchdog(struct ifnet *ifp)
{
	struct bge_softc *sc = ifp->if_softc;

	if_printf(ifp, "watchdog timeout -- resetting\n");

	/* Clearing IFF_RUNNING forces bge_init() to do a full reinit. */
	ifp->if_flags &= ~IFF_RUNNING;
	bge_init(sc);

	ifp->if_oerrors++;

	/* Restart transmission if packets are still queued. */
	if (!ifq_is_empty(&ifp->if_snd))
		if_devstart(ifp);
}
3185
/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 *
 * Disables every receive/transmit block and memory manager in the
 * order required by the hardware, masks interrupts, tells the
 * firmware we are going down, and releases all ring buffers.
 * Called with ifp->if_serializer held.
 */
static void
bge_stop(struct bge_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct ifmedia_entry *ifm;
	struct mii_data *mii = NULL;
	int mtmp, itmp;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if ((sc->bge_flags & BGE_FLAG_TBI) == 0)
		mii = device_get_softc(sc->bge_miibus);

	/* Stop the periodic stats/link tick. */
	callout_stop(&sc->bge_stat_timer);

	/*
	 * Disable all of the receiver blocks
	 */
	BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
	/* 5705+ chips lack some of the legacy blocks. */
	if (!BGE_IS_5705_PLUS(sc))
		BGE_CLRBIT(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);

	/*
	 * Disable all of the transmit blocks
	 */
	BGE_CLRBIT(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
	if (!BGE_IS_5705_PLUS(sc))
		BGE_CLRBIT(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);

	/*
	 * Shut down all of the memory managers and related
	 * state machines.
	 */
	BGE_CLRBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
	if (!BGE_IS_5705_PLUS(sc))
		BGE_CLRBIT(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
	if (!BGE_IS_5705_PLUS(sc)) {
		BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
		BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
	}

	/* Disable host interrupts. */
	bge_disable_intr(sc);

	/*
	 * Tell firmware we're shutting down.
	 */
	BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);

	/* Free the RX lists. */
	bge_free_rx_ring_std(sc);

	/* Free jumbo RX list. */
	if (BGE_IS_JUMBO_CAPABLE(sc))
		bge_free_rx_ring_jumbo(sc);

	/* Free TX buffers. */
	bge_free_tx_ring(sc);

	/*
	 * Isolate/power down the PHY, but leave the media selection
	 * unchanged so that things will be put back to normal when
	 * we bring the interface back up.
	 *
	 * 'mii' may be NULL in the following cases:
	 * - The device uses TBI.
	 * - bge_stop() is called by bge_detach().
	 */
	if (mii != NULL) {
		/* Temporarily force IFM_NONE, then restore the old media. */
		itmp = ifp->if_flags;
		ifp->if_flags |= IFF_UP;
		ifm = mii->mii_media.ifm_cur;
		mtmp = ifm->ifm_media;
		ifm->ifm_media = IFM_ETHER|IFM_NONE;
		mii_mediachg(mii);
		ifm->ifm_media = mtmp;
		ifp->if_flags = itmp;
	}

	sc->bge_link = 0;
	sc->bge_coal_chg = 0;

	sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
}
3290
/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 *
 * Device-method shutdown hook: stop and reset the chip under the
 * interface serializer.
 */
static void
bge_shutdown(device_t dev)
{
	struct bge_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	bge_stop(sc);
	bge_reset(sc);
	lwkt_serialize_exit(ifp->if_serializer);
}
3306
3307static int
3308bge_suspend(device_t dev)
3309{
3310 struct bge_softc *sc = device_get_softc(dev);
3311 struct ifnet *ifp = &sc->arpcom.ac_if;
3312
3313 lwkt_serialize_enter(ifp->if_serializer);