Commit | Line | Data |
---|---|---|
984263bc MD |
1 | /* |
2 | * Copyright (c) 2001 Wind River Systems | |
3 | * Copyright (c) 1997, 1998, 1999, 2001 | |
4 | * Bill Paul <wpaul@windriver.com>. All rights reserved. | |
5 | * | |
6 | * Redistribution and use in source and binary forms, with or without | |
7 | * modification, are permitted provided that the following conditions | |
8 | * are met: | |
9 | * 1. Redistributions of source code must retain the above copyright | |
10 | * notice, this list of conditions and the following disclaimer. | |
11 | * 2. Redistributions in binary form must reproduce the above copyright | |
12 | * notice, this list of conditions and the following disclaimer in the | |
13 | * documentation and/or other materials provided with the distribution. | |
14 | * 3. All advertising materials mentioning features or use of this software | |
15 | * must display the following acknowledgement: | |
16 | * This product includes software developed by Bill Paul. | |
17 | * 4. Neither the name of the author nor the names of any co-contributors | |
18 | * may be used to endorse or promote products derived from this software | |
19 | * without specific prior written permission. | |
20 | * | |
21 | * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND | |
22 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | |
23 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | |
24 | * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD | |
25 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | |
26 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | |
27 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | |
28 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | |
29 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | |
30 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF | |
31 | * THE POSSIBILITY OF SUCH DAMAGE. | |
32 | * | |
011c0f93 | 33 | * $FreeBSD: src/sys/dev/bge/if_bge.c,v 1.3.2.39 2005/07/03 03:41:18 silby Exp $ |
a7db2caa | 34 | * $DragonFly: src/sys/dev/netif/bge/if_bge.c,v 1.93 2008/06/25 13:00:09 sephe Exp $ |
1de703da | 35 | * |
984263bc MD |
36 | */ |
37 | ||
38 | /* | |
39 | * Broadcom BCM570x family gigabit ethernet driver for FreeBSD. | |
40 | * | |
41 | * Written by Bill Paul <wpaul@windriver.com> | |
42 | * Senior Engineer, Wind River Systems | |
43 | */ | |
44 | ||
45 | /* | |
46 | * The Broadcom BCM5700 is based on technology originally developed by | |
47 | * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet | |
48 | * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has | |
49 | * two on-board MIPS R4000 CPUs and can have as much as 16MB of external | |
50 | * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo | |
51 | * frames, highly configurable RX filtering, and 16 RX and TX queues | |
52 | * (which, along with RX filter rules, can be used for QOS applications). | |
53 | * Other features, such as TCP segmentation, may be available as part | |
54 | * of value-added firmware updates. Unlike the Tigon I and Tigon II, | |
55 | * firmware images can be stored in hardware and need not be compiled | |
56 | * into the driver. | |
57 | * | |
58 | * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will | |
59 | * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus. | |
60 | * | |
61 | * The BCM5701 is a single-chip solution incorporating both the BCM5700 | |
62 | * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701 | |
63 | * does not support external SSRAM. | |
64 | * | |
65 | * Broadcom also produces a variation of the BCM5700 under the "Altima" | |
66 | * brand name, which is functionally similar but lacks PCI-X support. | |
67 | * | |
68 | * Without external SSRAM, you can only have at most 4 TX rings, | |
69 | * and the use of the mini RX ring is disabled. This seems to imply | |
70 | * that these features are simply not available on the BCM5701. As a | |
71 | * result, this driver does not implement any support for the mini RX | |
72 | * ring. | |
73 | */ | |
74 | ||
315fe0ee | 75 | #include "opt_polling.h" |
a7db2caa SZ |
76 | #include "opt_ethernet.h" |
77 | ||
984263bc | 78 | #include <sys/param.h> |
62be1357 | 79 | #include <sys/bus.h> |
20c9a969 | 80 | #include <sys/endian.h> |
62be1357 | 81 | #include <sys/kernel.h> |
6b880771 | 82 | #include <sys/ktr.h> |
9db4b353 | 83 | #include <sys/interrupt.h> |
984263bc MD |
84 | #include <sys/mbuf.h> |
85 | #include <sys/malloc.h> | |
984263bc | 86 | #include <sys/queue.h> |
62be1357 | 87 | #include <sys/rman.h> |
16dca0df | 88 | #include <sys/serialize.h> |
62be1357 SZ |
89 | #include <sys/socket.h> |
90 | #include <sys/sockio.h> | |
055d06f0 | 91 | #include <sys/sysctl.h> |
984263bc | 92 | |
62be1357 SZ |
93 | #include <net/bpf.h> |
94 | #include <net/ethernet.h> | |
984263bc MD |
95 | #include <net/if.h> |
96 | #include <net/if_arp.h> | |
984263bc MD |
97 | #include <net/if_dl.h> |
98 | #include <net/if_media.h> | |
984263bc | 99 | #include <net/if_types.h> |
62be1357 | 100 | #include <net/ifq_var.h> |
1f2de5d4 | 101 | #include <net/vlan/if_vlan_var.h> |
b637f170 | 102 | #include <net/vlan/if_vlan_ether.h> |
984263bc | 103 | |
1f2de5d4 MD |
104 | #include <dev/netif/mii_layer/mii.h> |
105 | #include <dev/netif/mii_layer/miivar.h> | |
1f2de5d4 | 106 | #include <dev/netif/mii_layer/brgphyreg.h> |
984263bc | 107 | |
f952ab63 | 108 | #include <bus/pci/pcidevs.h> |
1f2de5d4 MD |
109 | #include <bus/pci/pcireg.h> |
110 | #include <bus/pci/pcivar.h> | |
984263bc | 111 | |
62be1357 SZ |
112 | #include <dev/netif/bge/if_bgereg.h> |
113 | ||
114 | /* "device miibus" required. See GENERIC if you get errors here. */ | |
115 | #include "miibus_if.h" | |
984263bc MD |
116 | |
117 | #define BGE_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) | |
cb623c48 | 118 | #define BGE_MIN_FRAME 60 |
984263bc | 119 | |
984263bc MD |
120 | /* |
121 | * Various supported device vendors/types and their names. Note: the | |
122 | * spec seems to indicate that the hardware still has Alteon's vendor | |
123 | * ID burned into it, though it will always be overridden by the vendor | |
124 | * ID in the EEPROM. Just to be safe, we cover all possibilities. | |
125 | */ | |
126 | #define BGE_DEVDESC_MAX 64 /* Maximum device description length */ | |
127 | ||
128 | static struct bge_type bge_devs[] = { | |
0ecb11d7 SZ |
129 | { PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3C996, |
130 | "3COM 3C996 Gigabit Ethernet" }, | |
131 | ||
f952ab63 | 132 | { PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_BCM5700, |
9a6ee7e2 JS |
133 | "Alteon BCM5700 Gigabit Ethernet" }, |
134 | { PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_BCM5701, | |
135 | "Alteon BCM5701 Gigabit Ethernet" }, | |
0ecb11d7 SZ |
136 | |
137 | { PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1000, | |
138 | "Altima AC1000 Gigabit Ethernet" }, | |
139 | { PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1001, | |
140 | "Altima AC1002 Gigabit Ethernet" }, | |
141 | { PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC9100, | |
142 | "Altima AC9100 Gigabit Ethernet" }, | |
143 | ||
144 | { PCI_VENDOR_APPLE, PCI_PRODUCT_APPLE_BCM5701, | |
145 | "Apple BCM5701 Gigabit Ethernet" }, | |
146 | ||
f952ab63 | 147 | { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5700, |
984263bc | 148 | "Broadcom BCM5700 Gigabit Ethernet" }, |
f952ab63 | 149 | { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5701, |
984263bc | 150 | "Broadcom BCM5701 Gigabit Ethernet" }, |
0ecb11d7 SZ |
151 | { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702, |
152 | "Broadcom BCM5702 Gigabit Ethernet" }, | |
f952ab63 | 153 | { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702X, |
984263bc | 154 | "Broadcom BCM5702X Gigabit Ethernet" }, |
9a6ee7e2 JS |
155 | { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702_ALT, |
156 | "Broadcom BCM5702 Gigabit Ethernet" }, | |
0ecb11d7 SZ |
157 | { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703, |
158 | "Broadcom BCM5703 Gigabit Ethernet" }, | |
f952ab63 JS |
159 | { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703X, |
160 | "Broadcom BCM5703X Gigabit Ethernet" }, | |
9a6ee7e2 JS |
161 | { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703A3, |
162 | "Broadcom BCM5703 Gigabit Ethernet" }, | |
f952ab63 | 163 | { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704C, |
984263bc | 164 | "Broadcom BCM5704C Dual Gigabit Ethernet" }, |
f952ab63 | 165 | { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704S, |
984263bc | 166 | "Broadcom BCM5704S Dual Gigabit Ethernet" }, |
0ecb11d7 SZ |
167 | { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704S_ALT, |
168 | "Broadcom BCM5704S Dual Gigabit Ethernet" }, | |
f952ab63 | 169 | { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705, |
7e40b8c5 | 170 | "Broadcom BCM5705 Gigabit Ethernet" }, |
0ecb11d7 SZ |
171 | { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705F, |
172 | "Broadcom BCM5705F Gigabit Ethernet" }, | |
9a6ee7e2 JS |
173 | { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705K, |
174 | "Broadcom BCM5705K Gigabit Ethernet" }, | |
f952ab63 | 175 | { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705M, |
7e40b8c5 | 176 | "Broadcom BCM5705M Gigabit Ethernet" }, |
9a6ee7e2 | 177 | { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705M_ALT, |
7e40b8c5 | 178 | "Broadcom BCM5705M Gigabit Ethernet" }, |
92decf65 | 179 | { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5714, |
9a6ee7e2 | 180 | "Broadcom BCM5714C Gigabit Ethernet" }, |
0ecb11d7 SZ |
181 | { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5714S, |
182 | "Broadcom BCM5714S Gigabit Ethernet" }, | |
183 | { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5715, | |
184 | "Broadcom BCM5715 Gigabit Ethernet" }, | |
185 | { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5715S, | |
186 | "Broadcom BCM5715S Gigabit Ethernet" }, | |
187 | { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5720, | |
188 | "Broadcom BCM5720 Gigabit Ethernet" }, | |
9a6ee7e2 JS |
189 | { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5721, |
190 | "Broadcom BCM5721 Gigabit Ethernet" }, | |
0ecb11d7 SZ |
191 | { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5722, |
192 | "Broadcom BCM5722 Gigabit Ethernet" }, | |
9a6ee7e2 JS |
193 | { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5750, |
194 | "Broadcom BCM5750 Gigabit Ethernet" }, | |
195 | { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5750M, | |
196 | "Broadcom BCM5750M Gigabit Ethernet" }, | |
b7bef88c JS |
197 | { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751, |
198 | "Broadcom BCM5751 Gigabit Ethernet" }, | |
0ecb11d7 SZ |
199 | { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751F, |
200 | "Broadcom BCM5751F Gigabit Ethernet" }, | |
9a6ee7e2 JS |
201 | { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751M, |
202 | "Broadcom BCM5751M Gigabit Ethernet" }, | |
bae5fe9a SZ |
203 | { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5752, |
204 | "Broadcom BCM5752 Gigabit Ethernet" }, | |
0ecb11d7 SZ |
205 | { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5752M, |
206 | "Broadcom BCM5752M Gigabit Ethernet" }, | |
207 | { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5753, | |
208 | "Broadcom BCM5753 Gigabit Ethernet" }, | |
209 | { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5753F, | |
210 | "Broadcom BCM5753F Gigabit Ethernet" }, | |
211 | { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5753M, | |
212 | "Broadcom BCM5753M Gigabit Ethernet" }, | |
213 | { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5754, | |
214 | "Broadcom BCM5754 Gigabit Ethernet" }, | |
215 | { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5754M, | |
216 | "Broadcom BCM5754M Gigabit Ethernet" }, | |
217 | { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5755, | |
218 | "Broadcom BCM5755 Gigabit Ethernet" }, | |
219 | { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5755M, | |
220 | "Broadcom BCM5755M Gigabit Ethernet" }, | |
221 | { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5756, | |
222 | "Broadcom BCM5756 Gigabit Ethernet" }, | |
223 | { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5780, | |
224 | "Broadcom BCM5780 Gigabit Ethernet" }, | |
225 | { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5780S, | |
226 | "Broadcom BCM5780S Gigabit Ethernet" }, | |
227 | { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5781, | |
228 | "Broadcom BCM5781 Gigabit Ethernet" }, | |
f952ab63 | 229 | { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5782, |
7e40b8c5 | 230 | "Broadcom BCM5782 Gigabit Ethernet" }, |
0ecb11d7 SZ |
231 | { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5786, |
232 | "Broadcom BCM5786 Gigabit Ethernet" }, | |
233 | { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5787, | |
234 | "Broadcom BCM5787 Gigabit Ethernet" }, | |
235 | { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5787F, | |
236 | "Broadcom BCM5787F Gigabit Ethernet" }, | |
237 | { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5787M, | |
238 | "Broadcom BCM5787M Gigabit Ethernet" }, | |
9a6ee7e2 | 239 | { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5788, |
f952ab63 | 240 | "Broadcom BCM5788 Gigabit Ethernet" }, |
9a6ee7e2 JS |
241 | { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5789, |
242 | "Broadcom BCM5789 Gigabit Ethernet" }, | |
f952ab63 JS |
243 | { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5901, |
244 | "Broadcom BCM5901 Fast Ethernet" }, | |
245 | { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5901A2, | |
246 | "Broadcom BCM5901A2 Fast Ethernet" }, | |
0ecb11d7 SZ |
247 | { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5903M, |
248 | "Broadcom BCM5903M Fast Ethernet" }, | |
249 | ||
f952ab63 | 250 | { PCI_VENDOR_SCHNEIDERKOCH, PCI_PRODUCT_SCHNEIDERKOCH_SK_9DX1, |
984263bc | 251 | "SysKonnect Gigabit Ethernet" }, |
0ecb11d7 | 252 | |
984263bc MD |
253 | { 0, 0, NULL } |
254 | }; | |
255 | ||
0ecb11d7 SZ |
256 | #define BGE_IS_JUMBO_CAPABLE(sc) ((sc)->bge_flags & BGE_FLAG_JUMBO) |
257 | #define BGE_IS_5700_FAMILY(sc) ((sc)->bge_flags & BGE_FLAG_5700_FAMILY) | |
258 | #define BGE_IS_5705_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_5705_PLUS) | |
259 | #define BGE_IS_5714_FAMILY(sc) ((sc)->bge_flags & BGE_FLAG_5714_FAMILY) | |
260 | #define BGE_IS_575X_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_575X_PLUS) | |
261 | ||
33c39a69 JS |
262 | static int bge_probe(device_t); |
263 | static int bge_attach(device_t); | |
264 | static int bge_detach(device_t); | |
33c39a69 JS |
265 | static void bge_txeof(struct bge_softc *); |
266 | static void bge_rxeof(struct bge_softc *); | |
267 | ||
268 | static void bge_tick(void *); | |
269 | static void bge_stats_update(struct bge_softc *); | |
270 | static void bge_stats_update_regs(struct bge_softc *); | |
4a607ed6 | 271 | static int bge_encap(struct bge_softc *, struct mbuf **, uint32_t *); |
33c39a69 | 272 | |
315fe0ee MD |
273 | #ifdef DEVICE_POLLING |
274 | static void bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count); | |
275 | #endif | |
33c39a69 | 276 | static void bge_intr(void *); |
ba39cc82 SZ |
277 | static void bge_enable_intr(struct bge_softc *); |
278 | static void bge_disable_intr(struct bge_softc *); | |
33c39a69 JS |
279 | static void bge_start(struct ifnet *); |
280 | static int bge_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *); | |
281 | static void bge_init(void *); | |
282 | static void bge_stop(struct bge_softc *); | |
283 | static void bge_watchdog(struct ifnet *); | |
284 | static void bge_shutdown(device_t); | |
aa65409c SZ |
285 | static int bge_suspend(device_t); |
286 | static int bge_resume(device_t); | |
33c39a69 JS |
287 | static int bge_ifmedia_upd(struct ifnet *); |
288 | static void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *); | |
289 | ||
290 | static uint8_t bge_eeprom_getbyte(struct bge_softc *, uint32_t, uint8_t *); | |
291 | static int bge_read_eeprom(struct bge_softc *, caddr_t, uint32_t, size_t); | |
292 | ||
33c39a69 | 293 | static void bge_setmulti(struct bge_softc *); |
6439b28a | 294 | static void bge_setpromisc(struct bge_softc *); |
33c39a69 | 295 | |
33c39a69 JS |
296 | static int bge_alloc_jumbo_mem(struct bge_softc *); |
297 | static void bge_free_jumbo_mem(struct bge_softc *); | |
2aa9b12f JS |
298 | static struct bge_jslot |
299 | *bge_jalloc(struct bge_softc *); | |
300 | static void bge_jfree(void *); | |
301 | static void bge_jref(void *); | |
33c39a69 JS |
302 | static int bge_newbuf_std(struct bge_softc *, int, struct mbuf *); |
303 | static int bge_newbuf_jumbo(struct bge_softc *, int, struct mbuf *); | |
304 | static int bge_init_rx_ring_std(struct bge_softc *); | |
305 | static void bge_free_rx_ring_std(struct bge_softc *); | |
306 | static int bge_init_rx_ring_jumbo(struct bge_softc *); | |
307 | static void bge_free_rx_ring_jumbo(struct bge_softc *); | |
308 | static void bge_free_tx_ring(struct bge_softc *); | |
309 | static int bge_init_tx_ring(struct bge_softc *); | |
310 | ||
311 | static int bge_chipinit(struct bge_softc *); | |
312 | static int bge_blockinit(struct bge_softc *); | |
984263bc | 313 | |
33c39a69 JS |
314 | static uint32_t bge_readmem_ind(struct bge_softc *, uint32_t); |
315 | static void bge_writemem_ind(struct bge_softc *, uint32_t, uint32_t); | |
984263bc | 316 | #ifdef notdef |
33c39a69 | 317 | static uint32_t bge_readreg_ind(struct bge_softc *, uint32_t); |
984263bc | 318 | #endif |
33c39a69 | 319 | static void bge_writereg_ind(struct bge_softc *, uint32_t, uint32_t); |
0ecb11d7 | 320 | static void bge_writemem_direct(struct bge_softc *, uint32_t, uint32_t); |
984263bc | 321 | |
33c39a69 JS |
322 | static int bge_miibus_readreg(device_t, int, int); |
323 | static int bge_miibus_writereg(device_t, int, int, int); | |
324 | static void bge_miibus_statchg(device_t); | |
db861466 SZ |
325 | static void bge_bcm5700_link_upd(struct bge_softc *, uint32_t); |
326 | static void bge_tbi_link_upd(struct bge_softc *, uint32_t); | |
327 | static void bge_copper_link_upd(struct bge_softc *, uint32_t); | |
984263bc | 328 | |
33c39a69 | 329 | static void bge_reset(struct bge_softc *); |
984263bc | 330 | |
20c9a969 SZ |
331 | static void bge_dma_map_addr(void *, bus_dma_segment_t *, int, int); |
332 | static void bge_dma_map_mbuf(void *, bus_dma_segment_t *, int, | |
333 | bus_size_t, int); | |
334 | static int bge_dma_alloc(struct bge_softc *); | |
335 | static void bge_dma_free(struct bge_softc *); | |
336 | static int bge_dma_block_alloc(struct bge_softc *, bus_size_t, | |
337 | bus_dma_tag_t *, bus_dmamap_t *, | |
338 | void **, bus_addr_t *); | |
339 | static void bge_dma_block_free(bus_dma_tag_t, bus_dmamap_t, void *); | |
340 | ||
055d06f0 SZ |
341 | static void bge_coal_change(struct bge_softc *); |
342 | static int bge_sysctl_rx_coal_ticks(SYSCTL_HANDLER_ARGS); | |
343 | static int bge_sysctl_tx_coal_ticks(SYSCTL_HANDLER_ARGS); | |
344 | static int bge_sysctl_rx_max_coal_bds(SYSCTL_HANDLER_ARGS); | |
345 | static int bge_sysctl_tx_max_coal_bds(SYSCTL_HANDLER_ARGS); | |
346 | static int bge_sysctl_coal_chg(SYSCTL_HANDLER_ARGS, uint32_t *, uint32_t); | |
347 | ||
5c56d5d8 SZ |
348 | /* |
349 | * Set the following tunable to 1 for some IBM blade servers with the DNLK | |
350 | * switch module. Auto negotiation is broken for those configurations. | |
351 | */ | |
352 | static int bge_fake_autoneg = 0; | |
353 | TUNABLE_INT("hw.bge.fake_autoneg", &bge_fake_autoneg); | |
354 | ||
055d06f0 SZ |
355 | /* Interrupt moderation control variables. */ |
356 | static int bge_rx_coal_ticks = 150; /* usec */ | |
357 | static int bge_tx_coal_ticks = 1000000; /* usec */ | |
358 | static int bge_rx_max_coal_bds = 16; | |
359 | static int bge_tx_max_coal_bds = 32; | |
360 | ||
361 | TUNABLE_INT("hw.bge.rx_coal_ticks", &bge_rx_coal_ticks); | |
362 | TUNABLE_INT("hw.bge.tx_coal_ticks", &bge_tx_coal_ticks); | |
363 | TUNABLE_INT("hw.bge.rx_max_coal_bds", &bge_rx_max_coal_bds); | |
364 | TUNABLE_INT("hw.bge.tx_max_coal_bds", &bge_tx_max_coal_bds); | |
365 | ||
6b880771 SZ |
366 | #if !defined(KTR_IF_BGE) |
367 | #define KTR_IF_BGE KTR_ALL | |
368 | #endif | |
369 | KTR_INFO_MASTER(if_bge); | |
370 | KTR_INFO(KTR_IF_BGE, if_bge, intr, 0, "intr", 0); | |
371 | KTR_INFO(KTR_IF_BGE, if_bge, rx_pkt, 1, "rx_pkt", 0); | |
372 | KTR_INFO(KTR_IF_BGE, if_bge, tx_pkt, 2, "tx_pkt", 0); | |
373 | #define logif(name) KTR_LOG(if_bge_ ## name) | |
374 | ||
984263bc MD |
375 | static device_method_t bge_methods[] = { |
376 | /* Device interface */ | |
377 | DEVMETHOD(device_probe, bge_probe), | |
378 | DEVMETHOD(device_attach, bge_attach), | |
379 | DEVMETHOD(device_detach, bge_detach), | |
380 | DEVMETHOD(device_shutdown, bge_shutdown), | |
aa65409c SZ |
381 | DEVMETHOD(device_suspend, bge_suspend), |
382 | DEVMETHOD(device_resume, bge_resume), | |
984263bc MD |
383 | |
384 | /* bus interface */ | |
385 | DEVMETHOD(bus_print_child, bus_generic_print_child), | |
386 | DEVMETHOD(bus_driver_added, bus_generic_driver_added), | |
387 | ||
388 | /* MII interface */ | |
389 | DEVMETHOD(miibus_readreg, bge_miibus_readreg), | |
390 | DEVMETHOD(miibus_writereg, bge_miibus_writereg), | |
391 | DEVMETHOD(miibus_statchg, bge_miibus_statchg), | |
392 | ||
393 | { 0, 0 } | |
394 | }; | |
395 | ||
33c39a69 | 396 | static DEFINE_CLASS_0(bge, bge_driver, bge_methods, sizeof(struct bge_softc)); |
984263bc MD |
397 | static devclass_t bge_devclass; |
398 | ||
32832096 | 399 | DECLARE_DUMMY_MODULE(if_bge); |
984263bc MD |
400 | DRIVER_MODULE(if_bge, pci, bge_driver, bge_devclass, 0, 0); |
401 | DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0); | |
402 | ||
33c39a69 JS |
403 | static uint32_t |
404 | bge_readmem_ind(struct bge_softc *sc, uint32_t off) | |
984263bc | 405 | { |
33c39a69 | 406 | device_t dev = sc->bge_dev; |
0ecb11d7 | 407 | uint32_t val; |
984263bc MD |
408 | |
409 | pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4); | |
0ecb11d7 SZ |
410 | val = pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4); |
411 | pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4); | |
412 | return (val); | |
984263bc MD |
413 | } |
414 | ||
415 | static void | |
33c39a69 | 416 | bge_writemem_ind(struct bge_softc *sc, uint32_t off, uint32_t val) |
984263bc | 417 | { |
33c39a69 | 418 | device_t dev = sc->bge_dev; |
984263bc MD |
419 | |
420 | pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4); | |
421 | pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4); | |
0ecb11d7 | 422 | pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4); |
984263bc MD |
423 | } |
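/*
 * Usage sketch (added for clarity, not original code): a word of
 * NIC-internal memory is read by programming the 32K window base and then
 * touching the data port, which is what bge_readmem_ind() wraps:
 *
 *	val = bge_readmem_ind(sc, BGE_STATS_BLOCK);
 *
 * The base is restored to 0 afterwards so that later fixed-window accesses
 * still see the first 32K of NIC RAM.
 */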
424 | ||
425 | #ifdef notdef | |
33c39a69 JS |
426 | static uint32_t |
427 | bge_readreg_ind(struct bge_softc *sc, uint32_t off) | |
984263bc | 428 | { |
33c39a69 | 429 | device_t dev = sc->bge_dev; |
984263bc MD |
430 | |
431 | pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4); | |
432 | return(pci_read_config(dev, BGE_PCI_REG_DATA, 4)); | |
433 | } | |
434 | #endif | |
435 | ||
436 | static void | |
33c39a69 | 437 | bge_writereg_ind(struct bge_softc *sc, uint32_t off, uint32_t val) |
984263bc | 438 | { |
33c39a69 | 439 | device_t dev = sc->bge_dev; |
984263bc MD |
440 | |
441 | pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4); | |
442 | pci_write_config(dev, BGE_PCI_REG_DATA, val, 4); | |
984263bc MD |
443 | } |
444 | ||
0ecb11d7 SZ |
445 | static void |
446 | bge_writemem_direct(struct bge_softc *sc, uint32_t off, uint32_t val) | |
447 | { | |
448 | CSR_WRITE_4(sc, off, val); | |
449 | } | |
450 | ||
984263bc MD |
451 | /* |
452 | * Read a byte of data stored in the EEPROM at address 'addr.' The | |
453 | * BCM570x supports both the traditional bitbang interface and an | |
454 | * auto access interface for reading the EEPROM. We use the auto | |
455 | * access method. | |
456 | */ | |
33c39a69 JS |
457 | static uint8_t |
458 | bge_eeprom_getbyte(struct bge_softc *sc, uint32_t addr, uint8_t *dest) | |
984263bc MD |
459 | { |
460 | int i; | |
33c39a69 | 461 | uint32_t byte = 0; |
984263bc MD |
462 | |
463 | /* | |
464 | * Enable use of auto EEPROM access so we can avoid | |
465 | * having to use the bitbang method. | |
466 | */ | |
467 | BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM); | |
468 | ||
469 | /* Reset the EEPROM, load the clock period. */ | |
470 | CSR_WRITE_4(sc, BGE_EE_ADDR, | |
471 | BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL)); | |
472 | DELAY(20); | |
473 | ||
474 | /* Issue the read EEPROM command. */ | |
475 | CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr); | |
476 | ||
477 | /* Wait for completion */ | |
478 | for(i = 0; i < BGE_TIMEOUT * 10; i++) { | |
479 | DELAY(10); | |
480 | if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE) | |
481 | break; | |
482 | } | |
483 | ||
484 | if (i == BGE_TIMEOUT * 10) { | |
c6fd6f3b | 485 | if_printf(&sc->arpcom.ac_if, "eeprom read timed out\n"); |
7b47d9c2 | 486 | return(1); |
984263bc MD |
487 | } |
488 | ||
489 | /* Get result. */ | |
490 | byte = CSR_READ_4(sc, BGE_EE_DATA); | |
491 | ||
492 | *dest = (byte >> ((addr % 4) * 8)) & 0xFF; | |
493 | ||
494 | return(0); | |
495 | } | |
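/*
 * Worked example (explanatory comment, not original code): BGE_EE_DATA
 * returns the whole 32-bit word containing the requested byte, so the
 * shift above selects the byte lane.  For addr = 6, addr % 4 == 2, the
 * word is shifted right by 16 bits and bits 23:16 land in *dest.
 */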
496 | ||
497 | /* | |
498 | * Read a sequence of bytes from the EEPROM. | |
499 | */ | |
500 | static int | |
33c39a69 | 501 | bge_read_eeprom(struct bge_softc *sc, caddr_t dest, uint32_t off, size_t len) |
984263bc | 502 | { |
33c39a69 JS |
503 | size_t i; |
504 | int err; | |
505 | uint8_t byte; | |
984263bc | 506 | |
33c39a69 | 507 | for (byte = 0, err = 0, i = 0; i < len; i++) { |
984263bc MD |
508 | err = bge_eeprom_getbyte(sc, off + i, &byte); |
509 | if (err) | |
510 | break; | |
511 | *(dest + i) = byte; | |
512 | } | |
513 | ||
514 | return(err ? 1 : 0); | |
515 | } | |
516 | ||
517 | static int | |
33c39a69 | 518 | bge_miibus_readreg(device_t dev, int phy, int reg) |
984263bc MD |
519 | { |
520 | struct bge_softc *sc; | |
521 | struct ifnet *ifp; | |
33c39a69 | 522 | uint32_t val, autopoll; |
984263bc MD |
523 | int i; |
524 | ||
525 | sc = device_get_softc(dev); | |
526 | ifp = &sc->arpcom.ac_if; | |
527 | ||
7e40b8c5 HP |
528 | /* |
529 | * Broadcom's own driver always assumes the internal | |
530 | * PHY is at GMII address 1. On some chips, the PHY responds | |
531 | * to accesses at all addresses, which could cause us to | |
532 | * bogusly attach the PHY 32 times at probe type. Always | |
533 | * restricting the lookup to address 1 is simpler than | |
534 | * trying to figure out which chips revisions should be | |
535 | * special-cased. | |
536 | */ | |
984263bc | 537 | if (phy != 1) |
7e40b8c5 | 538 | return(0); |
984263bc MD |
539 | |
540 | /* Reading with autopolling on may trigger PCI errors */ | |
541 | autopoll = CSR_READ_4(sc, BGE_MI_MODE); | |
542 | if (autopoll & BGE_MIMODE_AUTOPOLL) { | |
543 | BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL); | |
544 | DELAY(40); | |
545 | } | |
546 | ||
547 | CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY| | |
548 | BGE_MIPHY(phy)|BGE_MIREG(reg)); | |
549 | ||
550 | for (i = 0; i < BGE_TIMEOUT; i++) { | |
551 | val = CSR_READ_4(sc, BGE_MI_COMM); | |
552 | if (!(val & BGE_MICOMM_BUSY)) | |
553 | break; | |
554 | } | |
555 | ||
556 | if (i == BGE_TIMEOUT) { | |
c6fd6f3b | 557 | if_printf(ifp, "PHY read timed out\n"); |
984263bc MD |
558 | val = 0; |
559 | goto done; | |
560 | } | |
561 | ||
562 | val = CSR_READ_4(sc, BGE_MI_COMM); | |
563 | ||
564 | done: | |
565 | if (autopoll & BGE_MIMODE_AUTOPOLL) { | |
566 | BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL); | |
567 | DELAY(40); | |
568 | } | |
569 | ||
570 | if (val & BGE_MICOMM_READFAIL) | |
571 | return(0); | |
572 | ||
573 | return(val & 0xFFFF); | |
574 | } | |
575 | ||
576 | static int | |
33c39a69 | 577 | bge_miibus_writereg(device_t dev, int phy, int reg, int val) |
984263bc MD |
578 | { |
579 | struct bge_softc *sc; | |
33c39a69 | 580 | uint32_t autopoll; |
984263bc MD |
581 | int i; |
582 | ||
583 | sc = device_get_softc(dev); | |
584 | ||
585 | /* Reading with autopolling on may trigger PCI errors */ | |
586 | autopoll = CSR_READ_4(sc, BGE_MI_MODE); | |
587 | if (autopoll & BGE_MIMODE_AUTOPOLL) { | |
588 | BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL); | |
589 | DELAY(40); | |
590 | } | |
591 | ||
592 | CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY| | |
593 | BGE_MIPHY(phy)|BGE_MIREG(reg)|val); | |
594 | ||
595 | for (i = 0; i < BGE_TIMEOUT; i++) { | |
596 | if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) | |
597 | break; | |
598 | } | |
599 | ||
600 | if (autopoll & BGE_MIMODE_AUTOPOLL) { | |
601 | BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL); | |
602 | DELAY(40); | |
603 | } | |
604 | ||
605 | if (i == BGE_TIMEOUT) { | |
c6fd6f3b | 606 | if_printf(&sc->arpcom.ac_if, "PHY write timed out\n"); |
984263bc MD |
607 | return(0); |
608 | } | |
609 | ||
610 | return(0); | |
611 | } | |
612 | ||
613 | static void | |
33c39a69 | 614 | bge_miibus_statchg(device_t dev) |
984263bc MD |
615 | { |
616 | struct bge_softc *sc; | |
617 | struct mii_data *mii; | |
618 | ||
619 | sc = device_get_softc(dev); | |
620 | mii = device_get_softc(sc->bge_miibus); | |
621 | ||
622 | BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE); | |
7f259627 | 623 | if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) { |
984263bc MD |
624 | BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII); |
625 | } else { | |
626 | BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII); | |
627 | } | |
628 | ||
629 | if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) { | |
630 | BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX); | |
631 | } else { | |
632 | BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX); | |
633 | } | |
984263bc MD |
634 | } |
635 | ||
984263bc MD |
636 | /* |
637 | * Memory management for jumbo frames. | |
638 | */ | |
984263bc | 639 | static int |
33c39a69 | 640 | bge_alloc_jumbo_mem(struct bge_softc *sc) |
984263bc | 641 | { |
20c9a969 | 642 | struct ifnet *ifp = &sc->arpcom.ac_if; |
2aa9b12f | 643 | struct bge_jslot *entry; |
20c9a969 SZ |
644 | uint8_t *ptr; |
645 | bus_addr_t paddr; | |
646 | int i, error; | |
984263bc | 647 | |
20c9a969 SZ |
648 | /* |
649 | * Create tag for jumbo mbufs. | |
650 | * This is really a bit of a kludge. We allocate a special | |
651 | * jumbo buffer pool which (thanks to the way our DMA | |
652 | * memory allocation works) will consist of contiguous | |
653 | * pages. This means that even though a jumbo buffer might | |
654 | * be larger than a page size, we don't really need to | |
655 | * map it into more than one DMA segment. However, the | |
656 | * default mbuf tag will result in multi-segment mappings, | |
657 | * so we have to create a special jumbo mbuf tag that | |
658 | * lets us get away with mapping the jumbo buffers as | |
659 | * a single segment. I think eventually the driver should | |
660 | * be changed so that it uses ordinary mbufs and cluster | |
661 | * buffers, i.e. jumbo frames can span multiple DMA | |
662 | * descriptors. But that's a project for another day. | |
663 | */ | |
984263bc | 664 | |
20c9a969 SZ |
665 | /* |
666 | * Create DMA stuffs for jumbo RX ring. | |
667 | */ | |
668 | error = bge_dma_block_alloc(sc, BGE_JUMBO_RX_RING_SZ, | |
669 | &sc->bge_cdata.bge_rx_jumbo_ring_tag, | |
670 | &sc->bge_cdata.bge_rx_jumbo_ring_map, | |
671 | (void **)&sc->bge_ldata.bge_rx_jumbo_ring, | |
672 | &sc->bge_ldata.bge_rx_jumbo_ring_paddr); | |
673 | if (error) { | |
674 | if_printf(ifp, "could not create jumbo RX ring\n"); | |
675 | return error; | |
676 | } | |
677 | ||
678 | /* | |
679 | * Create DMA stuffs for jumbo buffer block. | |
680 | */ | |
681 | error = bge_dma_block_alloc(sc, BGE_JMEM, | |
682 | &sc->bge_cdata.bge_jumbo_tag, | |
683 | &sc->bge_cdata.bge_jumbo_map, | |
684 | (void **)&sc->bge_ldata.bge_jumbo_buf, | |
685 | &paddr); | |
686 | if (error) { | |
687 | if_printf(ifp, "could not create jumbo buffer\n"); | |
688 | return error; | |
984263bc MD |
689 | } |
690 | ||
691 | SLIST_INIT(&sc->bge_jfree_listhead); | |
984263bc MD |
692 | |
693 | /* | |
694 | * Now divide it up into 9K pieces and save the addresses | |
695 | * in an array. Note that each bge_jslot entry also records the | |
696 | * address of the softc structure for this interface. This is | |
697 | * because bge_jfree() needs it, but it is called by the mbuf | |
698 | * management code, which will not pass it to us explicitly; the | |
699 | * slot entry is how bge_jfree() finds its way back to the softc. | |
700 | */ | |
20c9a969 | 701 | for (i = 0, ptr = sc->bge_ldata.bge_jumbo_buf; i < BGE_JSLOTS; i++) { |
2aa9b12f JS |
702 | entry = &sc->bge_cdata.bge_jslots[i]; |
703 | entry->bge_sc = sc; | |
704 | entry->bge_buf = ptr; | |
20c9a969 | 705 | entry->bge_paddr = paddr; |
2aa9b12f JS |
706 | entry->bge_inuse = 0; |
707 | entry->bge_slot = i; | |
708 | SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, entry, jslot_link); | |
20c9a969 | 709 | |
2aa9b12f | 710 | ptr += BGE_JLEN; |
20c9a969 | 711 | paddr += BGE_JLEN; |
984263bc | 712 | } |
20c9a969 | 713 | return 0; |
984263bc MD |
714 | } |
715 | ||
716 | static void | |
33c39a69 | 717 | bge_free_jumbo_mem(struct bge_softc *sc) |
984263bc | 718 | { |
20c9a969 SZ |
719 | /* Destroy jumbo RX ring. */ |
720 | bge_dma_block_free(sc->bge_cdata.bge_rx_jumbo_ring_tag, | |
721 | sc->bge_cdata.bge_rx_jumbo_ring_map, | |
722 | sc->bge_ldata.bge_rx_jumbo_ring); | |
723 | ||
724 | /* Destroy jumbo buffer block. */ | |
725 | bge_dma_block_free(sc->bge_cdata.bge_jumbo_tag, | |
726 | sc->bge_cdata.bge_jumbo_map, | |
727 | sc->bge_ldata.bge_jumbo_buf); | |
984263bc MD |
728 | } |
729 | ||
730 | /* | |
731 | * Allocate a jumbo buffer. | |
732 | */ | |
2aa9b12f | 733 | static struct bge_jslot * |
33c39a69 | 734 | bge_jalloc(struct bge_softc *sc) |
984263bc | 735 | { |
2aa9b12f | 736 | struct bge_jslot *entry; |
33c39a69 | 737 | |
16dca0df | 738 | lwkt_serialize_enter(&sc->bge_jslot_serializer); |
984263bc | 739 | entry = SLIST_FIRST(&sc->bge_jfree_listhead); |
16dca0df MD |
740 | if (entry) { |
741 | SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jslot_link); | |
742 | entry->bge_inuse = 1; | |
743 | } else { | |
c6fd6f3b | 744 | if_printf(&sc->arpcom.ac_if, "no free jumbo buffers\n"); |
984263bc | 745 | } |
16dca0df | 746 | lwkt_serialize_exit(&sc->bge_jslot_serializer); |
2aa9b12f | 747 | return(entry); |
984263bc MD |
748 | } |
749 | ||
750 | /* | |
751 | * Adjust usage count on a jumbo buffer. | |
752 | */ | |
753 | static void | |
2aa9b12f | 754 | bge_jref(void *arg) |
984263bc | 755 | { |
2aa9b12f JS |
756 | struct bge_jslot *entry = (struct bge_jslot *)arg; |
757 | struct bge_softc *sc = entry->bge_sc; | |
984263bc MD |
758 | |
759 | if (sc == NULL) | |
760 | panic("bge_jref: can't find softc pointer!"); | |
761 | ||
16dca0df | 762 | if (&sc->bge_cdata.bge_jslots[entry->bge_slot] != entry) { |
984263bc MD |
763 | panic("bge_jref: asked to reference buffer " |
764 | "that we don't manage!"); | |
16dca0df | 765 | } else if (entry->bge_inuse == 0) { |
984263bc | 766 | panic("bge_jref: buffer already free!"); |
16dca0df MD |
767 | } else { |
768 | atomic_add_int(&entry->bge_inuse, 1); | |
769 | } | |
984263bc MD |
770 | } |
771 | ||
772 | /* | |
773 | * Release a jumbo buffer. | |
774 | */ | |
775 | static void | |
2aa9b12f | 776 | bge_jfree(void *arg) |
984263bc | 777 | { |
2aa9b12f JS |
778 | struct bge_jslot *entry = (struct bge_jslot *)arg; |
779 | struct bge_softc *sc = entry->bge_sc; | |
984263bc MD |
780 | |
781 | if (sc == NULL) | |
782 | panic("bge_jfree: can't find softc pointer!"); | |
783 | ||
16dca0df | 784 | if (&sc->bge_cdata.bge_jslots[entry->bge_slot] != entry) { |
984263bc | 785 | panic("bge_jfree: asked to free buffer that we don't manage!"); |
16dca0df | 786 | } else if (entry->bge_inuse == 0) { |
984263bc | 787 | panic("bge_jfree: buffer already free!"); |
16dca0df MD |
788 | } else { |
789 | /* | |
790 | * Possible MP race to 0, use the serializer. The atomic insn | |
791 | * is still needed for races against bge_jref(). | |
792 | */ | |
793 | lwkt_serialize_enter(&sc->bge_jslot_serializer); | |
794 | atomic_subtract_int(&entry->bge_inuse, 1); | |
795 | if (entry->bge_inuse == 0) { | |
796 | SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, | |
797 | entry, jslot_link); | |
798 | } | |
799 | lwkt_serialize_exit(&sc->bge_jslot_serializer); | |
800 | } | |
984263bc MD |
801 | } |
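/*
 * Clarifying note (added, not in the original sources): bge_jref() and
 * bge_jfree() are not called directly by the driver.  bge_newbuf_jumbo()
 * installs them as the mbuf external-storage ext_ref/ext_free callbacks,
 * so the mbuf layer invokes them whenever a jumbo cluster is duplicated
 * or released.
 */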
802 | ||
803 | ||
804 | /* | |
805 | * Initialize a standard receive ring descriptor. | |
806 | */ | |
807 | static int | |
33c39a69 | 808 | bge_newbuf_std(struct bge_softc *sc, int i, struct mbuf *m) |
984263bc | 809 | { |
33c39a69 | 810 | struct mbuf *m_new = NULL; |
20c9a969 SZ |
811 | struct bge_dmamap_arg ctx; |
812 | bus_dma_segment_t seg; | |
33c39a69 | 813 | struct bge_rx_bd *r; |
20c9a969 | 814 | int error; |
984263bc MD |
815 | |
816 | if (m == NULL) { | |
d5086f2b | 817 | m_new = m_getcl(MB_DONTWAIT, MT_DATA, M_PKTHDR); |
33c39a69 | 818 | if (m_new == NULL) |
20c9a969 | 819 | return ENOBUFS; |
984263bc MD |
820 | } else { |
821 | m_new = m; | |
984263bc MD |
822 | m_new->m_data = m_new->m_ext.ext_buf; |
823 | } | |
20c9a969 | 824 | m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; |
984263bc | 825 | |
0ecb11d7 | 826 | if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0) |
984263bc | 827 | m_adj(m_new, ETHER_ALIGN); |
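	/*
	 * Clarifying note (added, not original): ETHER_ALIGN shifts the
	 * buffer start by two bytes so that the IP header following the
	 * 14-byte Ethernet header ends up 32-bit aligned.  Chips flagged
	 * with BGE_FLAG_RX_ALIGNBUG cannot DMA to buffers that are not
	 * 32-bit aligned, so the adjustment is skipped for them.
	 */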
20c9a969 SZ |
828 | |
829 | ctx.bge_maxsegs = 1; | |
830 | ctx.bge_segs = &seg; | |
831 | error = bus_dmamap_load_mbuf(sc->bge_cdata.bge_mtag, | |
832 | sc->bge_cdata.bge_rx_std_dmamap[i], | |
833 | m_new, bge_dma_map_mbuf, &ctx, | |
834 | BUS_DMA_NOWAIT); | |
835 | if (error || ctx.bge_maxsegs == 0) { | |
836 | if (m == NULL) | |
837 | m_freem(m_new); | |
838 | return ENOMEM; | |
839 | } | |
840 | ||
984263bc | 841 | sc->bge_cdata.bge_rx_std_chain[i] = m_new; |
20c9a969 SZ |
842 | |
843 | r = &sc->bge_ldata.bge_rx_std_ring[i]; | |
844 | r->bge_addr.bge_addr_lo = BGE_ADDR_LO(ctx.bge_segs[0].ds_addr); | |
845 | r->bge_addr.bge_addr_hi = BGE_ADDR_HI(ctx.bge_segs[0].ds_addr); | |
984263bc MD |
846 | r->bge_flags = BGE_RXBDFLAG_END; |
847 | r->bge_len = m_new->m_len; | |
848 | r->bge_idx = i; | |
849 | ||
20c9a969 SZ |
850 | bus_dmamap_sync(sc->bge_cdata.bge_mtag, |
851 | sc->bge_cdata.bge_rx_std_dmamap[i], | |
852 | BUS_DMASYNC_PREREAD); | |
853 | return 0; | |
984263bc MD |
854 | } |
855 | ||
856 | /* | |
857 | * Initialize a jumbo receive ring descriptor. This allocates | |
858 | * a jumbo buffer from the pool managed internally by the driver. | |
859 | */ | |
860 | static int | |
33c39a69 | 861 | bge_newbuf_jumbo(struct bge_softc *sc, int i, struct mbuf *m) |
984263bc MD |
862 | { |
863 | struct mbuf *m_new = NULL; | |
20c9a969 | 864 | struct bge_jslot *buf; |
984263bc | 865 | struct bge_rx_bd *r; |
20c9a969 | 866 | bus_addr_t paddr; |
984263bc MD |
867 | |
868 | if (m == NULL) { | |
984263bc | 869 | /* Allocate the mbuf. */ |
74f1caca | 870 | MGETHDR(m_new, MB_DONTWAIT, MT_DATA); |
33c39a69 | 871 | if (m_new == NULL) |
984263bc | 872 | return(ENOBUFS); |
984263bc MD |
873 | |
874 | /* Allocate the jumbo buffer */ | |
875 | buf = bge_jalloc(sc); | |
876 | if (buf == NULL) { | |
877 | m_freem(m_new); | |
c6fd6f3b JS |
878 | if_printf(&sc->arpcom.ac_if, "jumbo allocation failed " |
879 | "-- packet dropped!\n"); | |
20c9a969 | 880 | return ENOBUFS; |
984263bc MD |
881 | } |
882 | ||
883 | /* Attach the buffer to the mbuf. */ | |
2aa9b12f JS |
884 | m_new->m_ext.ext_arg = buf; |
885 | m_new->m_ext.ext_buf = buf->bge_buf; | |
b542cd49 JS |
886 | m_new->m_ext.ext_free = bge_jfree; |
887 | m_new->m_ext.ext_ref = bge_jref; | |
2aa9b12f JS |
888 | m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN; |
889 | ||
2aa9b12f | 890 | m_new->m_flags |= M_EXT; |
984263bc | 891 | } else { |
20c9a969 | 892 | KKASSERT(m->m_flags & M_EXT); |
984263bc | 893 | m_new = m; |
20c9a969 | 894 | buf = m_new->m_ext.ext_arg; |
984263bc | 895 | } |
20c9a969 SZ |
896 | m_new->m_data = m_new->m_ext.ext_buf; |
897 | m_new->m_len = m_new->m_pkthdr.len = m_new->m_ext.ext_size; | |
984263bc | 898 | |
20c9a969 | 899 | paddr = buf->bge_paddr; |
0ecb11d7 | 900 | if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0) { |
984263bc | 901 | m_adj(m_new, ETHER_ALIGN); |
20c9a969 SZ |
902 | paddr += ETHER_ALIGN; |
903 | } | |
904 | ||
984263bc | 905 | /* Set up the descriptor. */ |
984263bc | 906 | sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new; |
20c9a969 SZ |
907 | |
908 | r = &sc->bge_ldata.bge_rx_jumbo_ring[i]; | |
909 | r->bge_addr.bge_addr_lo = BGE_ADDR_LO(paddr); | |
910 | r->bge_addr.bge_addr_hi = BGE_ADDR_HI(paddr); | |
984263bc MD |
911 | r->bge_flags = BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING; |
912 | r->bge_len = m_new->m_len; | |
913 | r->bge_idx = i; | |
914 | ||
20c9a969 | 915 | return 0; |
984263bc MD |
916 | } |
917 | ||
918 | /* | |
919 | * The standard receive ring has 512 entries in it. At 2K per mbuf cluster, | |
920 | * that's 1MB of memory, which is a lot. For now, we fill only the first | |
921 | * 256 ring entries and hope that our CPU is fast enough to keep up with | |
922 | * the NIC. | |
923 | */ | |
924 | static int | |
33c39a69 | 925 | bge_init_rx_ring_std(struct bge_softc *sc) |
984263bc MD |
926 | { |
927 | int i; | |
928 | ||
929 | for (i = 0; i < BGE_SSLOTS; i++) { | |
930 | if (bge_newbuf_std(sc, i, NULL) == ENOBUFS) | |
931 | return(ENOBUFS); | |
932 | }; | |
933 | ||
20c9a969 SZ |
934 | bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag, |
935 | sc->bge_cdata.bge_rx_std_ring_map, | |
936 | BUS_DMASYNC_PREWRITE); | |
937 | ||
984263bc MD |
938 | sc->bge_std = i - 1; |
939 | CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std); | |
940 | ||
941 | return(0); | |
942 | } | |
943 | ||
944 | static void | |
33c39a69 | 945 | bge_free_rx_ring_std(struct bge_softc *sc) |
984263bc MD |
946 | { |
947 | int i; | |
948 | ||
949 | for (i = 0; i < BGE_STD_RX_RING_CNT; i++) { | |
950 | if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) { | |
20c9a969 SZ |
951 | bus_dmamap_unload(sc->bge_cdata.bge_mtag, |
952 | sc->bge_cdata.bge_rx_std_dmamap[i]); | |
984263bc MD |
953 | m_freem(sc->bge_cdata.bge_rx_std_chain[i]); |
954 | sc->bge_cdata.bge_rx_std_chain[i] = NULL; | |
955 | } | |
20c9a969 | 956 | bzero(&sc->bge_ldata.bge_rx_std_ring[i], |
984263bc MD |
957 | sizeof(struct bge_rx_bd)); |
958 | } | |
984263bc MD |
959 | } |
960 | ||
961 | static int | |
33c39a69 | 962 | bge_init_rx_ring_jumbo(struct bge_softc *sc) |
984263bc MD |
963 | { |
964 | int i; | |
965 | struct bge_rcb *rcb; | |
966 | ||
967 | for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { | |
968 | if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS) | |
969 | return(ENOBUFS); | |
970 | }; | |
971 | ||
20c9a969 SZ |
972 | bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag, |
973 | sc->bge_cdata.bge_rx_jumbo_ring_map, | |
974 | BUS_DMASYNC_PREWRITE); | |
975 | ||
984263bc MD |
976 | sc->bge_jumbo = i - 1; |
977 | ||
20c9a969 | 978 | rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb; |
984263bc MD |
979 | rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 0); |
980 | CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags); | |
981 | ||
982 | CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo); | |
983 | ||
984 | return(0); | |
985 | } | |
986 | ||
987 | static void | |
33c39a69 | 988 | bge_free_rx_ring_jumbo(struct bge_softc *sc) |
984263bc MD |
989 | { |
990 | int i; | |
991 | ||
992 | for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { | |
993 | if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) { | |
994 | m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]); | |
995 | sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL; | |
996 | } | |
20c9a969 | 997 | bzero(&sc->bge_ldata.bge_rx_jumbo_ring[i], |
984263bc MD |
998 | sizeof(struct bge_rx_bd)); |
999 | } | |
984263bc MD |
1000 | } |
1001 | ||
1002 | static void | |
33c39a69 | 1003 | bge_free_tx_ring(struct bge_softc *sc) |
984263bc MD |
1004 | { |
1005 | int i; | |
1006 | ||
984263bc MD |
1007 | for (i = 0; i < BGE_TX_RING_CNT; i++) { |
1008 | if (sc->bge_cdata.bge_tx_chain[i] != NULL) { | |
20c9a969 SZ |
1009 | bus_dmamap_unload(sc->bge_cdata.bge_mtag, |
1010 | sc->bge_cdata.bge_tx_dmamap[i]); | |
984263bc MD |
1011 | m_freem(sc->bge_cdata.bge_tx_chain[i]); |
1012 | sc->bge_cdata.bge_tx_chain[i] = NULL; | |
1013 | } | |
20c9a969 | 1014 | bzero(&sc->bge_ldata.bge_tx_ring[i], |
984263bc MD |
1015 | sizeof(struct bge_tx_bd)); |
1016 | } | |
984263bc MD |
1017 | } |
1018 | ||
1019 | static int | |
33c39a69 | 1020 | bge_init_tx_ring(struct bge_softc *sc) |
984263bc MD |
1021 | { |
1022 | sc->bge_txcnt = 0; | |
1023 | sc->bge_tx_saved_considx = 0; | |
94db8384 SZ |
1024 | sc->bge_tx_prodidx = 0; |
1025 | ||
1026 | /* Initialize transmit producer index for host-memory send ring. */ | |
1027 | CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx); | |
984263bc | 1028 | |
984263bc MD |
1029 | /* 5700 b2 errata */ |
1030 | if (sc->bge_chiprev == BGE_CHIPREV_5700_BX) | |
1031 | CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0); | |
1032 | ||
1033 | CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0); | |
1034 | /* 5700 b2 errata */ | |
1035 | if (sc->bge_chiprev == BGE_CHIPREV_5700_BX) | |
1036 | CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0); | |
1037 | ||
1038 | return(0); | |
1039 | } | |
1040 | ||
984263bc | 1041 | static void |
33c39a69 | 1042 | bge_setmulti(struct bge_softc *sc) |
984263bc MD |
1043 | { |
1044 | struct ifnet *ifp; | |
1045 | struct ifmultiaddr *ifma; | |
33c39a69 | 1046 | uint32_t hashes[4] = { 0, 0, 0, 0 }; |
984263bc MD |
1047 | int h, i; |
1048 | ||
1049 | ifp = &sc->arpcom.ac_if; | |
1050 | ||
1051 | if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { | |
1052 | for (i = 0; i < 4; i++) | |
1053 | CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF); | |
1054 | return; | |
1055 | } | |
1056 | ||
1057 | /* First, zot all the existing filters. */ | |
1058 | for (i = 0; i < 4; i++) | |
1059 | CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0); | |
1060 | ||
1061 | /* Now program new ones. */ | |
33c39a69 | 1062 | LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { |
984263bc MD |
1063 | if (ifma->ifma_addr->sa_family != AF_LINK) |
1064 | continue; | |
3b4ec5b8 JS |
1065 | h = ether_crc32_le( |
1066 | LLADDR((struct sockaddr_dl *)ifma->ifma_addr), | |
1067 | ETHER_ADDR_LEN) & 0x7f; | |
984263bc MD |
1068 | hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F); |
1069 | } | |
1070 | ||
1071 | for (i = 0; i < 4; i++) | |
1072 | CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]); | |
984263bc MD |
1073 | } |
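/*
 * Worked example for the hash above (added comment, not original code):
 * the low 7 bits of the little-endian CRC32 of the multicast address
 * index a 128-bit filter spread across the four BGE_MAR registers.
 * For h = 0x45, (0x45 & 0x60) >> 5 == 2 selects hashes[2], and
 * 1 << (0x45 & 0x1f) sets bit 5 in that register.
 */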
1074 | ||
1075 | /* | |
1076 | * Do endian, PCI and DMA initialization. Also check the on-board ROM | |
1077 | * self-test results. | |
1078 | */ | |
1079 | static int | |
33c39a69 | 1080 | bge_chipinit(struct bge_softc *sc) |
984263bc | 1081 | { |
33c39a69 JS |
1082 | int i; |
1083 | uint32_t dma_rw_ctl; | |
984263bc | 1084 | |
20c9a969 SZ |
1085 | /* Set endian type before we access any non-PCI registers. */ |
1086 | pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, BGE_INIT, 4); | |
984263bc MD |
1087 | |
1088 | /* | |
1089 | * Check the 'ROM failed' bit on the RX CPU to see if | |
1090 | * self-tests passed. | |
1091 | */ | |
1092 | if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL) { | |
c6fd6f3b JS |
1093 | if_printf(&sc->arpcom.ac_if, |
1094 | "RX CPU self-diagnostics failed!\n"); | |
984263bc MD |
1095 | return(ENODEV); |
1096 | } | |
1097 | ||
1098 | /* Clear the MAC control register */ | |
1099 | CSR_WRITE_4(sc, BGE_MAC_MODE, 0); | |
1100 | ||
1101 | /* | |
1102 | * Clear the MAC statistics block in the NIC's | |
1103 | * internal memory. | |
1104 | */ | |
1105 | for (i = BGE_STATS_BLOCK; | |
33c39a69 | 1106 | i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t)) |
984263bc MD |
1107 | BGE_MEMWIN_WRITE(sc, i, 0); |
1108 | ||
1109 | for (i = BGE_STATUS_BLOCK; | |
33c39a69 | 1110 | i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t)) |
984263bc MD |
1111 | BGE_MEMWIN_WRITE(sc, i, 0); |
1112 | ||
1113 | /* Set up the PCI DMA control register. */ | |
0ecb11d7 | 1114 | if (sc->bge_flags & BGE_FLAG_PCIE) { |
9a6ee7e2 JS |
1115 | /* PCI Express */ |
1116 | dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD | | |
1117 | (0xf << BGE_PCIDMARWCTL_RD_WAT_SHIFT) | | |
1118 | (0x2 << BGE_PCIDMARWCTL_WR_WAT_SHIFT); | |
0ecb11d7 | 1119 | } else if (sc->bge_flags & BGE_FLAG_PCIX) { |
984263bc | 1120 | /* PCI-X bus */ |
0ecb11d7 SZ |
1121 | if (BGE_IS_5714_FAMILY(sc)) { |
1122 | dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD; | |
1123 | dma_rw_ctl &= ~BGE_PCIDMARWCTL_ONEDMA_ATONCE; /* XXX */ | |
1124 | /* XXX magic values, Broadcom-supplied Linux driver */ | |
1125 | if (sc->bge_asicrev == BGE_ASICREV_BCM5780) { | |
1126 | dma_rw_ctl |= (1 << 20) | (1 << 18) | | |
1127 | BGE_PCIDMARWCTL_ONEDMA_ATONCE; | |
1128 | } else { | |
1129 | dma_rw_ctl |= (1 << 20) | (1 << 18) | (1 << 15); | |
1130 | } | |
1131 | } else if (sc->bge_asicrev == BGE_ASICREV_BCM5704) { | |
1132 | /* | |
1133 | * The 5704 uses a different encoding of read/write | |
1134 | * watermarks. | |
1135 | */ | |
984263bc MD |
1136 | dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD | |
1137 | (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) | | |
1138 | (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT); | |
0ecb11d7 | 1139 | } else { |
984263bc MD |
1140 | dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD | |
1141 | (0x3 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) | | |
1142 | (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) | | |
1143 | (0x0F); | |
0ecb11d7 | 1144 | } |
984263bc MD |
1145 | |
1146 | /* | |
1147 | * 5703 and 5704 need ONEDMA_AT_ONCE as a workaround | |
1148 | * for hardware bugs. | |
1149 | */ | |
1150 | if (sc->bge_asicrev == BGE_ASICREV_BCM5703 || | |
1151 | sc->bge_asicrev == BGE_ASICREV_BCM5704) { | |
33c39a69 | 1152 | uint32_t tmp; |
984263bc MD |
1153 | |
1154 | tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1f; | |
1155 | if (tmp == 0x6 || tmp == 0x7) | |
1156 | dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE; | |
1157 | } | |
0ecb11d7 SZ |
1158 | } else { |
1159 | /* Conventional PCI bus */ | |
1160 | dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD | | |
1161 | (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) | | |
1162 | (0x7 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) | | |
1163 | (0x0F); | |
984263bc MD |
1164 | } |
1165 | ||
1166 | if (sc->bge_asicrev == BGE_ASICREV_BCM5703 || | |
7e40b8c5 | 1167 | sc->bge_asicrev == BGE_ASICREV_BCM5704 || |
0ecb11d7 | 1168 | sc->bge_asicrev == BGE_ASICREV_BCM5705) |
984263bc MD |
1169 | dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA; |
1170 | pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4); | |
1171 | ||
1172 | /* | |
1173 | * Set up general mode register. | |
1174 | */ | |
20c9a969 | 1175 | CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS| |
984263bc | 1176 | BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS| |
bf29e666 | 1177 | BGE_MODECTL_TX_NO_PHDR_CSUM); |
984263bc MD |
1178 | |
1179 | /* | |
1180 | * Disable memory write invalidate. Apparently it is not supported | |
1181 | * properly by these devices. | |
1182 | */ | |
1183 | PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD, PCIM_CMD_MWIEN, 4); | |
1184 | ||
984263bc MD |
1185 | /* Set the timer prescaler (always 66Mhz) */ |
1186 | CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/); | |
1187 | ||
1188 | return(0); | |
1189 | } | |
1190 | ||
1191 | static int | |
33c39a69 | 1192 | bge_blockinit(struct bge_softc *sc) |
984263bc MD |
1193 | { |
1194 | struct bge_rcb *rcb; | |
20c9a969 SZ |
1195 | bus_size_t vrcb; |
1196 | bge_hostaddr taddr; | |
0ecb11d7 | 1197 | uint32_t val; |
984263bc MD |
1198 | int i; |
1199 | ||
1200 | /* | |
1201 | * Initialize the memory window pointer register so that | |
1202 | * we can access the first 32K of internal NIC RAM. This will | |
1203 | * allow us to set up the TX send ring RCBs and the RX return | |
1204 | * ring RCBs, plus other things which live in NIC memory. | |
1205 | */ | |
1206 | CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0); | |
1207 | ||
7e40b8c5 HP |
1208 | /* Note: the BCM5704 has a smaller mbuf space than other chips. */ |
1209 | ||
0ecb11d7 | 1210 | if (!BGE_IS_5705_PLUS(sc)) { |
7e40b8c5 | 1211 | /* Configure mbuf memory pool */ |
0ecb11d7 SZ |
1212 | CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_BUFFPOOL_1); |
1213 | if (sc->bge_asicrev == BGE_ASICREV_BCM5704) | |
1214 | CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000); | |
1215 | else | |
1216 | CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000); | |
984263bc | 1217 | |
7e40b8c5 HP |
1218 | /* Configure DMA resource pool */ |
1219 | CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR, | |
1220 | BGE_DMA_DESCRIPTORS); | |
1221 | CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000); | |
1222 | } | |
984263bc MD |
1223 | |
1224 | /* Configure mbuf pool watermarks */ | |
0ecb11d7 | 1225 | if (BGE_IS_5705_PLUS(sc)) { |
7e40b8c5 HP |
1226 | CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0); |
1227 | CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10); | |
1228 | } else { | |
1229 | CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50); | |
1230 | CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20); | |
1231 | } | |
984263bc MD |
1232 | CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60); |
1233 | ||
1234 | /* Configure DMA resource watermarks */ | |
1235 | CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5); | |
1236 | CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10); | |
1237 | ||
1238 | /* Enable buffer manager */ | |
0ecb11d7 | 1239 | if (!BGE_IS_5705_PLUS(sc)) { |
7e40b8c5 HP |
1240 | CSR_WRITE_4(sc, BGE_BMAN_MODE, |
1241 | BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN); | |
984263bc | 1242 | |
7e40b8c5 HP |
1243 | /* Poll for buffer manager start indication */ |
1244 | for (i = 0; i < BGE_TIMEOUT; i++) { | |
1245 | if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE) | |
1246 | break; | |
1247 | DELAY(10); | |
1248 | } | |
984263bc | 1249 | |
7e40b8c5 | 1250 | if (i == BGE_TIMEOUT) { |
c6fd6f3b JS |
1251 | if_printf(&sc->arpcom.ac_if, |
1252 | "buffer manager failed to start\n"); | |
7e40b8c5 HP |
1253 | return(ENXIO); |
1254 | } | |
984263bc MD |
1255 | } |
1256 | ||
1257 | /* Enable flow-through queues */ | |
1258 | CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF); | |
1259 | CSR_WRITE_4(sc, BGE_FTQ_RESET, 0); | |
1260 | ||
1261 | /* Wait until queue initialization is complete */ | |
1262 | for (i = 0; i < BGE_TIMEOUT; i++) { | |
1263 | if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0) | |
1264 | break; | |
1265 | DELAY(10); | |
1266 | } | |
1267 | ||
1268 | if (i == BGE_TIMEOUT) { | |
c6fd6f3b JS |
1269 | if_printf(&sc->arpcom.ac_if, |
1270 | "flow-through queue init failed\n"); | |
984263bc MD |
1271 | return(ENXIO); |
1272 | } | |
1273 | ||
1274 | /* Initialize the standard RX ring control block */ | |
20c9a969 SZ |
1275 | rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb; |
1276 | rcb->bge_hostaddr.bge_addr_lo = | |
1277 | BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr); | |
1278 | rcb->bge_hostaddr.bge_addr_hi = | |
1279 | BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr); | |
1280 | bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag, | |
1281 | sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREREAD); | |
0ecb11d7 | 1282 | if (BGE_IS_5705_PLUS(sc)) |
7e40b8c5 HP |
1283 | rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0); |
1284 | else | |
1285 | rcb->bge_maxlen_flags = | |
1286 | BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0); | |
0ecb11d7 | 1287 | rcb->bge_nicaddr = BGE_STD_RX_RINGS; |
984263bc MD |
1288 | CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi); |
1289 | CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo); | |
1290 | CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags); | |
1291 | CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr); | |
1292 | ||
1293 | /* | |
1294 | * Initialize the jumbo RX ring control block | |
1295 | * We set the 'ring disabled' bit in the flags | |
1296 | * field until we're actually ready to start | |
1297 | * using this ring (i.e. once we set the MTU | |
1298 | * high enough to require it). | |
1299 | */ | |
0ecb11d7 | 1300 | if (BGE_IS_JUMBO_CAPABLE(sc)) { |
20c9a969 SZ |
1301 | rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb; |
1302 | ||
1303 | rcb->bge_hostaddr.bge_addr_lo = | |
1304 | BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr); | |
1305 | rcb->bge_hostaddr.bge_addr_hi = | |
1306 | BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr); | |
1307 | bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag, | |
1308 | sc->bge_cdata.bge_rx_jumbo_ring_map, | |
1309 | BUS_DMASYNC_PREREAD); | |
7e40b8c5 HP |
1310 | rcb->bge_maxlen_flags = |
1311 | BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, | |
1312 | BGE_RCB_FLAG_RING_DISABLED); | |
0ecb11d7 | 1313 | rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS; |
7e40b8c5 HP |
1314 | CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI, |
1315 | rcb->bge_hostaddr.bge_addr_hi); | |
1316 | CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO, | |
1317 | rcb->bge_hostaddr.bge_addr_lo); | |
1318 | CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, | |
1319 | rcb->bge_maxlen_flags); | |
1320 | CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr); | |
1321 | ||
1322 | /* Set up dummy disabled mini ring RCB */ | |
20c9a969 | 1323 | rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb; |
7e40b8c5 HP |
1324 | rcb->bge_maxlen_flags = |
1325 | BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED); | |
1326 | CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS, | |
1327 | rcb->bge_maxlen_flags); | |
1328 | } | |
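BGE_RCB_MAXLEN_FLAGS() packs a ring/frame length and the RCB flag bits (such as BGE_RCB_FLAG_RING_DISABLED) into the single 32-bit maxlen_flags word programmed above. A hedged sketch of the usual encoding, assuming the length occupies the upper half-word and the flags the lower:

/* Sketch only: combine a maximum length and flag bits into one RCB word. */
#define BGE_RCB_MAXLEN_FLAGS(maxlen, flags)	(((maxlen) << 16) | (flags))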
984263bc MD |
1329 | |
1330 | /* | |
1331 | * Set the BD ring replenish thresholds. The recommended | |
1332 | * values are 1/8th the number of descriptors allocated to | |
1333 | * each ring. | |
1334 | */ | |
0ecb11d7 SZ |
1335 | if (BGE_IS_5705_PLUS(sc)) |
1336 | val = 8; | |
1337 | else | |
1338 | val = BGE_STD_RX_RING_CNT / 8; | |
1339 | CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, val); | |
984263bc MD |
1340 | CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8); |
1341 | ||
1342 | /* | |
1343 | * Disable all unused send rings by setting the 'ring disabled' | |
1344 | * bit in the flags field of all the TX send ring control blocks. | |
1345 | * These are located in NIC memory. | |
1346 | */ | |
20c9a969 | 1347 | vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB; |
984263bc | 1348 | for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) { |
20c9a969 SZ |
1349 | RCB_WRITE_4(sc, vrcb, bge_maxlen_flags, |
1350 | BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED)); | |
1351 | RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0); | |
1352 | vrcb += sizeof(struct bge_rcb); | |
984263bc MD |
1353 | } |
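Unlike the host-resident RX ring control blocks above, the send and RX return RCBs live in NIC-internal memory, so RCB_WRITE_4() stores individual RCB members through the memory window rather than through dedicated registers. It is presumably a thin wrapper along these lines (a sketch, not the driver's exact definition):

/* Sketch: write one 32-bit member of an RCB that resides in NIC memory. */
#define RCB_WRITE_4(sc, rcb, member, val)				\
	bus_space_write_4((sc)->bge_btag, (sc)->bge_bhandle,		\
	    (rcb) + offsetof(struct bge_rcb, member), (val))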
1354 | ||
1355 | /* Configure TX RCB 0 (we use only the first ring) */ | |
20c9a969 SZ |
1356 | vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB; |
1357 | BGE_HOSTADDR(taddr, sc->bge_ldata.bge_tx_ring_paddr); | |
1358 | RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi); | |
1359 | RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo); | |
1360 | RCB_WRITE_4(sc, vrcb, bge_nicaddr, | |
1361 | BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT)); | |
0ecb11d7 | 1362 | if (!BGE_IS_5705_PLUS(sc)) { |
20c9a969 SZ |
1363 | RCB_WRITE_4(sc, vrcb, bge_maxlen_flags, |
1364 | BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0)); | |
1365 | } | |
984263bc MD |
1366 | |
1367 | /* Disable all unused RX return rings */ | |
20c9a969 | 1368 | vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB; |
984263bc | 1369 | for (i = 0; i < BGE_RX_RINGS_MAX; i++) { |
20c9a969 SZ |
1370 | RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0); |
1371 | RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0); | |
1372 | RCB_WRITE_4(sc, vrcb, bge_maxlen_flags, | |
7e40b8c5 | 1373 | BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, |
20c9a969 SZ |
1374 | BGE_RCB_FLAG_RING_DISABLED)); |
1375 | RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0); | |
984263bc | 1376 | CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO + |
33c39a69 | 1377 | (i * (sizeof(uint64_t))), 0); |
20c9a969 | 1378 | vrcb += sizeof(struct bge_rcb); |
984263bc MD |
1379 | } |
1380 | ||
1381 | /* Initialize RX ring indexes */ | |
1382 | CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, 0); | |
1383 | CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0); | |
1384 | CSR_WRITE_4(sc, BGE_MBX_RX_MINI_PROD_LO, 0); | |
1385 | ||
1386 | /* | |
1387 | * Set up RX return ring 0. | |
1388 | * Note that the NIC address for RX return rings is 0x00000000. | |
1389 | * The return rings live entirely within the host, so the | |
1390 | * nicaddr field in the RCB isn't used. | |
1391 | */ | |
20c9a969 SZ |
1392 | vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB; |
1393 | BGE_HOSTADDR(taddr, sc->bge_ldata.bge_rx_return_ring_paddr); | |
1394 | RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi); | |
1395 | RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo); | |
1396 | RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0x00000000); | |
1397 | RCB_WRITE_4(sc, vrcb, bge_maxlen_flags, | |
1398 | BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0)); | |
984263bc MD |
1399 | |
1400 | /* Set random backoff seed for TX */ | |
1401 | CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF, | |
1402 | sc->arpcom.ac_enaddr[0] + sc->arpcom.ac_enaddr[1] + | |
1403 | sc->arpcom.ac_enaddr[2] + sc->arpcom.ac_enaddr[3] + | |
1404 | sc->arpcom.ac_enaddr[4] + sc->arpcom.ac_enaddr[5] + | |
1405 | BGE_TX_BACKOFF_SEED_MASK); | |
1406 | ||
1407 | /* Set inter-packet gap */ | |
1408 | CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620); | |
1409 | ||
1410 | /* | |
1411 | * Specify which ring to use for packets that don't match | |
1412 | * any RX rules. | |
1413 | */ | |
1414 | CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08); | |
1415 | ||
1416 | /* | |
1417 | * Configure number of RX lists. One interrupt distribution | |
1418 | * list, sixteen active lists, one bad frames class. | |
1419 | */ | |
1420 | CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181); | |
1421 | ||
1422 | /* Initialize RX list placement stats mask. */ | |
1423 | CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF); | |
1424 | CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1); | |
1425 | ||
1426 | /* Disable host coalescing until we get it set up */ | |
1427 | CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000); | |
1428 | ||
1429 | /* Poll to make sure it's shut down. */ | |
1430 | for (i = 0; i < BGE_TIMEOUT; i++) { | |
1431 | if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE)) | |
1432 | break; | |
1433 | DELAY(10); | |
1434 | } | |
1435 | ||
1436 | if (i == BGE_TIMEOUT) { | |
c6fd6f3b JS |
1437 | if_printf(&sc->arpcom.ac_if, |
1438 | "host coalescing engine failed to idle\n"); | |
984263bc MD |
1439 | return(ENXIO); |
1440 | } | |
1441 | ||
1442 | /* Set up host coalescing defaults */ | |
1443 | CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks); | |
1444 | CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks); | |
1445 | CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds); | |
1446 | CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds); | |
0ecb11d7 | 1447 | if (!BGE_IS_5705_PLUS(sc)) { |
7e40b8c5 HP |
1448 | CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0); |
1449 | CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0); | |
1450 | } | |
ba39cc82 SZ |
1451 | CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 1); |
1452 | CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 1); | |
984263bc MD |
1453 | |
1454 | /* Set up address of statistics block */ | |
0ecb11d7 | 1455 | if (!BGE_IS_5705_PLUS(sc)) { |
20c9a969 SZ |
1456 | CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI, |
1457 | BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr)); | |
7e40b8c5 | 1458 | CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO, |
20c9a969 | 1459 | BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr)); |
7e40b8c5 HP |
1460 | |
1461 | CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK); | |
1462 | CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK); | |
1463 | CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks); | |
1464 | } | |
984263bc MD |
1465 | |
1466 | /* Set up address of status block */ | |
20c9a969 SZ |
1467 | CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI, |
1468 | BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr)); | |
984263bc | 1469 | CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO, |
20c9a969 SZ |
1470 | BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr)); |
1471 | sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx = 0; | |
1472 | sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx = 0; | |
984263bc MD |
1473 | |
1474 | /* Turn on host coalescing state machine */ | |
1475 | CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE); | |
1476 | ||
1477 | /* Turn on RX BD completion state machine and enable attentions */ | |
1478 | CSR_WRITE_4(sc, BGE_RBDC_MODE, | |
1479 | BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN); | |
1480 | ||
1481 | /* Turn on RX list placement state machine */ | |
1482 | CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE); | |
1483 | ||
1484 | /* Turn on RX list selector state machine. */ | |
0ecb11d7 | 1485 | if (!BGE_IS_5705_PLUS(sc)) |
7e40b8c5 | 1486 | CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE); |
984263bc MD |
1487 | |
1488 | /* Turn on DMA, clear stats */ | |
1489 | CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB| | |
1490 | BGE_MACMODE_RXDMA_ENB|BGE_MACMODE_RX_STATS_CLEAR| | |
1491 | BGE_MACMODE_TX_STATS_CLEAR|BGE_MACMODE_RX_STATS_ENB| | |
1492 | BGE_MACMODE_TX_STATS_ENB|BGE_MACMODE_FRMHDR_DMA_ENB| | |
0ecb11d7 SZ |
1493 | ((sc->bge_flags & BGE_FLAG_TBI) ? |
1494 | BGE_PORTMODE_TBI : BGE_PORTMODE_MII)); | |
984263bc MD |
1495 | |
1496 | /* Set misc. local control, enable interrupts on attentions */ | |
1497 | CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN); | |
1498 | ||
1499 | #ifdef notdef | |
1500 | /* Assert GPIO pins for PHY reset */ | |
1501 | BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0| | |
1502 | BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2); | |
1503 | BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0| | |
1504 | BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2); | |
1505 | #endif | |
1506 | ||
1507 | /* Turn on DMA completion state machine */ | |
0ecb11d7 | 1508 | if (!BGE_IS_5705_PLUS(sc)) |
7e40b8c5 | 1509 | CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE); |
984263bc MD |
1510 | |
1511 | /* Turn on write DMA state machine */ | |
0ecb11d7 SZ |
1512 | val = BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS; |
1513 | if (sc->bge_asicrev == BGE_ASICREV_BCM5755 || | |
1514 | sc->bge_asicrev == BGE_ASICREV_BCM5787) | |
1515 | val |= (1 << 29); /* Enable host coalescing bug fix. */ | |
1516 | CSR_WRITE_4(sc, BGE_WDMA_MODE, val); | |
984263bc MD |
1517 | |
1518 | /* Turn on read DMA state machine */ | |
1519 | CSR_WRITE_4(sc, BGE_RDMA_MODE, | |
1520 | BGE_RDMAMODE_ENABLE|BGE_RDMAMODE_ALL_ATTNS); | |
1521 | ||
1522 | /* Turn on RX data completion state machine */ | |
1523 | CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE); | |
1524 | ||
1525 | /* Turn on RX BD initiator state machine */ | |
1526 | CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE); | |
1527 | ||
1528 | /* Turn on RX data and RX BD initiator state machine */ | |
1529 | CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE); | |
1530 | ||
1531 | /* Turn on Mbuf cluster free state machine */ | |
0ecb11d7 | 1532 | if (!BGE_IS_5705_PLUS(sc)) |
7e40b8c5 | 1533 | CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE); |
984263bc MD |
1534 | |
1535 | /* Turn on send BD completion state machine */ | |
1536 | CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE); | |
1537 | ||
1538 | /* Turn on send data completion state machine */ | |
1539 | CSR_WRITE_4(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE); | |
1540 | ||
1541 | /* Turn on send data initiator state machine */ | |
1542 | CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE); | |
1543 | ||
1544 | /* Turn on send BD initiator state machine */ | |
1545 | CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE); | |
1546 | ||
1547 | /* Turn on send BD selector state machine */ | |
1548 | CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE); | |
1549 | ||
1550 | CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF); | |
1551 | CSR_WRITE_4(sc, BGE_SDI_STATS_CTL, | |
1552 | BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER); | |
1553 | ||
1554 | /* ack/clear link change events */ | |
1555 | CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED| | |
7e40b8c5 HP |
1556 | BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE| |
1557 | BGE_MACSTAT_LINK_CHANGED); | |
20c9a969 | 1558 | CSR_WRITE_4(sc, BGE_MI_STS, 0); |
984263bc MD |
1559 | |
1560 | /* Enable PHY auto polling (for MII/GMII only) */ | |
0ecb11d7 | 1561 | if (sc->bge_flags & BGE_FLAG_TBI) { |
984263bc MD |
1562 | CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK); |
1563 | } else { | |
1564 | BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL|10<<16); | |
db861466 SZ |
1565 | if (sc->bge_asicrev == BGE_ASICREV_BCM5700 && |
1566 | sc->bge_chipid != BGE_CHIPID_BCM5700_B2) { | |
984263bc MD |
1567 | CSR_WRITE_4(sc, BGE_MAC_EVT_ENB, |
1568 | BGE_EVTENB_MI_INTERRUPT); | |
db861466 | 1569 | } |
984263bc MD |
1570 | } |
1571 | ||
db861466 SZ |
1572 | /* |
1573 | * Clear any pending link state attention. | |
1574 | * Otherwise some link state change events may be lost until attention | |
1575 | * is cleared by bge_intr() -> bge_softc.bge_link_upd() sequence. | |
1576 | * It's not necessary on newer BCM chips - perhaps enabling link | |
1577 | * state change attentions implies clearing pending attention. | |
1578 | */ | |
1579 | CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED| | |
1580 | BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE| | |
1581 | BGE_MACSTAT_LINK_CHANGED); | |
1582 | ||
984263bc MD |
1583 | /* Enable link state change attentions. */ |
1584 | BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED); | |
1585 | ||
1586 | return(0); | |
1587 | } | |
1588 | ||
1589 | /* | |
1590 | * Probe for a Broadcom chip. Check the PCI vendor and device IDs | |
1591 | * against our list and return its name if we find a match. Note | |
1592 | * that since the Broadcom controller contains VPD support, we | |
1593 | * can get the device name string from the controller itself instead | |
1594 | * of the compiled-in string. This is a little slow, but it guarantees | |
1595 | * we'll always announce the right product name. | |
1596 | */ | |
1597 | static int | |
33c39a69 | 1598 | bge_probe(device_t dev) |
984263bc | 1599 | { |
984263bc | 1600 | struct bge_softc *sc; |
33c39a69 | 1601 | struct bge_type *t; |
984263bc | 1602 | char *descbuf; |
33c39a69 JS |
1603 | uint16_t product, vendor; |
1604 | ||
1605 | product = pci_get_device(dev); | |
1606 | vendor = pci_get_vendor(dev); | |
1607 | ||
1608 | for (t = bge_devs; t->bge_name != NULL; t++) { | |
1609 | if (vendor == t->bge_vid && product == t->bge_did) | |
1610 | break; | |
1611 | } | |
984263bc | 1612 | |
33c39a69 JS |
1613 | if (t->bge_name == NULL) |
1614 | return(ENXIO); | |
984263bc MD |
1615 | |
1616 | sc = device_get_softc(dev); | |
efda3bd0 | 1617 | descbuf = kmalloc(BGE_DEVDESC_MAX, M_TEMP, M_WAITOK); |
f8c7a42d | 1618 | ksnprintf(descbuf, BGE_DEVDESC_MAX, "%s, ASIC rev. %#04x", t->bge_name, |
33c39a69 JS |
1619 | pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >> 16); |
1620 | device_set_desc_copy(dev, descbuf); | |
1621 | if (pci_get_subvendor(dev) == PCI_VENDOR_DELL) | |
0ecb11d7 | 1622 | sc->bge_flags |= BGE_FLAG_NO_3LED; |
efda3bd0 | 1623 | kfree(descbuf, M_TEMP); |
33c39a69 | 1624 | return(0); |
984263bc MD |
1625 | } |
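bge_probe() walks the bge_devs[] table declared earlier in the file; from the way the loop dereferences it, each entry pairs a PCI vendor/device ID with a description string, and the table ends with an entry whose bge_name is NULL. A hypothetical excerpt, for illustration only (the real table lists many more controllers):

struct bge_type {
	uint16_t	bge_vid;	/* PCI vendor ID */
	uint16_t	bge_did;	/* PCI device ID */
	char		*bge_name;	/* description used to build the device string */
};

static struct bge_type bge_devs[] = {
	{ 0x14e4, 0x1644, "Broadcom BCM5700 Gigabit Ethernet" },	/* illustrative entry */
	{ 0, 0, NULL }			/* terminator checked by the probe loop */
};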
1626 | ||
1627 | static int | |
33c39a69 | 1628 | bge_attach(device_t dev) |
984263bc | 1629 | { |
984263bc MD |
1630 | struct ifnet *ifp; |
1631 | struct bge_softc *sc; | |
33c39a69 JS |
1632 | uint32_t hwcfg = 0; |
1633 | uint32_t mac_addr = 0; | |
c6fd6f3b | 1634 | int error = 0, rid; |
0a8b5977 | 1635 | uint8_t ether_addr[ETHER_ADDR_LEN]; |
984263bc | 1636 | |
984263bc | 1637 | sc = device_get_softc(dev); |
984263bc | 1638 | sc->bge_dev = dev; |
263489fb | 1639 | callout_init(&sc->bge_stat_timer); |
16dca0df | 1640 | lwkt_serialize_init(&sc->bge_jslot_serializer); |
984263bc MD |
1641 | |
1642 | /* | |
1643 | * Map control/status registers. | |
1644 | */ | |
cc8ddf9e | 1645 | pci_enable_busmaster(dev); |
984263bc MD |
1646 | |
1647 | rid = BGE_PCI_BAR0; | |
cc8ddf9e JS |
1648 | sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, |
1649 | RF_ACTIVE); | |
984263bc MD |
1650 | |
1651 | if (sc->bge_res == NULL) { | |
c6fd6f3b | 1652 | device_printf(dev, "couldn't map memory\n"); |
baf731bb | 1653 | return ENXIO; |
984263bc MD |
1654 | } |
1655 | ||
1656 | sc->bge_btag = rman_get_bustag(sc->bge_res); | |
1657 | sc->bge_bhandle = rman_get_bushandle(sc->bge_res); | |
984263bc | 1658 | |
9a6ee7e2 JS |
1659 | /* Save ASIC rev. */ |
1660 | sc->bge_chipid = | |
1661 | pci_read_config(dev, BGE_PCI_MISC_CTL, 4) & | |
1662 | BGE_PCIMISCCTL_ASICREV; | |
1663 | sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid); | |
1664 | sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid); | |
1665 | ||
0ecb11d7 SZ |
1666 | /* Save chipset family. */ |
1667 | switch (sc->bge_asicrev) { | |
1668 | case BGE_ASICREV_BCM5700: | |
1669 | case BGE_ASICREV_BCM5701: | |
1670 | case BGE_ASICREV_BCM5703: | |
1671 | case BGE_ASICREV_BCM5704: | |
1672 | sc->bge_flags |= BGE_FLAG_5700_FAMILY | BGE_FLAG_JUMBO; | |
1673 | break; | |
1674 | ||
1675 | case BGE_ASICREV_BCM5714_A0: | |
1676 | case BGE_ASICREV_BCM5780: | |
1677 | case BGE_ASICREV_BCM5714: | |
1678 | sc->bge_flags |= BGE_FLAG_5714_FAMILY; | |
1679 | /* Fall through */ | |
1680 | ||
1681 | case BGE_ASICREV_BCM5750: | |
1682 | case BGE_ASICREV_BCM5752: | |
1683 | case BGE_ASICREV_BCM5755: | |
1684 | case BGE_ASICREV_BCM5787: | |
1685 | sc->bge_flags |= BGE_FLAG_575X_PLUS; | |
1686 | /* Fall through */ | |
1687 | ||
1688 | case BGE_ASICREV_BCM5705: | |
1689 | sc->bge_flags |= BGE_FLAG_5705_PLUS; | |
1690 | break; | |
1691 | } | |
9a6ee7e2 JS |
1692 | |
1693 | /* | |
0ecb11d7 | 1694 | * Set various quirk flags. |
9a6ee7e2 | 1695 | */ |
9a6ee7e2 | 1696 | |
0ecb11d7 SZ |
1697 | sc->bge_flags |= BGE_FLAG_ETH_WIRESPEED; |
1698 | if (sc->bge_asicrev == BGE_ASICREV_BCM5700 || | |
1699 | (sc->bge_asicrev == BGE_ASICREV_BCM5705 && | |
1700 | (sc->bge_chipid != BGE_CHIPID_BCM5705_A0 && | |
1701 | sc->bge_chipid != BGE_CHIPID_BCM5705_A1)) || | |
1702 | sc->bge_asicrev == BGE_ASICREV_BCM5906) | |
1703 | sc->bge_flags &= ~BGE_FLAG_ETH_WIRESPEED; | |
1704 | ||
1705 | if (sc->bge_chipid == BGE_CHIPID_BCM5701_A0 || | |
1706 | sc->bge_chipid == BGE_CHIPID_BCM5701_B0) | |
1707 | sc->bge_flags |= BGE_FLAG_CRC_BUG; | |
1708 | ||
1709 | if (sc->bge_chiprev == BGE_CHIPREV_5703_AX || | |
1710 | sc->bge_chiprev == BGE_CHIPREV_5704_AX) | |
1711 | sc->bge_flags |= BGE_FLAG_ADC_BUG; | |
1712 | ||
1713 | if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0) | |
1714 | sc->bge_flags |= BGE_FLAG_5704_A0_BUG; | |
1715 | ||
1716 | if (BGE_IS_5705_PLUS(sc)) { | |
1717 | if (sc->bge_asicrev == BGE_ASICREV_BCM5755 || | |
1718 | sc->bge_asicrev == BGE_ASICREV_BCM5787) { | |
1719 | uint32_t product = pci_get_device(dev); | |
1720 | ||
1721 | if (product != PCI_PRODUCT_BROADCOM_BCM5722 && | |
1722 | product != PCI_PRODUCT_BROADCOM_BCM5756) | |
1723 | sc->bge_flags |= BGE_FLAG_JITTER_BUG; | |
1724 | if (product == PCI_PRODUCT_BROADCOM_BCM5755M) | |
1725 | sc->bge_flags |= BGE_FLAG_ADJUST_TRIM; | |
1726 | } else if (sc->bge_asicrev != BGE_ASICREV_BCM5906) { | |
1727 | sc->bge_flags |= BGE_FLAG_BER_BUG; | |
9a6ee7e2 JS |
1728 | } |
1729 | } | |
1730 | ||
0ecb11d7 SZ |
1731 | /* Allocate interrupt */ |
1732 | rid = 0; | |
1733 | ||
1734 | sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, | |
1735 | RF_SHAREABLE | RF_ACTIVE); | |
1736 | ||
1737 | if (sc->bge_irq == NULL) { | |
1738 | device_printf(dev, "couldn't map interrupt\n"); | |
1739 | error = ENXIO; | |
1740 | goto fail; | |
1741 | } | |
1742 | ||
1743 | /* | |
1744 | * Check if this is a PCI-X or PCI Express device. | |
1745 | */ | |
1746 | if (BGE_IS_5705_PLUS(sc)) { | |
1747 | uint32_t reg; | |
1748 | ||
1749 | reg = pci_read_config(dev, BGE_PCIE_CAPID_REG, 4); | |
1750 | if ((reg & 0xff) == BGE_PCIE_CAPID) | |
1751 | sc->bge_flags |= BGE_FLAG_PCIE; | |
1752 | } else { | |
1753 | /* | |
1754 | * Check if the device is in PCI-X Mode. | |
1755 | * (This bit is not valid on PCI Express controllers.) | |
1756 | */ | |
1757 | if ((pci_read_config(sc->bge_dev, BGE_PCI_PCISTATE, 4) & | |
1758 | BGE_PCISTATE_PCI_BUSMODE) == 0) | |
1759 | sc->bge_flags |= BGE_FLAG_PCIX; | |
1760 | } | |
1761 | ||
c6fd6f3b JS |
1762 | ifp = &sc->arpcom.ac_if; |
1763 | if_initname(ifp, device_get_name(dev), device_get_unit(dev)); | |
984263bc MD |
1764 | |
1765 | /* Try to reset the chip. */ | |
1766 | bge_reset(sc); | |
1767 | ||
1768 | if (bge_chipinit(sc)) { | |
c6fd6f3b | 1769 | device_printf(dev, "chip initialization failed\n"); |
984263bc MD |
1770 | error = ENXIO; |
1771 | goto fail; | |
1772 | } | |
1773 | ||
1774 | /* | |
1775 | * Get station address from the EEPROM. | |
1776 | */ | |
1777 | mac_addr = bge_readmem_ind(sc, 0x0c14); | |
1778 | if ((mac_addr >> 16) == 0x484b) { | |
0a8b5977 JS |
1779 | ether_addr[0] = (uint8_t)(mac_addr >> 8); |
1780 | ether_addr[1] = (uint8_t)mac_addr; | |
984263bc | 1781 | mac_addr = bge_readmem_ind(sc, 0x0c18); |
0a8b5977 JS |
1782 | ether_addr[2] = (uint8_t)(mac_addr >> 24); |
1783 | ether_addr[3] = (uint8_t)(mac_addr >> 16); | |
1784 | ether_addr[4] = (uint8_t)(mac_addr >> 8); | |
1785 | ether_addr[5] = (uint8_t)mac_addr; | |
1786 | } else if (bge_read_eeprom(sc, ether_addr, | |
984263bc | 1787 | BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) { |
c6fd6f3b | 1788 | device_printf(dev, "failed to read station address\n"); |
984263bc MD |
1789 | error = ENXIO; |
1790 | goto fail; | |
1791 | } | |
1792 | ||
20c9a969 | 1793 | /* 5705/5750 limits RX return ring to 512 entries. */ |
0ecb11d7 | 1794 | if (BGE_IS_5705_PLUS(sc)) |
20c9a969 SZ |
1795 | sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705; |
1796 | else | |
1797 | sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT; | |
984263bc | 1798 | |
20c9a969 SZ |
1799 | error = bge_dma_alloc(sc); |
1800 | if (error) | |
984263bc | 1801 | goto fail; |
984263bc MD |
1802 | |
1803 | /* Set default tuneable values. */ | |
1804 | sc->bge_stat_ticks = BGE_TICKS_PER_SEC; | |
055d06f0 SZ |
1805 | sc->bge_rx_coal_ticks = bge_rx_coal_ticks; |
1806 | sc->bge_tx_coal_ticks = bge_tx_coal_ticks; | |
1807 | sc->bge_rx_max_coal_bds = bge_rx_max_coal_bds; | |
1808 | sc->bge_tx_max_coal_bds = bge_tx_max_coal_bds; | |
984263bc MD |
1809 | |
1810 | /* Set up ifnet structure */ | |
984263bc | 1811 | ifp->if_softc = sc; |
984263bc MD |
1812 | ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; |
1813 | ifp->if_ioctl = bge_ioctl; | |
984263bc | 1814 | ifp->if_start = bge_start; |
315fe0ee MD |
1815 | #ifdef DEVICE_POLLING |
1816 | ifp->if_poll = bge_poll; | |
1817 | #endif | |
984263bc MD |
1818 | ifp->if_watchdog = bge_watchdog; |
1819 | ifp->if_init = bge_init; | |
1820 | ifp->if_mtu = ETHERMTU; | |
cb623c48 | 1821 | ifp->if_capabilities = IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU; |
936ff230 JS |
1822 | ifq_set_maxlen(&ifp->if_snd, BGE_TX_RING_CNT - 1); |
1823 | ifq_set_ready(&ifp->if_snd); | |
cb623c48 SZ |
1824 | |
1825 | /* | |
1826 | * 5700 B0 chips do not support checksumming correctly due | |
1827 | * to hardware bugs. | |
1828 | */ | |
1829 | if (sc->bge_chipid != BGE_CHIPID_BCM5700_B0) { | |
1830 | ifp->if_capabilities |= IFCAP_HWCSUM; | |
1831 | ifp->if_hwassist = BGE_CSUM_FEATURES; | |
1832 | } | |
984263bc MD |
1833 | ifp->if_capenable = ifp->if_capabilities; |
1834 | ||
984263bc MD |
1835 | /* |
1836 | * Figure out what sort of media we have by checking the | |
1837 | * hardware config word in the first 32k of NIC internal memory, | |
1838 | * or fall back to examining the EEPROM if necessary. | |
1839 | * Note: on some BCM5700 cards, this value appears to be unset. | |
1840 | * If that's the case, we have to rely on identifying the NIC | |
1841 | * by its PCI subsystem ID, as we do below for the SysKonnect | |
1842 | * SK-9D41. | |
1843 | */ | |
1844 | if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER) | |
1845 | hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG); | |
1846 | else { | |
7b47d9c2 SZ |
1847 | if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET, |
1848 | sizeof(hwcfg))) { | |
1849 | device_printf(dev, "failed to read EEPROM\n"); | |
1850 | error = ENXIO; | |
1851 | goto fail; | |
1852 | } | |
984263bc MD |
1853 | hwcfg = ntohl(hwcfg); |
1854 | } | |
1855 | ||
1856 | if ((hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER) | |
0ecb11d7 | 1857 | sc->bge_flags |= BGE_FLAG_TBI; |
984263bc MD |
1858 | |
1859 | /* The SysKonnect SK-9D41 is a 1000baseSX card. */ | |
cc8ddf9e | 1860 | if (pci_get_subvendor(dev) == PCI_PRODUCT_SCHNEIDERKOCH_SK_9D41) |
0ecb11d7 | 1861 | sc->bge_flags |= BGE_FLAG_TBI; |
984263bc | 1862 | |
0ecb11d7 | 1863 | if (sc->bge_flags & BGE_FLAG_TBI) { |
984263bc MD |
1864 | ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, |
1865 | bge_ifmedia_upd, bge_ifmedia_sts); | |
1866 | ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL); | |
1867 | ifmedia_add(&sc->bge_ifmedia, | |
1868 | IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL); | |
1869 | ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL); | |
1870 | ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO); | |
70059b3c | 1871 | sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media; |
984263bc MD |
1872 | } else { |
1873 | /* | |
1874 | * Do transceiver setup. | |
1875 | */ | |
1876 | if (mii_phy_probe(dev, &sc->bge_miibus, | |
1877 | bge_ifmedia_upd, bge_ifmedia_sts)) { | |
c6fd6f3b | 1878 | device_printf(dev, "MII without any PHY!\n"); |
984263bc MD |
1879 | error = ENXIO; |
1880 | goto fail; | |
1881 | } | |
1882 | } | |
1883 | ||
1884 | /* | |
1885 | * When using the BCM5701 in PCI-X mode, data corruption has | |
1886 | * been observed in the first few bytes of some received packets. | |
1887 | * Aligning the packet buffer in memory eliminates the corruption. | |
1888 | * Unfortunately, this misaligns the packet payloads. On platforms | |
1889 | * which do not support unaligned accesses, we will realign the | |
1890 | * payloads by copying the received packets. | |
1891 | */ | |
0ecb11d7 SZ |
1892 | if (sc->bge_asicrev == BGE_ASICREV_BCM5701 && |
1893 | (sc->bge_flags & BGE_FLAG_PCIX)) | |
1894 | sc->bge_flags |= BGE_FLAG_RX_ALIGNBUG; | |
984263bc | 1895 | |
db861466 SZ |
1896 | if (sc->bge_asicrev == BGE_ASICREV_BCM5700 && |
1897 | sc->bge_chipid != BGE_CHIPID_BCM5700_B2) { | |
1898 | sc->bge_link_upd = bge_bcm5700_link_upd; | |
1899 | sc->bge_link_chg = BGE_MACSTAT_MI_INTERRUPT; | |
0ecb11d7 | 1900 | } else if (sc->bge_flags & BGE_FLAG_TBI) { |
db861466 SZ |
1901 | sc->bge_link_upd = bge_tbi_link_upd; |
1902 | sc->bge_link_chg = BGE_MACSTAT_LINK_CHANGED; | |
1903 | } else { | |
1904 | sc->bge_link_upd = bge_copper_link_upd; | |
1905 | sc->bge_link_chg = BGE_MACSTAT_LINK_CHANGED; | |
1906 | } | |
1907 | ||
055d06f0 SZ |
1908 | /* |
1909 | * Create sysctl nodes. | |
1910 | */ | |
1911 | sysctl_ctx_init(&sc->bge_sysctl_ctx); | |
1912 | sc->bge_sysctl_tree = SYSCTL_ADD_NODE(&sc->bge_sysctl_ctx, | |
1913 | SYSCTL_STATIC_CHILDREN(_hw), | |
1914 | OID_AUTO, | |
1915 | device_get_nameunit(dev), | |
1916 | CTLFLAG_RD, 0, ""); | |
1917 | if (sc->bge_sysctl_tree == NULL) { | |
1918 | device_printf(dev, "can't add sysctl node\n"); | |
1919 | error = ENXIO; | |
1920 | goto fail; | |
1921 | } | |
1922 | ||
1923 | SYSCTL_ADD_PROC(&sc->bge_sysctl_ctx, | |
1924 | SYSCTL_CHILDREN(sc->bge_sysctl_tree), | |
1925 | OID_AUTO, "rx_coal_ticks", | |
1926 | CTLTYPE_INT | CTLFLAG_RW, | |
1927 | sc, 0, bge_sysctl_rx_coal_ticks, "I", | |
1928 | "Receive coalescing ticks (usec)."); | |
1929 | SYSCTL_ADD_PROC(&sc->bge_sysctl_ctx, | |
1930 | SYSCTL_CHILDREN(sc->bge_sysctl_tree), | |
1931 | OID_AUTO, "tx_coal_ticks", | |
1932 | CTLTYPE_INT | CTLFLAG_RW, | |
1933 | sc, 0, bge_sysctl_tx_coal_ticks, "I", | |
1934 | "Transmit coalescing ticks (usec)."); | |
1935 | SYSCTL_ADD_PROC(&sc->bge_sysctl_ctx, | |
1936 | SYSCTL_CHILDREN(sc->bge_sysctl_tree), | |
1937 | OID_AUTO, "rx_max_coal_bds", | |
1938 | CTLTYPE_INT | CTLFLAG_RW, | |
1939 | sc, 0, bge_sysctl_rx_max_coal_bds, "I", | |
1940 | "Receive max coalesced BD count."); | |
1941 | SYSCTL_ADD_PROC(&sc->bge_sysctl_ctx, | |
1942 | SYSCTL_CHILDREN(sc->bge_sysctl_tree), | |
1943 | OID_AUTO, "tx_max_coal_bds", | |
1944 | CTLTYPE_INT | CTLFLAG_RW, | |
1945 | sc, 0, bge_sysctl_tx_max_coal_bds, "I", | |
1946 | "Transmit max coalesced BD count."); | |
1947 | ||
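Once the device attaches, these nodes appear under hw.<nameunit>; assuming unit 0, a command such as "sysctl hw.bge0.rx_coal_ticks=150" (the value is only an example) would retune receive coalescing at runtime, with the handlers presumably flagging sc->bge_coal_chg so the interrupt path re-programs the host coalescing registers via bge_coal_change().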
984263bc MD |
1948 | /* |
1949 | * Call MI attach routine. | |
1950 | */ | |
78195a76 | 1951 | ether_ifattach(ifp, ether_addr, NULL); |
984263bc | 1952 | |
78195a76 MD |
1953 | error = bus_setup_intr(dev, sc->bge_irq, INTR_NETSAFE, |
1954 | bge_intr, sc, &sc->bge_intrhand, | |
1955 | ifp->if_serializer); | |
9a717c15 JS |
1956 | if (error) { |
1957 | ether_ifdetach(ifp); | |
1958 | device_printf(dev, "couldn't set up irq\n"); | |
1959 | goto fail; | |
1960 | } | |
9db4b353 SZ |
1961 | |
1962 | ifp->if_cpuid = ithread_cpuid(rman_get_start(sc->bge_irq)); | |
1963 | KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus); | |
1964 | ||
9a717c15 | 1965 | return(0); |
984263bc | 1966 | fail: |
9a717c15 | 1967 | bge_detach(dev); |
984263bc MD |
1968 | return(error); |
1969 | } | |
1970 | ||
1971 | static int | |
33c39a69 | 1972 | bge_detach(device_t dev) |
984263bc | 1973 | { |
9a717c15 | 1974 | struct bge_softc *sc = device_get_softc(dev); |
984263bc | 1975 | |
9a717c15 | 1976 | if (device_is_attached(dev)) { |
baf731bb SZ |
1977 | struct ifnet *ifp = &sc->arpcom.ac_if; |
1978 | ||
cdf89432 | 1979 | lwkt_serialize_enter(ifp->if_serializer); |
9a717c15 JS |
1980 | bge_stop(sc); |
1981 | bge_reset(sc); | |
cdf89432 SZ |
1982 | bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand); |
1983 | lwkt_serialize_exit(ifp->if_serializer); | |
984263bc | 1984 | |
cdf89432 SZ |
1985 | ether_ifdetach(ifp); |
1986 | } | |
baf731bb | 1987 | |
0ecb11d7 | 1988 | if (sc->bge_flags & BGE_FLAG_TBI) |
984263bc | 1989 | ifmedia_removeall(&sc->bge_ifmedia); |
cbf32d7e | 1990 | if (sc->bge_miibus) |
984263bc | 1991 | device_delete_child(dev, sc->bge_miibus); |
9a717c15 | 1992 | bus_generic_detach(dev); |
984263bc | 1993 | |
984263bc MD |
1994 | if (sc->bge_irq != NULL) |
1995 | bus_release_resource(dev, SYS_RES_IRQ, 0, sc->bge_irq); | |
1996 | ||
1997 | if (sc->bge_res != NULL) | |
1998 | bus_release_resource(dev, SYS_RES_MEMORY, | |
1999 | BGE_PCI_BAR0, sc->bge_res); | |
baf731bb | 2000 | |
055d06f0 SZ |
2001 | if (sc->bge_sysctl_tree != NULL) |
2002 | sysctl_ctx_free(&sc->bge_sysctl_ctx); | |
2003 | ||
baf731bb SZ |
2004 | bge_dma_free(sc); |
2005 | ||
2006 | return 0; | |
984263bc MD |
2007 | } |
2008 | ||
2009 | static void | |
33c39a69 | 2010 | bge_reset(struct bge_softc *sc) |
984263bc MD |
2011 | { |
2012 | device_t dev; | |
9a6ee7e2 | 2013 | uint32_t cachesize, command, pcistate, reset; |
0ecb11d7 | 2014 | void (*write_op)(struct bge_softc *, uint32_t, uint32_t); |
984263bc MD |
2015 | int i, val = 0; |
2016 | ||
2017 | dev = sc->bge_dev; | |
2018 | ||
0ecb11d7 SZ |
2019 | if (BGE_IS_575X_PLUS(sc) && !BGE_IS_5714_FAMILY(sc)) { |
2020 | if (sc->bge_flags & BGE_FLAG_PCIE) | |
2021 | write_op = bge_writemem_direct; | |
2022 | else | |
2023 | write_op = bge_writemem_ind; | |
2024 | } else { | |
2025 | write_op = bge_writereg_ind; | |
2026 | } | |
2027 | ||
984263bc MD |
2028 | /* Save some important PCI state. */ |
2029 | cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4); | |
2030 | command = pci_read_config(dev, BGE_PCI_CMD, 4); | |
2031 | pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4); | |
2032 | ||
2033 | pci_write_config(dev, BGE_PCI_MISC_CTL, | |
2034 | BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR| | |
20c9a969 | 2035 | BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW, 4); |
984263bc | 2036 | |
0ecb11d7 SZ |
2037 | /* Disable fastboot on controllers that support it. */ |
2038 | if (sc->bge_asicrev == BGE_ASICREV_BCM5752 || | |
2039 | sc->bge_asicrev == BGE_ASICREV_BCM5755 || | |
2040 | sc->bge_asicrev == BGE_ASICREV_BCM5787) { | |
2041 | if (bootverbose) | |
2042 | if_printf(&sc->arpcom.ac_if, "Disabling fastboot\n"); | |
2043 | CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0x0); | |
2044 | } | |
2045 | ||
2046 | /* | |
2047 | * Write the magic number to SRAM at offset 0xB50. | |
2048 | * When firmware finishes its initialization it will | |
2049 | * write ~BGE_MAGIC_NUMBER to the same location. | |
2050 | */ | |
2051 | bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER); | |
2052 | ||
9a6ee7e2 JS |
2053 | reset = BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1); |
2054 | ||
2055 | /* XXX: Broadcom Linux driver. */ | |
0ecb11d7 | 2056 | if (sc->bge_flags & BGE_FLAG_PCIE) { |
9a6ee7e2 JS |
2057 | if (CSR_READ_4(sc, 0x7e2c) == 0x60) /* PCIE 1.0 */ |
2058 | CSR_WRITE_4(sc, 0x7e2c, 0x20); | |
2059 | if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) { | |
2060 | /* Prevent PCIE link training during global reset */ | |
2061 | CSR_WRITE_4(sc, BGE_MISC_CFG, (1<<29)); | |
2062 | reset |= (1<<29); | |
2063 | } | |
2064 | } | |
2065 | ||
0ecb11d7 SZ |
2066 | /* |
2067 | * Set GPHY Power Down Override to leave GPHY | |
2068 | * powered up in D0 uninitialized. | |
2069 | */ | |
2070 | if (BGE_IS_5705_PLUS(sc)) | |
2071 | reset |= 0x04000000; | |
2072 | ||
984263bc | 2073 | /* Issue global reset */ |
0ecb11d7 | 2074 | write_op(sc, BGE_MISC_CFG, reset); |
984263bc MD |
2075 | |
2076 | DELAY(1000); | |
2077 | ||
9a6ee7e2 | 2078 | /* XXX: Broadcom Linux driver. */ |
0ecb11d7 | 2079 | if (sc->bge_flags & BGE_FLAG_PCIE) { |
9a6ee7e2 JS |
2080 | if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) { |
2081 | uint32_t v; | |
2082 | ||
2083 | DELAY(500000); /* wait for link training to complete */ | |
2084 | v = pci_read_config(dev, 0xc4, 4); | |
2085 | pci_write_config(dev, 0xc4, v | (1<<15), 4); | |
2086 | } | |
0ecb11d7 SZ |
2087 | /* |
2088 | * Set PCIE max payload size to 128 bytes and | |
2089 | * clear error status. | |
2090 | */ | |
9a6ee7e2 JS |
2091 | pci_write_config(dev, 0xd8, 0xf5000, 4); |
2092 | } | |
2093 | ||
984263bc MD |
2094 | /* Reset some of the PCI state that got zapped by reset */ |
2095 | pci_write_config(dev, BGE_PCI_MISC_CTL, | |
2096 | BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR| | |
20c9a969 | 2097 | BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW, 4); |
984263bc MD |
2098 | pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4); |
2099 | pci_write_config(dev, BGE_PCI_CMD, command, 4); | |
0ecb11d7 | 2100 | write_op(sc, BGE_MISC_CFG, (65 << 1)); |
984263bc | 2101 | |
a313b56f | 2102 | /* Enable memory arbiter. */ |
0ecb11d7 SZ |
2103 | if (BGE_IS_5714_FAMILY(sc)) { |
2104 | uint32_t val; | |
2105 | ||
2106 | val = CSR_READ_4(sc, BGE_MARB_MODE); | |
2107 | CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | val); | |
2108 | } else { | |
a313b56f | 2109 | CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE); |
0ecb11d7 | 2110 | } |
a313b56f | 2111 | |
984263bc | 2112 | /* |
0ecb11d7 | 2113 | * Poll until we see the 1's complement of the magic number. |
984263bc MD |
2114 | * This indicates that the firmware initialization |
2115 | * is complete. | |
2116 | */ | |
2117 | for (i = 0; i < BGE_TIMEOUT; i++) { | |
2118 | val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM); | |
2119 | if (val == ~BGE_MAGIC_NUMBER) | |
2120 | break; | |
2121 | DELAY(10); | |
2122 | } | |
2123 | ||
2124 | if (i == BGE_TIMEOUT) { | |
0ecb11d7 SZ |
2125 | if_printf(&sc->arpcom.ac_if, "firmware handshake timed out, " |
2126 | "found 0x%08x\n", val); | |
984263bc MD |
2127 | return; |
2128 | } | |
2129 | ||
2130 | /* | |
2131 | * XXX Wait for the value of the PCISTATE register to | |
2132 | * return to its original pre-reset state. This is a | |
2133 | * fairly good indicator of reset completion. If we don't | |
2134 | * wait for the reset to fully complete, trying to read | |
2135 | * from the device's non-PCI registers may yield garbage | |
2136 | * results. | |
2137 | */ | |
2138 | for (i = 0; i < BGE_TIMEOUT; i++) { | |
2139 | if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate) | |
2140 | break; | |
2141 | DELAY(10); | |
2142 | } | |
2143 | ||
0ecb11d7 SZ |
2144 | if (sc->bge_flags & BGE_FLAG_PCIE) { |
2145 | reset = bge_readmem_ind(sc, 0x7c00); | |
2146 | bge_writemem_ind(sc, 0x7c00, reset | (1 << 25)); | |
2147 | } | |
2148 | ||
984263bc | 2149 | /* Fix up byte swapping */ |
20c9a969 | 2150 | CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS | |
984263bc MD |
2151 | BGE_MODECTL_BYTESWAP_DATA); |
2152 | ||
2153 | CSR_WRITE_4(sc, BGE_MAC_MODE, 0); | |
2154 | ||
70059b3c JS |
2155 | /* |
2156 | * The 5704 in TBI mode apparently needs some special | |
2157 | * adjustment to ensure the SERDES drive level is set | |
2158 | * to 1.2V. | |
2159 | */ | |
0ecb11d7 SZ |
2160 | if (sc->bge_asicrev == BGE_ASICREV_BCM5704 && |
2161 | (sc->bge_flags & BGE_FLAG_TBI)) { | |
70059b3c JS |
2162 | uint32_t serdescfg; |
2163 | ||
2164 | serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG); | |
2165 | serdescfg = (serdescfg & ~0xFFF) | 0x880; | |
2166 | CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg); | |
2167 | } | |
2168 | ||
9a6ee7e2 | 2169 | /* XXX: Broadcom Linux driver. */ |
0ecb11d7 SZ |
2170 | if ((sc->bge_flags & BGE_FLAG_PCIE) && |
2171 | sc->bge_chipid != BGE_CHIPID_BCM5750_A0) { | |
9a6ee7e2 | 2172 | uint32_t v; |
984263bc | 2173 | |
9a6ee7e2 JS |
2174 | v = CSR_READ_4(sc, 0x7c00); |
2175 | CSR_WRITE_4(sc, 0x7c00, v | (1<<25)); | |
2176 | } | |
2177 | ||
2178 | DELAY(10000); | |
984263bc MD |
2179 | } |
2180 | ||
2181 | /* | |
2182 | * Frame reception handling. This is called if there's a frame | |
2183 | * on the receive return list. | |
2184 | * | |
2185 | * Note: we have to be able to handle two possibilities here: | |
2186 | * 1) the frame is from the jumbo receive ring | |
2187 | * 2) the frame is from the standard receive ring | |
2188 | */ | |
2189 | ||
2190 | static void | |
33c39a69 | 2191 | bge_rxeof(struct bge_softc *sc) |
984263bc MD |
2192 | { |
2193 | struct ifnet *ifp; | |
2194 | int stdcnt = 0, jumbocnt = 0; | |
a7db2caa SZ |
2195 | #ifdef ETHER_INPUT_CHAIN |
2196 | struct mbuf_chain chain[MAXCPU]; | |
2197 | #endif | |
984263bc | 2198 | |
449e06cc | 2199 | if (sc->bge_rx_saved_considx == |
20c9a969 | 2200 | sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx) |
449e06cc SZ |
2201 | return; |
2202 | ||
a7db2caa SZ |
2203 | #ifdef ETHER_INPUT_CHAIN |
2204 | ether_input_chain_init(chain); | |
2205 | #endif | |
2206 | ||
984263bc MD |
2207 | ifp = &sc->arpcom.ac_if; |
2208 | ||
20c9a969 SZ |
2209 | bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag, |
2210 | sc->bge_cdata.bge_rx_return_ring_map, | |
2211 | BUS_DMASYNC_POSTREAD); | |
2212 | bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag, | |
2213 | sc->bge_cdata.bge_rx_std_ring_map, | |
2214 | BUS_DMASYNC_POSTREAD); | |
0ecb11d7 | 2215 | if (BGE_IS_JUMBO_CAPABLE(sc)) { |
20c9a969 SZ |
2216 | bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag, |
2217 | sc->bge_cdata.bge_rx_jumbo_ring_map, | |
2218 | BUS_DMASYNC_POSTREAD); | |
2219 | } | |
2220 | ||
2221 | while (sc->bge_rx_saved_considx != | |
2222 | sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx) { | |
984263bc | 2223 | struct bge_rx_bd *cur_rx; |
33c39a69 | 2224 | uint32_t rxidx; |
984263bc | 2225 | struct mbuf *m = NULL; |
33c39a69 | 2226 | uint16_t vlan_tag = 0; |
984263bc MD |
2227 | int have_tag = 0; |
2228 | ||
2229 | cur_rx = | |
20c9a969 | 2230 | &sc->bge_ldata.bge_rx_return_ring[sc->bge_rx_saved_considx]; |
984263bc MD |
2231 | |
2232 | rxidx = cur_rx->bge_idx; | |
7e40b8c5 | 2233 | BGE_INC(sc->bge_rx_saved_considx, sc->bge_return_ring_cnt); |
6b880771 | 2234 | logif(rx_pkt); |
984263bc MD |
2235 | |
2236 | if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) { | |
2237 | have_tag = 1; | |
2238 | vlan_tag = cur_rx->bge_vlan_tag; | |
2239 | } | |
2240 | ||
2241 | if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) { | |
2242 | BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT); | |
2243 | m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx]; | |
2244 | sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL; | |
2245 | jumbocnt++; | |
2246 | if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) { | |
2247 | ifp->if_ierrors++; | |
2248 | bge_newbuf_jumbo(sc, sc->bge_jumbo, m); | |
2249 | continue; | |
2250 | } | |
2251 | if (bge_newbuf_jumbo(sc, | |
2252 | sc->bge_jumbo, NULL) == ENOBUFS) { | |
2253 | ifp->if_ierrors++; | |
2254 | bge_newbuf_jumbo(sc, sc->bge_jumbo, m); | |
2255 | continue; | |
2256 | } | |
2257 | } else { | |
2258 | BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT); | |
20c9a969 SZ |
2259 | bus_dmamap_sync(sc->bge_cdata.bge_mtag, |
2260 | sc->bge_cdata.bge_rx_std_dmamap[rxidx], | |
2261 | BUS_DMASYNC_POSTREAD); | |
2262 | bus_dmamap_unload(sc->bge_cdata.bge_mtag, | |
2263 | sc->bge_cdata.bge_rx_std_dmamap[rxidx]); | |
984263bc MD |
2264 | m = sc->bge_cdata.bge_rx_std_chain[rxidx]; |
2265 | sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL; | |
2266 | stdcnt++; | |
2267 | if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) { | |
2268 | ifp->if_ierrors++; | |
2269 | bge_newbuf_std(sc, sc->bge_std, m); | |
2270 | continue; | |
2271 | } | |
2272 | if (bge_newbuf_std(sc, sc->bge_std, | |
2273 | NULL) == ENOBUFS) { | |
2274 | ifp->if_ierrors++; | |
2275 | bge_newbuf_std(sc, sc->bge_std, m); | |
2276 | continue; | |
2277 | } | |
2278 | } | |
2279 | ||
2280 | ifp->if_ipackets++; | |
2281 | #ifndef __i386__ | |
2282 | /* | |
2283 | * The i386 allows unaligned accesses, but for other | |
2284 | * platforms we must make sure the payload is aligned. | |
2285 | */ | |
0ecb11d7 | 2286 | if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) { |
984263bc MD |
2287 | bcopy(m->m_data, m->m_data + ETHER_ALIGN, |
2288 | cur_rx->bge_len); | |
2289 | m->m_data += ETHER_ALIGN; | |
2290 | } | |
2291 | #endif | |
160185fa | 2292 | m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN; |
984263bc MD |
2293 | m->m_pkthdr.rcvif = ifp; |
2294 | ||
cb623c48 SZ |
2295 | if (ifp->if_capenable & IFCAP_RXCSUM) { |
2296 | if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) { | |
2297 | m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; | |
2298 | if ((cur_rx->bge_ip_csum ^ 0xffff) == 0) | |
2299 | m->m_pkthdr.csum_flags |= CSUM_IP_VALID; | |
2300 | } | |
17240569 | 2301 | if ((cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) && |
cb623c48 | 2302 | m->m_pkthdr.len >= BGE_MIN_FRAME) { |
984263bc | 2303 | m->m_pkthdr.csum_data = |
17240569 | 2304 | cur_rx->bge_tcp_udp_csum; |
bf29e666 SZ |
2305 | m->m_pkthdr.csum_flags |= |
2306 | CSUM_DATA_VALID | CSUM_PSEUDO_HDR; | |
984263bc MD |
2307 | } |
2308 | } | |
984263bc MD |
2309 | |
2310 | /* | |
2311 | * If we received a packet with a vlan tag, pass it | |
2312 | * to vlan_input() instead of ether_input(). | |
2313 | */ | |
2314 | if (have_tag) { | |
e6b5847c SZ |
2315 | m->m_flags |= M_VLANTAG; |
2316 | m->m_pkthdr.ether_vlantag = vlan_tag; | |
984263bc | 2317 | have_tag = vlan_tag = 0; |
984263bc | 2318 | } |
a7db2caa SZ |
2319 | #ifdef ETHER_INPUT_CHAIN |
2320 | #ifdef ETHER_INPUT2 | |
2321 | ether_input_chain2(ifp, m, chain); | |
2322 | #else | |
2323 | ether_input_chain(ifp, m, chain); | |
2324 | #endif | |
2325 | #else | |
e6b5847c | 2326 | ifp->if_input(ifp, m); |
a7db2caa | 2327 | #endif |
984263bc MD |
2328 | } |
2329 | ||
a7db2caa SZ |
2330 | #ifdef ETHER_INPUT_CHAIN |
2331 | ether_input_dispatch(chain); | |
2332 | #endif | |
2333 | ||
20c9a969 SZ |
2334 | if (stdcnt > 0) { |
2335 | bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag, | |
2336 | sc->bge_cdata.bge_rx_std_ring_map, | |
2337 | BUS_DMASYNC_PREWRITE); | |
2338 | } | |
2339 | ||
0ecb11d7 SZ |
2340 | if (BGE_IS_JUMBO_CAPABLE(sc) && jumbocnt > 0) { |
2341 | bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag, | |
2342 | sc->bge_cdata.bge_rx_jumbo_ring_map, | |
2343 | BUS_DMASYNC_PREWRITE); | |
20c9a969 SZ |
2344 | } |
2345 | ||
984263bc MD |
2346 | CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx); |
2347 | if (stdcnt) | |
2348 | CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std); | |
2349 | if (jumbocnt) | |
2350 | CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo); | |
984263bc MD |
2351 | } |
2352 | ||
2353 | static void | |
33c39a69 | 2354 | bge_txeof(struct bge_softc *sc) |
984263bc MD |
2355 | { |
2356 | struct bge_tx_bd *cur_tx = NULL; | |
2357 | struct ifnet *ifp; | |
2358 | ||
449e06cc | 2359 | if (sc->bge_tx_saved_considx == |
20c9a969 | 2360 | sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx) |
449e06cc SZ |
2361 | return; |
2362 | ||
984263bc MD |
2363 | ifp = &sc->arpcom.ac_if; |
2364 | ||
20c9a969 SZ |
2365 | bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag, |
2366 | sc->bge_cdata.bge_tx_ring_map, | |
2367 | BUS_DMASYNC_POSTREAD); | |
2368 | ||
984263bc MD |
2369 | /* |
2370 | * Go through our tx ring and free mbufs for those | |
2371 | * frames that have been sent. | |
2372 | */ | |
2373 | while (sc->bge_tx_saved_considx != | |
20c9a969 SZ |
2374 | sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx) { |
2375 | uint32_t idx = 0; | |
984263bc MD |
2376 | |
2377 | idx = sc->bge_tx_saved_considx; | |
20c9a969 | 2378 | cur_tx = &sc->bge_ldata.bge_tx_ring[idx]; |
984263bc MD |
2379 | if (cur_tx->bge_flags & BGE_TXBDFLAG_END) |
2380 | ifp->if_opackets++; | |
2381 | if (sc->bge_cdata.bge_tx_chain[idx] != NULL) { | |
20c9a969 SZ |
2382 | bus_dmamap_sync(sc->bge_cdata.bge_mtag, |
2383 | sc->bge_cdata.bge_tx_dmamap[idx], | |
2384 | BUS_DMASYNC_POSTWRITE); | |
2385 | bus_dmamap_unload(sc->bge_cdata.bge_mtag, | |
2386 | sc->bge_cdata.bge_tx_dmamap[idx]); | |
984263bc MD |
2387 | m_freem(sc->bge_cdata.bge_tx_chain[idx]); |
2388 | sc->bge_cdata.bge_tx_chain[idx] = NULL; | |
2389 | } | |
2390 | sc->bge_txcnt--; | |
2391 | BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT); | |
6b880771 | 2392 | logif(tx_pkt); |
984263bc MD |
2393 | } |
2394 | ||
20c9a969 SZ |
2395 | if (cur_tx != NULL && |
2396 | (BGE_TX_RING_CNT - sc->bge_txcnt) >= | |
2397 | (BGE_NSEG_RSVD + BGE_NSEG_SPARE)) | |
984263bc | 2398 | ifp->if_flags &= ~IFF_OACTIVE; |
20c9a969 | 2399 | |
142ca760 SZ |
2400 | if (sc->bge_txcnt == 0) |
2401 | ifp->if_timer = 0; | |
2402 | ||
20c9a969 | 2403 | if (!ifq_is_empty(&ifp->if_snd)) |
9db4b353 | 2404 | if_devstart(ifp); |
984263bc MD |
2405 | } |
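Both bge_rxeof() and bge_txeof() advance their saved consumer indices with BGE_INC(), which steps an index and wraps it at the ring size. A plausible minimal form (the actual macro lives in the register header):

/* Sketch: advance a ring index with wrap-around at the ring size. */
#define BGE_INC(idx, cnt)	((idx) = ((idx) + 1) % (cnt))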
2406 | ||
315fe0ee MD |
2407 | #ifdef DEVICE_POLLING |
2408 | ||
2409 | static void | |
2410 | bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) | |
2411 | { | |
2412 | struct bge_softc *sc = ifp->if_softc; | |
2413 | uint32_t status; | |
2414 | ||
2415 | switch(cmd) { | |
2416 | case POLL_REGISTER: | |
ba39cc82 | 2417 | bge_disable_intr(sc); |
315fe0ee MD |
2418 | break; |
2419 | case POLL_DEREGISTER: | |
ba39cc82 | 2420 | bge_enable_intr(sc); |
315fe0ee MD |
2421 | break; |
2422 | case POLL_AND_CHECK_STATUS: | |
2423 | bus_dmamap_sync(sc->bge_cdata.bge_status_tag, | |
2424 | sc->bge_cdata.bge_status_map, | |
2425 | BUS_DMASYNC_POSTREAD); | |
2426 | ||
2427 | /* | |
2428 | * Process link state changes. | |
2429 | */ | |
2430 | status = CSR_READ_4(sc, BGE_MAC_STS); | |
2431 | if ((status & sc->bge_link_chg) || sc->bge_link_evt) { | |
2432 | sc->bge_link_evt = 0; | |
2433 | sc->bge_link_upd(sc, status); | |
2434 | } | |
2435 | /* fall through */ | |
2436 | case POLL_ONLY: | |
2437 | if (ifp->if_flags & IFF_RUNNING) { | |
2438 | bge_rxeof(sc); | |
2439 | bge_txeof(sc); | |
2440 | } | |
2441 | break; | |
2442 | } | |
2443 | } | |
2444 | ||
2445 | #endif | |
2446 | ||
984263bc | 2447 | static void |
33c39a69 | 2448 | bge_intr(void *xsc) |
984263bc | 2449 | { |
bf522c7f | 2450 | struct bge_softc *sc = xsc; |
33c39a69 | 2451 | struct ifnet *ifp = &sc->arpcom.ac_if; |
6b880771 SZ |
2452 | uint32_t status; |
2453 | ||
2454 | logif(intr); | |
0029ccf6 | 2455 | |
142ca760 SZ |
2456 | /* |
2457 | * Ack the interrupt by writing something to BGE_MBX_IRQ0_LO. Don't | |
2458 | * disable interrupts by writing nonzero like we used to, since with | |
2459 | * our current organization this just gives complications and | |
2460 | * pessimizations for re-enabling interrupts. We used to have races | |
2461 | * instead of the necessary complications. Disabling interrupts | |
2462 | * would just reduce the chance of a status update while we are | |
2463 | * running (by switching to the interrupt-mode coalescence | |
2464 | * parameters), but this chance is already very low so it is more | |
2465 | * efficient to get another interrupt than prevent it. | |
2466 | * | |
2467 | * We do the ack first to ensure another interrupt if there is a | |
2468 | * status update after the ack. We don't check for the status | |
2469 | * changing later because it is more efficient to get another | |
2470 | * interrupt than prevent it, not quite as above (not checking is | |
2471 | * a smaller optimization than not toggling the interrupt enable, | |
2472 | * since checking doesn't involve PCI accesses and toggling requires | |
2473 | * the status check). So toggling would probably be a pessimization | |
2474 | * even with MSI. It would only be needed for using a task queue. | |
2475 | */ | |
2476 | CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0); | |
2477 | ||
20c9a969 SZ |
2478 | bus_dmamap_sync(sc->bge_cdata.bge_status_tag, |
2479 | sc->bge_cdata.bge_status_map, | |
2480 | BUS_DMASYNC_POSTREAD); | |
2481 | ||
984263bc MD |
2482 | /* |
2483 | * Process link state changes. | |
984263bc | 2484 | */ |
db861466 SZ |
2485 | status = CSR_READ_4(sc, BGE_MAC_STS); |
2486 | if ((status & sc->bge_link_chg) || sc->bge_link_evt) { | |
2487 | sc->bge_link_evt = 0; | |
2488 | sc->bge_link_upd(sc, status); | |
984263bc MD |
2489 | } |
2490 | ||
2491 | if (ifp->if_flags & IFF_RUNNING) { | |
2492 | /* Check RX return ring producer/consumer */ | |
2493 | bge_rxeof(sc); | |
2494 | ||
2495 | /* Check TX ring producer/consumer */ | |
2496 | bge_txeof(sc); | |
2497 | } | |
055d06f0 SZ |
2498 | |
2499 | if (sc->bge_coal_chg) | |
2500 | bge_coal_change(sc); | |
984263bc MD |
2501 | } |
2502 | ||
2503 | static void | |
33c39a69 | 2504 | bge_tick(void *xsc) |
78195a76 MD |
2505 | { |
2506 | struct bge_softc *sc = xsc; | |
2507 | struct ifnet *ifp = &sc->arpcom.ac_if; | |
2508 | ||
2509 | lwkt_serialize_enter(ifp->if_serializer); | |
984263bc | 2510 | |
0ecb11d7 | 2511 | if (BGE_IS_5705_PLUS(sc)) |
7e40b8c5 HP |
2512 | bge_stats_update_regs(sc); |
2513 | else | |
2514 | bge_stats_update(sc); | |
9a717c15 | 2515 | |
0ecb11d7 | 2516 | if (sc->bge_flags & BGE_FLAG_TBI) { |
db861466 SZ |
2517 | /* |
2518 | * Since in TBI mode auto-polling can't be used we should poll | |
2519 | * link status manually. Here we register pending link event | |
2520 | * and trigger interrupt. | |
2521 | */ | |
2522 | sc->bge_link_evt++; | |
2523 | BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET); | |
3f82ed83 | 2524 | } else if (!sc->bge_link) { |
db861466 | 2525 | mii_tick(device_get_softc(sc->bge_miibus)); |
984263bc MD |
2526 | } |
2527 | ||
db861466 SZ |
2528 | callout_reset(&sc->bge_stat_timer, hz, bge_tick, sc); |
2529 | ||
2530 | lwkt_serialize_exit(ifp->if_serializer); | |
984263bc MD |
2531 | } |
2532 | ||
7e40b8c5 | 2533 | static void |
33c39a69 | 2534 | bge_stats_update_regs(struct bge_softc *sc) |
7e40b8c5 | 2535 | { |
33c39a69 | 2536 | struct ifnet *ifp = &sc->arpcom.ac_if; |
7e40b8c5 | 2537 | struct bge_mac_stats_regs stats; |
33c39a69 | 2538 | uint32_t *s; |
7e40b8c5 HP |
2539 | int i; |
2540 | ||
33c39a69 | 2541 | s = (uint32_t *)&stats; |
7e40b8c5 HP |
2542 | for (i = 0; i < sizeof(struct bge_mac_stats_regs); i += 4) { |
2543 | *s = CSR_READ_4(sc, BGE_RX_STATS + i); | |
2544 | s++; | |
2545 | } | |
2546 | ||
2547 | ifp->if_collisions += | |
2548 | (stats.dot3StatsSingleCollisionFrames + | |
2549 | stats.dot3StatsMultipleCollisionFrames + | |
2550 | stats.dot3StatsExcessiveCollisions + | |
2551 | stats.dot3StatsLateCollisions) - | |
2552 | ifp->if_collisions; | |
7e40b8c5 HP |
2553 | } |
2554 | ||
984263bc | 2555 | static void |
33c39a69 | 2556 | bge_stats_update(struct bge_softc *sc) |
984263bc | 2557 | { |
33c39a69 | 2558 | struct ifnet *ifp = &sc->arpcom.ac_if; |
20c9a969 SZ |
2559 | bus_size_t stats; |
2560 | ||
2561 | stats = BGE_MEMWIN_START + BGE_STATS_BLOCK; | |
984263bc | 2562 | |
20c9a969 SZ |
2563 | #define READ_STAT(sc, stats, stat) \ |
2564 | CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat)) | |
984263bc MD |
2565 | |
2566 | ifp->if_collisions += | |
20c9a969 SZ |
2567 | (READ_STAT(sc, stats, |
2568 | txstats.dot3StatsSingleCollisionFrames.bge_addr_lo) + | |
2569 | READ_STAT(sc, stats, | |
2570 | txstats.dot3StatsMultipleCollisionFrames.bge_addr_lo) + | |
2571 | READ_STAT(sc, stats, | |
2572 | txstats.dot3StatsExcessiveCollisions.bge_addr_lo) + | |
2573 | READ_STAT(sc, stats, | |
2574 | txstats.dot3StatsLateCollisions.bge_addr_lo)) - | |
984263bc MD |
2575 | ifp->if_collisions; |
2576 | ||
20c9a969 SZ |
2577 | #undef READ_STAT |
2578 | ||
984263bc MD |
2579 | #ifdef notdef |
2580 | ifp->if_collisions += | |
2581 | (sc->bge_rdata->bge_info.bge_stats.dot3StatsSingleCollisionFrames + | |
2582 | sc->bge_rdata->bge_info.bge_stats.dot3StatsMultipleCollisionFrames + | |
2583 | sc->bge_rdata->bge_info.bge_stats.dot3StatsExcessiveCollisions + | |
2584 | sc->bge_rdata->bge_info.bge_stats.dot3StatsLateCollisions) - | |
2585 | ifp->if_collisions; | |
2586 | #endif | |
984263bc MD |
2587 | } |
2588 | ||
2589 | /* | |
2590 | * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data | |
2591 | * pointers to descriptors. | |
2592 | */ | |
2593 | static int | |
4a607ed6 | 2594 | bge_encap(struct bge_softc *sc, struct mbuf **m_head0, uint32_t *txidx) |
984263bc | 2595 | { |
20c9a969 | 2596 | struct bge_tx_bd *d = NULL; |
33c39a69 | 2597 | uint16_t csum_flags = 0; |
20c9a969 SZ |
2598 | struct bge_dmamap_arg ctx; |
2599 | bus_dma_segment_t segs[BGE_NSEG_NEW]; | |
2600 | bus_dmamap_t map; | |
2601 | int error, maxsegs, idx, i; | |
4a607ed6 | 2602 | struct mbuf *m_head = *m_head0; |
984263bc | 2603 | |
984263bc MD |
2604 | if (m_head->m_pkthdr.csum_flags) { |
2605 | if (m_head->m_pkthdr.csum_flags & CSUM_IP) | |
2606 | csum_flags |= BGE_TXBDFLAG_IP_CSUM; | |
2607 | if (m_head->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) | |
2608 | csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM; | |
2609 | if (m_head->m_flags & M_LASTFRAG) | |
2610 | csum_flags |= BGE_TXBDFLAG_IP_FRAG_END; | |
2611 | else if (m_head->m_flags & M_FRAG) | |
2612 | csum_flags |= BGE_TXBDFLAG_IP_FRAG; | |
2613 | } | |
20c9a969 SZ |
2614 | |
2615 | idx = *txidx; | |
2616 | map = sc->bge_cdata.bge_tx_dmamap[idx]; | |
2617 | ||
2618 | maxsegs = (BGE_TX_RING_CNT - sc->bge_txcnt) - BGE_NSEG_RSVD; | |
2619 | KASSERT(maxsegs >= BGE_NSEG_SPARE, | |
2620 | ("not enough segments %d\n", maxsegs)); | |
2621 | ||
2622 | if (maxsegs > BGE_NSEG_NEW) | |
2623 | maxsegs = BGE_NSEG_NEW; | |
2624 | ||
cb623c48 SZ |
2625 | /* |
2626 | * Pad outbound frame to BGE_MIN_FRAME for an unusual reason. | |
2627 | * The bge hardware will pad out Tx runts to BGE_MIN_FRAME, | |
2628 | * but when such padded frames employ the bge IP/TCP checksum | |
2629 | * offload, the hardware checksum assist gives incorrect results | |
2630 | * (possibly from incorporating its own padding into the UDP/TCP | |
2631 | * checksum; who knows). If we pad such runts with zeros, the | |
2632 | * onboard checksum comes out correct. We do this by pretending | |
2633 | * the mbuf chain has too many fragments so the coalescing code | |
2634 | * below can assemble the packet into a single buffer that's | |
2635 | * padded out to the minimum frame size. | |
2636 | */ | |
2637 | if ((csum_flags & BGE_TXBDFLAG_TCP_UDP_CSUM) && | |
2638 | m_head->m_pkthdr.len < BGE_MIN_FRAME) { | |
bbb08e56 | 2639 | error = EFBIG; |
cb623c48 SZ |
2640 | } else { |
2641 | ctx.bge_segs = segs; | |
2642 | ctx.bge_maxsegs = maxsegs; | |
2643 | error = bus_dmamap_load_mbuf(sc->bge_cdata.bge_mtag, map, | |
2644 | m_head, bge_dma_map_mbuf, &ctx, | |
2645 | BUS_DMA_NOWAIT); | |
2646 | } | |
bbb08e56 | 2647 | if (error == EFBIG || ctx.bge_maxsegs == 0) { |
20c9a969 SZ |
2648 | struct mbuf *m_new; |
2649 | ||
2650 | m_new = m_defrag(m_head, MB_DONTWAIT); | |
2651 | if (m_new == NULL) { | |
2652 | if_printf(&sc->arpcom.ac_if, | |
2653 | "could not defrag TX mbuf\n"); | |
2654 | error = ENOBUFS; | |
2655 | goto back; | |
2656 | } else { | |
2657 | m_head = m_new; | |
4a607ed6 | 2658 | *m_head0 = m_head; |
20c9a969 SZ |
2659 | } |
2660 | ||
cb623c48 SZ |
2661 | /* |
2662 | * Manually pad short frames, and zero the pad space | |
2663 | * to avoid leaking data. | |
2664 | */ | |
2665 | if ((csum_flags & BGE_TXBDFLAG_TCP_UDP_CSUM) && | |
2666 | m_head->m_pkthdr.len < BGE_MIN_FRAME) { | |
2667 | int pad_len = BGE_MIN_FRAME - m_head->m_pkthdr.len; | |
2668 | ||
2669 | bzero(mtod(m_head, char *) + m_head->m_pkthdr.len, | |
2670 | pad_len); | |
2671 | m_head->m_pkthdr.len += pad_len; | |
2672 | m_head->m_len = m_head->m_pkthdr.len; | |
2673 | } | |
2674 | ||
20c9a969 SZ |
2675 | ctx.bge_segs = segs; |
2676 | ctx.bge_maxsegs = maxsegs; | |
2677 | error = bus_dmamap_load_mbuf(sc->bge_cdata.bge_mtag, map, | |
2678 | m_head, bge_dma_map_mbuf, &ctx, | |
2679 | BUS_DMA_NOWAIT); | |
2680 | if (error || ctx.bge_maxsegs == 0) { | |
2681 | if_printf(&sc->arpcom.ac_if, | |
2682 | "could not defrag TX mbuf\n"); | |
2683 | if (error == 0) | |
bbb08e56 | 2684 | error = EFBIG; |
20c9a969 | 2685 | goto back; |
984263bc | 2686 | } |
20c9a969 SZ |
2687 | } else if (error) { |
2688 | if_printf(&sc->arpcom.ac_if, "could not map TX mbuf\n"); | |
2689 | goto back; | |
984263bc MD |
2690 | } |
2691 | ||
20c9a969 | 2692 | bus_dmamap_sync(sc->bge_cdata.bge_mtag, map, BUS_DMASYNC_PREWRITE); |
984263bc | 2693 | |
20c9a969 SZ |
2694 | for (i = 0; ; i++) { |
2695 | d = &sc->bge_ldata.bge_tx_ring[idx]; | |
984263bc | 2696 | |
20c9a969 SZ |
2697 | d->bge_addr.bge_addr_lo = BGE_ADDR_LO(ctx.bge_segs[i].ds_addr); |
2698 | d->bge_addr.bge_addr_hi = BGE_ADDR_HI(ctx.bge_segs[i].ds_addr); | |
2699 | d->bge_len = segs[i].ds_len; | |
2700 | d->bge_flags = csum_flags; | |
984263bc | 2701 | |
20c9a969 SZ |
2702 | if (i == ctx.bge_maxsegs - 1) |
2703 | break; | |
2704 | BGE_INC(idx, BGE_TX_RING_CNT); | |
2705 | } | |
2706 | /* Mark the last segment as end of packet... */ | |
2707 | d->bge_flags |= BGE_TXBDFLAG_END; | |
984263bc | 2708 | |
20c9a969 SZ |
2709 | /* Set vlan tag to the first segment of the packet. */ |
2710 | d = &sc->bge_ldata.bge_tx_ring[*txidx]; | |
83790f85 | 2711 | if (m_head->m_flags & M_VLANTAG) { |
20c9a969 | 2712 | d->bge_flags |= BGE_TXBDFLAG_VLAN_TAG; |
83790f85 | 2713 | d->bge_vlan_tag = m_head->m_pkthdr.ether_vlantag; |
20c9a969 SZ |
2714 | } else { |
2715 | d->bge_vlan_tag = 0; | |
2716 | } | |
2717 | ||
2718 | /* | |
2719 | * Ensure that the map for this transmission is placed at | |
2720 | * the array index of the last descriptor in this chain. | |
2721 | */ | |
2722 | sc->bge_cdata.bge_tx_dmamap[*txidx] = sc->bge_cdata.bge_tx_dmamap[idx]; | |
2723 | sc->bge_cdata.bge_tx_dmamap[idx] = map; | |
2724 | sc->bge_cdata.bge_tx_chain[idx] = m_head; | |
2725 | sc->bge_txcnt += ctx.bge_maxsegs; | |
2726 | ||
2727 | BGE_INC(idx, BGE_TX_RING_CNT); | |
2728 | *txidx = idx; | |
2729 | back: | |
4a607ed6 | 2730 | if (error) { |
20c9a969 | 2731 | m_freem(m_head); |
4a607ed6 SZ |
2732 | *m_head0 = NULL; |
2733 | } | |
20c9a969 | 2734 | return error; |
984263bc MD |
2735 | } |
2736 | ||
2737 | /* | |
2738 | * Main transmit routine. To avoid having to do mbuf copies, we put pointers | |
2739 | * to the mbuf data regions directly in the transmit descriptors. | |
2740 | */ | |
2741 | static void | |
33c39a69 | 2742 | bge_start(struct ifnet *ifp) |
984263bc | 2743 | { |
20c9a969 | 2744 | struct bge_softc *sc = ifp->if_softc; |
984263bc | 2745 | struct mbuf *m_head = NULL; |
20c9a969 | 2746 | uint32_t prodidx; |
2f54d1d2 | 2747 | int need_trans; |
984263bc | 2748 | |
d47d96f2 | 2749 | if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) |
984263bc MD |
2750 | return; |
2751 | ||
94db8384 | 2752 | prodidx = sc->bge_tx_prodidx; |
984263bc | 2753 | |
2f54d1d2 | 2754 | need_trans = 0; |
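| /* Queue packets until the send queue empties or the next producer slot still holds an unreclaimed mbuf (ring full). */ |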
75544bcd | 2755 | while (sc->bge_cdata.bge_tx_chain[prodidx] == NULL) { |
9db4b353 | 2756 | m_head = ifq_dequeue(&ifp->if_snd, NULL); |
984263bc MD |
2757 | if (m_head == NULL) |
2758 | break; | |
2759 | ||
2760 | /* | |
cb623c48 SZ |
2761 | * XXX |
2762 | * The code inside the if() block is never reached since we | |
2763 | * must mark CSUM_IP_FRAGS in our if_hwassist to start getting | |
2764 | * requests to checksum TCP/UDP in a fragmented packet. | |
2765 | * | |
984263bc MD |
2766 | * XXX |
2767 | * safety overkill. If this is a fragmented packet chain | |
2768 | * with delayed TCP/UDP checksums, then only encapsulate | |
2769 | * it if we have enough descriptors to handle the entire | |
2770 | * chain at once. | |
2771 | * (paranoia -- may not actually be needed) | |
2772 | */ | |
9db4b353 SZ |
2773 | if ((m_head->m_flags & M_FIRSTFRAG) && |
2774 | (m_head->m_pkthdr.csum_flags & CSUM_DELAY_DATA)) { | |
984263bc | 2775 | if ((BGE_TX_RING_CNT - sc->bge_txcnt) < |
9db4b353 | 2776 | m_head->m_pkthdr.csum_data + BGE_NSEG_RSVD) { |
984263bc | 2777 | ifp->if_flags |= IFF_OACTIVE; |
9db4b353 | 2778 | ifq_prepend(&ifp->if_snd, m_head); |
984263bc MD |
2779 | break; |
2780 | } | |
2781 | } | |
2782 | ||
20c9a969 SZ |
2783 | /* |
2784 | * Sanity check: avoid coming within BGE_NSEG_RSVD | |
2785 | * descriptors of the end of the ring. Also make | |
2786 | * sure there are BGE_NSEG_SPARE descriptors for | |
2787 | * jumbo buffers' defragmentation. | |
2788 | */ | |
2789 | if ((BGE_TX_RING_CNT - sc->bge_txcnt) < | |
2790 | (BGE_NSEG_RSVD + BGE_NSEG_SPARE)) { | |
2791 | ifp->if_flags |= IFF_OACTIVE; | |
9db4b353 | 2792 | ifq_prepend(&ifp->if_snd, m_head); |
20c9a969 SZ |
2793 | break; |
2794 | } | |
2795 | ||
984263bc MD |
2796 | /* |
2797 | * Pack the data into the transmit ring. If we | |
2798 | * don't have room, set the OACTIVE flag and wait | |
2799 | * for the NIC to drain the ring. | |
2800 | */ | |
4a607ed6 | 2801 | if (bge_encap(sc, &m_head, &prodidx)) { |
984263bc MD |
2802 | ifp->if_flags |= IFF_OACTIVE; |
2803 | break; | |
2804 | } | |
2f54d1d2 | 2805 | need_trans = 1; |
984263bc | 2806 | |
b637f170 | 2807 | ETHER_BPF_MTAP(ifp, m_head); |
984263bc MD |
2808 | } |
2809 | ||
2f54d1d2 SZ |
2810 | if (!need_trans) |
2811 | return; | |
2812 | ||
984263bc MD |
2813 | /* Transmit */ |
2814 | CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx); | |
2815 | /* 5700 b2 errata */ | |
2816 | if (sc->bge_chiprev == BGE_CHIPREV_5700_BX) | |
2817 | CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx); | |
2818 | ||
94db8384 SZ |
2819 | sc->bge_tx_prodidx = prodidx; |
2820 | ||
984263bc MD |
2821 | /* |
2822 | * Set a timeout in case the chip goes out to lunch. | |
2823 | */ | |
2824 | ifp->if_timer = 5; | |
984263bc MD |
2825 | } |
2826 | ||
2827 | static void | |
33c39a69 | 2828 | bge_init(void *xsc) |
984263bc MD |
2829 | { |
2830 | struct bge_softc *sc = xsc; | |
33c39a69 JS |
2831 | struct ifnet *ifp = &sc->arpcom.ac_if; |
2832 | uint16_t *m; | |
984263bc | 2833 | |
aa65409c SZ |
2834 | ASSERT_SERIALIZED(ifp->if_serializer); |
2835 | ||
2836 | if (ifp->if_flags & IFF_RUNNING) | |
984263bc | 2837 | return; |
984263bc MD |
2838 | |
2839 | /* Cancel pending I/O and flush buffers. */ | |
2840 | bge_stop(sc); | |
2841 | bge_reset(sc); | |
2842 | bge_chipinit(sc); | |
2843 | ||
2844 | /* | |
2845 | * Init the various state machines, ring | |
2846 | * control blocks and firmware. | |
2847 | */ | |
2848 | if (bge_blockinit(sc)) { | |
c6fd6f3b | 2849 | if_printf(ifp, "initialization failure\n"); |
984263bc MD |
2850 | return; |
2851 | } | |
2852 | ||
984263bc MD |
2853 | /* Specify MTU. */ |
2854 | CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu + | |
011c0f93 | 2855 | ETHER_HDR_LEN + ETHER_CRC_LEN + EVL_ENCAPLEN); |
984263bc MD |
2856 | |
2857 | /* Load our MAC address. */ | |
33c39a69 | 2858 | m = (uint16_t *)&sc->arpcom.ac_enaddr[0]; |
984263bc MD |
2859 | CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0])); |
2860 | CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2])); | |
2861 | ||
2862 | /* Enable or disable promiscuous mode as needed. */ | |
6439b28a | 2863 | bge_setpromisc(sc); |
984263bc MD |
2864 | |
2865 | /* Program multicast filter. */ | |
2866 | bge_setmulti(sc); | |
2867 | ||
2868 | /* Init RX ring. */ | |
2869 | bge_init_rx_ring_std(sc); | |
2870 | ||
7e40b8c5 HP |
2871 | /* |
2872 | * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's | |
2873 | * memory to ensure that the chip has in fact read the first | |
2874 | * entry of the ring. | |
2875 | */ | |
2876 | if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) { | |
33c39a69 | 2877 | uint32_t v, i; |
7e40b8c5 HP |
2878 | for (i = 0; i < 10; i++) { |
2879 | DELAY(20); | |
2880 | v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8); | |
2881 | if (v == (MCLBYTES - ETHER_ALIGN)) | |
2882 | break; | |
2883 | } | |
2884 | if (i == 10) | |
c6fd6f3b | 2885 | if_printf(ifp, "5705 A0 chip failed to load RX ring\n"); |
7e40b8c5 HP |
2886 | } |
2887 | ||
984263bc MD |
2888 | /* Init jumbo RX ring. */ |
2889 | if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN)) | |
2890 | bge_init_rx_ring_jumbo(sc); | |
2891 | ||
2892 | /* Init our RX return ring index */ | |
2893 | sc->bge_rx_saved_considx = 0; | |
2894 | ||
2895 | /* Init TX ring. */ | |
2896 | bge_init_tx_ring(sc); | |
2897 | ||
2898 | /* Turn on transmitter */ | |
2899 | BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE); | |
2900 | ||
2901 | /* Turn on receiver */ | |
2902 | BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE); | |
2903 | ||
2904 | /* Tell firmware we're alive. */ | |
2905 | BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); | |
2906 | ||
ba39cc82 | 2907 | /* Enable host interrupts if polling(4) is not enabled. */ |
984263bc | 2908 | BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA); |
315fe0ee | 2909 | #ifdef DEVICE_POLLING |
ba39cc82 SZ |
2910 | if (ifp->if_flags & IFF_POLLING) |
2911 | bge_disable_intr(sc); | |
2912 | else | |
315fe0ee | 2913 | #endif |
ba39cc82 | 2914 | bge_enable_intr(sc); |
984263bc MD |
2915 | |
2916 | bge_ifmedia_upd(ifp); | |
2917 | ||
2918 | ifp->if_flags |= IFF_RUNNING; | |
2919 | ifp->if_flags &= ~IFF_OACTIVE; | |
2920 | ||
263489fb | 2921 | callout_reset(&sc->bge_stat_timer, hz, bge_tick, sc); |
984263bc MD |
2922 | } |
2923 | ||
2924 | /* | |
2925 | * Set media options. | |
2926 | */ | |
2927 | static int | |
33c39a69 | 2928 | bge_ifmedia_upd(struct ifnet *ifp) |
984263bc | 2929 | { |
33c39a69 | 2930 | struct bge_softc *sc = ifp->if_softc; |
984263bc MD |
2931 | |
2932 | /* If this is a 1000baseX NIC, enable the TBI port. */ | |
0ecb11d7 | 2933 | if (sc->bge_flags & BGE_FLAG_TBI) { |
db861466 SZ |
2934 | struct ifmedia *ifm = &sc->bge_ifmedia; |
2935 | ||
984263bc MD |
2936 | if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) |
2937 | return(EINVAL); | |
db861466 | 2938 | |
984263bc MD |
2939 | switch(IFM_SUBTYPE(ifm->ifm_media)) { |
2940 | case IFM_AUTO: | |
70059b3c JS |
2941 | /* |
2942 | * The BCM5704 ASIC appears to have a special | |
2943 | * mechanism for programming the autoneg | |
2944 | * advertisement registers in TBI mode. | |
2945 | */ | |
5c56d5d8 SZ |
2946 | if (!bge_fake_autoneg && |
2947 | sc->bge_asicrev == BGE_ASICREV_BCM5704) { | |
70059b3c JS |
2948 | uint32_t sgdig; |
2949 | ||
2950 | CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0); | |
2951 | sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG); | |
2952 | sgdig |= BGE_SGDIGCFG_AUTO | | |
2953 | BGE_SGDIGCFG_PAUSE_CAP | | |
2954 | BGE_SGDIGCFG_ASYM_PAUSE; | |
2955 | CSR_WRITE_4(sc, BGE_SGDIG_CFG, | |
2956 | sgdig | BGE_SGDIGCFG_SEND); | |
2957 | DELAY(5); | |
2958 | CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig); | |
2959 | } | |
984263bc MD |
2960 | break; |
2961 | case IFM_1000_SX: | |
2962 | if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) { | |
2963 | BGE_CLRBIT(sc, BGE_MAC_MODE, | |
2964 | BGE_MACMODE_HALF_DUPLEX); | |
2965 | } else { | |
2966 | BGE_SETBIT(sc, BGE_MAC_MODE, | |
2967 | BGE_MACMODE_HALF_DUPLEX); | |
2968 | } | |
2969 | break; | |
2970 | default: | |
2971 | return(EINVAL); | |
2972 | } | |
db861466 SZ |
2973 | } else { |
2974 | struct mii_data *mii = device_get_softc(sc->bge_miibus); | |
984263bc | 2975 | |
db861466 | 2976 | sc->bge_link_evt++; |
3f82ed83 | 2977 | sc->bge_link = 0; |
db861466 SZ |
2978 | if (mii->mii_instance) { |
2979 | struct mii_softc *miisc; | |
984263bc | 2980 | |
db861466 SZ |
2981 | LIST_FOREACH(miisc, &mii->mii_phys, mii_list) |
2982 | mii_phy_reset(miisc); | |
2983 | } | |
2984 | mii_mediachg(mii); | |
2985 | } | |
984263bc MD |
2986 | return(0); |
2987 | } | |
2988 | ||
2989 | /* | |
2990 | * Report current media status. | |
2991 | */ | |
2992 | static void | |
33c39a69 | 2993 | bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) |
984263bc | 2994 | { |
33c39a69 | 2995 | struct bge_softc *sc = ifp->if_softc; |
984263bc | 2996 | |
0ecb11d7 | 2997 | if (sc->bge_flags & BGE_FLAG_TBI) { |
984263bc MD |
2998 | ifmr->ifm_status = IFM_AVALID; |
2999 | ifmr->ifm_active = IFM_ETHER; | |
3000 | if (CSR_READ_4(sc, BGE_MAC_STS) & | |
db861466 | 3001 | BGE_MACSTAT_TBI_PCS_SYNCHED) { |
984263bc | 3002 | ifmr->ifm_status |= IFM_ACTIVE; |
db861466 SZ |
3003 | } else { |
3004 | ifmr->ifm_active |= IFM_NONE; | |
3005 | return; | |
3006 | } | |
3007 | ||
984263bc MD |
3008 | ifmr->ifm_active |= IFM_1000_SX; |
3009 | if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX) | |
3010 | ifmr->ifm_active |= IFM_HDX; | |
3011 | else | |
3012 | ifmr->ifm_active |= IFM_FDX; | |
db861466 SZ |
3013 | } else { |
3014 | struct mii_data *mii = device_get_softc(sc->bge_miibus); | |
984263bc | 3015 | |
db861466 SZ |
3016 | mii_pollstat(mii); |
3017 | ifmr->ifm_active = mii->mii_media_active; | |
3018 | ifmr->ifm_status = mii->mii_media_status; | |
3019 | } | |
984263bc MD |
3020 | } |
3021 | ||
3022 | static int | |
33c39a69 | 3023 | bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr) |
984263bc MD |
3024 | { |
3025 | struct bge_softc *sc = ifp->if_softc; | |
98dabdac | 3026 | struct ifreq *ifr = (struct ifreq *)data; |
9a717c15 | 3027 | int mask, error = 0; |
984263bc | 3028 | |
aa65409c SZ |
3029 | ASSERT_SERIALIZED(ifp->if_serializer); |
3030 | ||
98dabdac | 3031 | switch (command) { |
984263bc | 3032 | case SIOCSIFMTU: |
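| /* Reject MTUs beyond the chip's limit; an accepted change forces a full re-init. */ |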
0ecb11d7 SZ |
3033 | if ((!BGE_IS_JUMBO_CAPABLE(sc) && ifr->ifr_mtu > ETHERMTU) || |
3034 | (BGE_IS_JUMBO_CAPABLE(sc) && | |
3035 | ifr->ifr_mtu > BGE_JUMBO_MTU)) { | |
984263bc | 3036 | error = EINVAL; |
0ecb11d7 | 3037 | } else if (ifp->if_mtu != ifr->ifr_mtu) { |
984263bc MD |
3038 | ifp->if_mtu = ifr->ifr_mtu; |
3039 | ifp->if_flags &= ~IFF_RUNNING; | |
3040 | bge_init(sc); | |
3041 | } | |
3042 | break; | |
3043 | case SIOCSIFFLAGS: | |
3044 | if (ifp->if_flags & IFF_UP) { | |
6439b28a | 3045 | if (ifp->if_flags & IFF_RUNNING) { |
98dabdac | 3046 | mask = ifp->if_flags ^ sc->bge_if_flags; |
6439b28a SZ |
3047 | |
3048 | /* | |
3049 | * If only the state of the PROMISC flag | |
3050 | * changed, then just use the 'set promisc | |
3051 | * mode' command instead of reinitializing | |
3052 | * the entire NIC. Doing a full re-init | |
3053 | * means reloading the firmware and waiting | |
3054 | * for it to start up, which may take a | |
3055 | * second or two. Similarly for ALLMULTI. | |
3056 | */ | |
98dabdac | 3057 | if (mask & IFF_PROMISC) |
6439b28a | 3058 | bge_setpromisc(sc); |
98dabdac | 3059 | if (mask & IFF_ALLMULTI) |
6439b28a SZ |
3060 | bge_setmulti(sc); |
3061 | } else { | |
984263bc | 3062 | bge_init(sc); |
6439b28a | 3063 | } |
984263bc | 3064 | } else { |
aa65409c | 3065 | if (ifp->if_flags & IFF_RUNNING) |
984263bc | 3066 | bge_stop(sc); |
984263bc MD |
3067 | } |
3068 | sc->bge_if_flags = ifp->if_flags; | |
984263bc MD |
3069 | break; |
3070 | case SIOCADDMULTI: | |
3071 | case SIOCDELMULTI: | |
98dabdac | 3072 | if (ifp->if_flags & IFF_RUNNING) |
984263bc | 3073 | bge_setmulti(sc); |
984263bc MD |
3074 | break; |
3075 | case SIOCSIFMEDIA: | |
3076 | case SIOCGIFMEDIA: | |
0ecb11d7 | 3077 | if (sc->bge_flags & BGE_FLAG_TBI) { |
984263bc MD |
3078 | error = ifmedia_ioctl(ifp, ifr, |
3079 | &sc->bge_ifmedia, command); | |
3080 | } else { | |
98dabdac SZ |
3081 | struct mii_data *mii; |
3082 | ||
984263bc MD |
3083 | mii = device_get_softc(sc->bge_miibus); |
3084 | error = ifmedia_ioctl(ifp, ifr, | |
98dabdac | 3085 | &mii->mii_media, command); |
984263bc MD |
3086 | } |
3087 | break; | |
3088 | case SIOCSIFCAP: | |
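| /* Toggle hardware checksum offload and update if_hwassist to match. */ |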
3089 | mask = ifr->ifr_reqcap ^ ifp->if_capenable; | |
3090 | if (mask & IFCAP_HWCSUM) { | |
cb623c48 | 3091 | ifp->if_capenable ^= IFCAP_HWCSUM; |
984263bc | 3092 | if (IFCAP_HWCSUM & ifp->if_capenable) |
cb623c48 | 3093 | ifp->if_hwassist = BGE_CSUM_FEATURES; |
984263bc | 3094 | else |
cb623c48 | 3095 | ifp->if_hwassist = 0; |
984263bc | 3096 | } |
984263bc MD |
3097 | break; |
3098 | default: | |
4cde4dd5 | 3099 | error = ether_ioctl(ifp, command, data); |
984263bc MD |
3100 | break; |
3101 | } | |
98dabdac | 3102 | return error; |
984263bc MD |
3103 | } |
3104 | ||
3105 | static void | |
33c39a69 | 3106 | bge_watchdog(struct ifnet *ifp) |
984263bc | 3107 | { |
33c39a69 | 3108 | struct bge_softc *sc = ifp->if_softc; |
984263bc | 3109 | |
c6fd6f3b | 3110 | if_printf(ifp, "watchdog timeout -- resetting\n"); |
984263bc MD |
3111 | |
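| /* Clear IFF_RUNNING so bge_init() performs a full stop/reset/re-init. */ |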
3112 | ifp->if_flags &= ~IFF_RUNNING; | |
3113 | bge_init(sc); | |
3114 | ||
3115 | ifp->if_oerrors++; | |
2f54d1d2 SZ |
3116 | |
3117 | if (!ifq_is_empty(&ifp->if_snd)) | |
9db4b353 | 3118 | if_devstart(ifp); |
984263bc MD |
3119 | } |
3120 | ||
3121 | /* | |
3122 | * Stop the adapter and free any mbufs allocated to the | |
3123 | * RX and TX lists. | |
3124 | */ | |
3125 | static void | |
33c39a69 | 3126 | bge_stop(struct bge_softc *sc) |
984263bc | 3127 | { |
33c39a69 | 3128 | struct ifnet *ifp = &sc->arpcom.ac_if; |
984263bc MD |
3129 | struct ifmedia_entry *ifm; |
3130 | struct mii_data *mii = NULL; | |
3131 | int mtmp, itmp; | |
3132 | ||
aa65409c SZ |
3133 | ASSERT_SERIALIZED(ifp->if_serializer); |
3134 | ||
0ecb11d7 | 3135 | if ((sc->bge_flags & BGE_FLAG_TBI) == 0) |
984263bc MD |
3136 | mii = device_get_softc(sc->bge_miibus); |
3137 | ||
263489fb | 3138 | callout_stop(&sc->bge_stat_timer); |
984263bc MD |
3139 | |
3140 | /* | |
3141 | * Disable all of the receiver blocks | |
3142 | */ | |
3143 | BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE); | |
3144 | BGE_CLRBIT(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE); | |
3145 | BGE_CLRBIT(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE); | |
0ecb11d7 | 3146 | if (!BGE_IS_5705_PLUS(sc)) |
7e40b8c5 | 3147 | BGE_CLRBIT(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE); |
984263bc MD |
3148 | BGE_CLRBIT(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE); |
3149 | BGE_CLRBIT(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE); | |
3150 | BGE_CLRBIT(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE); | |
3151 | ||
3152 | /* | |
3153 | * Disable all of the transmit blocks | |
3154 | */ | |
3155 | BGE_CLRBIT(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE); | |
3156 | BGE_CLRBIT(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE); | |
3157 | BGE_CLRBIT(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE); | |
3158 | BGE_CLRBIT(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE); | |
3159 | BGE_CLRBIT(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE); | |
0ecb11d7 | 3160 | if (!BGE_IS_5705_PLUS(sc)) |
7e40b8c5 | 3161 | BGE_CLRBIT(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE); |
984263bc MD |
3162 | BGE_CLRBIT(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE); |
3163 | ||
3164 | /* | |
3165 | * Shut down all of the memory managers and related | |
3166 | * state machines. | |
3167 | */ | |
3168 | BGE_CLRBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE); | |
3169 | BGE_CLRBIT(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE); | |
0ecb11d7 | 3170 | if (!BGE_IS_5705_PLUS(sc)) |
7e40b8c5 | 3171 | BGE_CLRBIT(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE); |
984263bc MD |
3172 | CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF); |
3173 | CSR_WRITE_4(sc, BGE_FTQ_RESET, 0); | |
0ecb11d7 | 3174 | if (!BGE_IS_5705_PLUS(sc)) { |
7e40b8c5 HP |
3175 | BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE); |
3176 | BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE); | |
3177 | } | |
984263bc MD |
3178 | |
3179 | /* Disable host interrupts. */ | |
ba39cc82 | 3180 | bge_disable_intr(sc); |
984263bc MD |
3181 | |
3182 | /* | |
3183 | * Tell firmware we're shutting down. | |
3184 | */ | |
3185 | BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); | |
3186 | ||
3187 | /* Free the RX lists. */ | |
3188 | bge_free_rx_ring_std(sc); | |
3189 | ||
3190 | /* Free jumbo RX list. */ | |
0ecb11d7 | 3191 | if (BGE_IS_JUMBO_CAPABLE(sc)) |
7e40b8c5 | 3192 | bge_free_rx_ring_jumbo(sc); |
984263bc MD |
3193 | |
3194 | /* Free TX buffers. */ | |
3195 | bge_free_tx_ring(sc); | |
3196 | ||
3197 | /* | |
3198 | * Isolate/power down the PHY, but leave the media selection | |
3199 | * unchanged so that things will be put back to normal when | |
3200 | * we bring the interface back up. | |
32715c56 SZ |
3201 | * |
3202 | * 'mii' may be NULL in the following cases: | |
3203 | * - The device uses TBI. | |
3204 | * - bge_stop() is called by bge_detach(). | |
984263bc | 3205 | */ |
32715c56 | 3206 | if (mii != NULL) { |
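| /* Temporarily select IFM_NONE to isolate the PHY, then restore the saved media and flags. */ |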
984263bc MD |
3207 | itmp = ifp->if_flags; |
3208 | ifp->if_flags |= IFF_UP; | |
3209 | ifm = mii->mii_media.ifm_cur; | |
3210 | mtmp = ifm->ifm_media; | |
3211 | ifm->ifm_media = IFM_ETHER|IFM_NONE; | |
3212 | mii_mediachg(mii); | |
3213 | ifm->ifm_media = mtmp; | |
3214 | ifp->if_flags = itmp; | |
3215 | } | |
3216 | ||
3217 | sc->bge_link = 0; | |
055d06f0 | 3218 | sc->bge_coal_chg = 0; |
984263bc MD |
3219 | |
3220 | sc->bge_tx_saved_considx = BGE_TXCONS_UNSET; | |
3221 | ||
3222 | ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); | |
e2c1cee4 | 3223 | ifp->if_timer = 0; |
984263bc MD |
3224 | } |
3225 | ||
3226 | /* | |
3227 | * Stop all chip I/O so that the kernel's probe routines don't | |
3228 | * get confused by errant DMAs when rebooting. | |
3229 | */ | |
3230 | static void | |
33c39a69 | 3231 | bge_shutdown(device_t dev) |
984263bc | 3232 | { |
33c39a69 | 3233 | struct bge_softc *sc = device_get_softc(dev); |
aa65409c | 3234 | struct ifnet *ifp = &sc->arpcom.ac_if; |
984263bc | 3235 | |
aa65409c SZ |
3236 | lwkt_serialize_enter(ifp->if_serializer); |
3237 | bge_stop(sc); | |
984263bc | 3238 | bge_reset(sc); |
aa65409c SZ |
3239 | lwkt_serialize_exit(ifp->if_serializer); |
3240 | } | |
3241 | ||
3242 | static int | |
3243 | bge_suspend(device_t dev) | |
3244 | { | |
3245 | struct bge_softc *sc = device_get_softc(dev); | |
3246 | struct ifnet *ifp = &sc->arpcom.ac_if; | |
3247 | ||
3248 | lwkt_serialize_enter(ifp->if_serializer); | |
3249 | bge_stop(sc); | |
3250 | lwkt_serialize_exit(ifp->if_serializer); | |
3251 | ||
3252 | return 0; | |
3253 | } | |
3254 | ||
3255 | static int | |
3256 | bge_resume(device_t dev) | |
3257 | { | |
3258 | struct bge_softc *sc = device_get_softc(dev); | |
3259 | struct ifnet *ifp = &sc->arpcom.ac_if; | |
3260 | ||
3261 | lwkt_serialize_enter(ifp->if_serializer); | |
3262 | ||
3263 | if (ifp->if_flags & IFF_UP) { | |
3264 | bge_init(sc); | |
3265 | ||
20c9a969 | 3266 | if (!ifq_is_empty(&ifp->if_snd)) |
9db4b353 | 3267 | if_devstart(ifp); |
aa65409c SZ |
3268 | } |
3269 | ||
3270 | lwkt_serialize_exit(ifp->if_serializer); | |
3271 | ||
3272 | return 0; | |
984263bc | 3273 | } |
6439b28a SZ |
3274 | |
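| /* Set or clear the RX MAC's promiscuous bit to match IFF_PROMISC. */ |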
3275 | static void | |
3276 | bge_setpromisc(struct bge_softc *sc) | |
3277 | { | |
3278 | struct ifnet *ifp = &sc->arpcom.ac_if; | |
3279 | ||
3280 | if (ifp->if_flags & IFF_PROMISC) | |
3281 | BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); | |
3282 | else | |
3283 | BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); | |
3284 | } | |
20c9a969 SZ |
3285 | |
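| /* busdma callback for single-segment block loads; just record the lone segment. */ |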
3286 | static void | |
3287 | bge_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error) | |
3288 | { | |
3289 | struct bge_dmamap_arg *ctx = arg; | |
3290 | ||
3291 | if (error) | |
3292 | return; | |
3293 | ||
3294 | KASSERT(nsegs == 1 && ctx->bge_maxsegs == 1, | |
3295 | ("only one segment is allowed\n")); | |
3296 | ||
3297 | ctx->bge_segs[0] = *segs; | |
3298 | } | |
3299 | ||
3300 | static void | |
3301 | bge_dma_map_mbuf(void *arg, bus_dma_segment_t *segs, int nsegs, | |
3302 | bus_size_t mapsz __unused, int error) | |
3303 | { | |
3304 | struct bge_dmamap_arg *ctx = arg; | |
3305 | int i; | |
3306 | ||
3307 | if (error) | |
3308 | return; | |
3309 | ||
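| /* Report 'too many segments' by zeroing bge_maxsegs; bge_encap() will defragment the chain and retry. */ |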
3310 | if (nsegs > ctx->bge_maxsegs) { | |
3311 | ctx->bge_maxsegs = 0; | |
3312 | return; | |
3313 | } | |
3314 | ||
3315 | ctx->bge_maxsegs = nsegs; | |
3316 | for (i = 0; i < nsegs; ++i) | |
3317 | ctx->bge_segs[i] = segs[i]; | |
3318 | } | |
3319 | ||
3320 | static void | |
3321 | bge_dma_free(struct bge_softc *sc) | |
3322 | { | |
3323 | int i; | |
3324 | ||
3325 | /* Destroy RX/TX mbuf DMA stuffs. */ | |
3326 | if (sc->bge_cdata.bge_mtag != NULL) { | |
3327 | for (i = 0; i < BGE_STD_RX_RING_CNT; i++) { | |
3328 | if (sc->bge_cdata.bge_rx_std_dmamap[i]) { | |
3329 | bus_dmamap_destroy(sc->bge_cdata.bge_mtag, | |
3330 | sc->bge_cdata.bge_rx_std_dmamap[i]); | |
3331 | } | |
3332 | } | |
3333 | ||
3334 | for (i = 0; i < BGE_TX_RING_CNT; i++) { | |
3335 | if (sc->bge_cdata.bge_tx_dmamap[i]) { | |
3336 | bus_dmamap_destroy(sc->bge_cdata.bge_mtag, | |
3337 | sc->bge_cdata.bge_tx_dmamap[i]); | |
3338 | } | |
3339 | } | |
3340 | bus_dma_tag_destroy(sc->bge_cdata.bge_mtag); | |
3341 | } | |
3342 | ||
3343 | /* Destroy standard RX ring */ | |
3344 | bge_dma_block_free(sc->bge_cdata.bge_rx_std_ring_tag, | |
3345 | sc->bge_cdata.bge_rx_std_ring_map, | |
3346 | sc->bge_ldata.bge_rx_std_ring); | |
3347 | ||
0ecb11d7 | 3348 | if (BGE_IS_JUMBO_CAPABLE(sc)) |
20c9a969 SZ |
3349 | bge_free_jumbo_mem(sc); |
3350 | ||
3351 | /* Destroy RX return ring */ | |
3352 | bge_dma_block_free(sc->bge_cdata.bge_rx_return_ring_tag, | |
3353 | sc->bge_cdata.bge_rx_return_ring_map, | |
3354 | sc->bge_ldata.bge_rx_return_ring); | |
3355 | ||
3356 | /* Destroy TX ring */ | |
3357 | bge_dma_block_free(sc->bge_cdata.bge_tx_ring_tag, | |
3358 | sc->bge_cdata.bge_tx_ring_map, | |
3359 | sc->bge_ldata.bge_tx_ring); | |
3360 | ||
3361 | /* Destroy status block */ | |
3362 | bge_dma_block_free(sc->bge_cdata.bge_status_tag, | |
3363 | sc->bge_cdata.bge_status_map, | |
3364 | sc->bge_ldata.bge_status_block); | |
3365 | ||
3366 | /* Destroy statistics block */ | |
3367 | bge_dma_block_free(sc->bge_cdata.bge_stats_tag, | |
3368 | sc->bge_cdata.bge_stats_map, | |
3369 | sc->bge_ldata.bge_stats); | |
3370 | ||
3371 | /* Destroy the parent tag */ | |
3372 | if (sc->bge_cdata.bge_parent_tag != NULL) | |