/*
 * Copyright (c) 2001 Wind River Systems
 * Copyright (c) 1997, 1998, 1999, 2001
 *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/bge/if_bge.c,v 1.3.2.39 2005/07/03 03:41:18 silby Exp $
 */

/*
 * Broadcom BCM570x family gigabit ethernet driver for FreeBSD.
 *
 * Written by Bill Paul <wpaul@windriver.com>
 * Senior Engineer, Wind River Systems
 */

/*
 * The Broadcom BCM5700 is based on technology originally developed by
 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
 * MAC chips.  The BCM5700, sometimes referred to as the Tigon III, has
 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
 * SSRAM.  The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
 * frames, highly configurable RX filtering, and 16 RX and TX queues
 * (which, along with RX filter rules, can be used for QOS applications).
 * Other features, such as TCP segmentation, may be available as part
 * of value-added firmware updates.  Unlike the Tigon I and Tigon II,
 * firmware images can be stored in hardware and need not be compiled
 * into the driver.
 *
 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
 *
 * The BCM5701 is a single-chip solution incorporating both the BCM5700
 * MAC and a BCM5401 10/100/1000 PHY.  Unlike the BCM5700, the BCM5701
 * does not support external SSRAM.
 *
 * Broadcom also produces a variation of the BCM5700 under the "Altima"
 * brand name, which is functionally similar but lacks PCI-X support.
 *
 * Without external SSRAM, you can only have at most 4 TX rings,
 * and the use of the mini RX ring is disabled.  This seems to imply
 * that these features are simply not available on the BCM5701.  As a
 * result, this driver does not implement any support for the mini RX
 * ring.
 */

#include "opt_ifpoll.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/interrupt.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/rman.h>
#include <sys/serialize.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <netinet/ip.h>
#include <netinet/tcp.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_poll.h>
#include <net/if_types.h>
#include <net/ifq_var.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>

#include <dev/netif/mii_layer/mii.h>
#include <dev/netif/mii_layer/miivar.h>
#include <dev/netif/mii_layer/brgphyreg.h>

#include "pcidevs.h"
#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>

#include <dev/netif/bge/if_bgereg.h>
#include <dev/netif/bge/if_bgevar.h>

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

#define BGE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP)

#define	BGE_RESET_SHUTDOWN	0
#define	BGE_RESET_START		1
#define	BGE_RESET_SUSPEND	2

static const struct bge_type {
	uint16_t	bge_vid;
	uint16_t	bge_did;
	char		*bge_name;
} bge_devs[] = {
	{ PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3C996,
		"3COM 3C996 Gigabit Ethernet" },

	{ PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_BCM5700,
		"Alteon BCM5700 Gigabit Ethernet" },
	{ PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_BCM5701,
		"Alteon BCM5701 Gigabit Ethernet" },

	{ PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1000,
		"Altima AC1000 Gigabit Ethernet" },
	{ PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1001,
		"Altima AC1002 Gigabit Ethernet" },
	{ PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC9100,
		"Altima AC9100 Gigabit Ethernet" },

	{ PCI_VENDOR_APPLE, PCI_PRODUCT_APPLE_BCM5701,
		"Apple BCM5701 Gigabit Ethernet" },

	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5700,
		"Broadcom BCM5700 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5701,
		"Broadcom BCM5701 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702,
		"Broadcom BCM5702 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702X,
		"Broadcom BCM5702X Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702_ALT,
		"Broadcom BCM5702 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703,
		"Broadcom BCM5703 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703X,
		"Broadcom BCM5703X Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703A3,
		"Broadcom BCM5703 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704C,
		"Broadcom BCM5704C Dual Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704S,
		"Broadcom BCM5704S Dual Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704S_ALT,
		"Broadcom BCM5704S Dual Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705,
		"Broadcom BCM5705 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705F,
		"Broadcom BCM5705F Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705K,
		"Broadcom BCM5705K Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705M,
		"Broadcom BCM5705M Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705M_ALT,
		"Broadcom BCM5705M Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5714,
		"Broadcom BCM5714C Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5714S,
		"Broadcom BCM5714S Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5715,
		"Broadcom BCM5715 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5715S,
		"Broadcom BCM5715S Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5720,
		"Broadcom BCM5720 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5721,
		"Broadcom BCM5721 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5722,
		"Broadcom BCM5722 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5723,
		"Broadcom BCM5723 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5750,
		"Broadcom BCM5750 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5750M,
		"Broadcom BCM5750M Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751,
		"Broadcom BCM5751 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751F,
		"Broadcom BCM5751F Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751M,
		"Broadcom BCM5751M Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5752,
		"Broadcom BCM5752 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5752M,
		"Broadcom BCM5752M Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5753,
		"Broadcom BCM5753 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5753F,
		"Broadcom BCM5753F Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5753M,
		"Broadcom BCM5753M Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5754,
		"Broadcom BCM5754 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5754M,
		"Broadcom BCM5754M Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5755,
		"Broadcom BCM5755 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5755M,
		"Broadcom BCM5755M Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5756,
		"Broadcom BCM5756 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5761,
		"Broadcom BCM5761 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5761E,
		"Broadcom BCM5761E Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5761S,
		"Broadcom BCM5761S Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5761SE,
		"Broadcom BCM5761SE Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5764,
		"Broadcom BCM5764 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5780,
		"Broadcom BCM5780 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5780S,
		"Broadcom BCM5780S Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5781,
		"Broadcom BCM5781 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5782,
		"Broadcom BCM5782 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5784,
		"Broadcom BCM5784 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5785F,
		"Broadcom BCM5785F Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5785G,
		"Broadcom BCM5785G Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5786,
		"Broadcom BCM5786 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5787,
		"Broadcom BCM5787 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5787F,
		"Broadcom BCM5787F Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5787M,
		"Broadcom BCM5787M Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5788,
		"Broadcom BCM5788 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5789,
		"Broadcom BCM5789 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5901,
		"Broadcom BCM5901 Fast Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5901A2,
		"Broadcom BCM5901A2 Fast Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5903M,
		"Broadcom BCM5903M Fast Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5906,
		"Broadcom BCM5906 Fast Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5906M,
		"Broadcom BCM5906M Fast Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57760,
		"Broadcom BCM57760 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57780,
		"Broadcom BCM57780 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57788,
		"Broadcom BCM57788 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57790,
		"Broadcom BCM57790 Gigabit Ethernet" },

	{ PCI_VENDOR_SCHNEIDERKOCH, PCI_PRODUCT_SCHNEIDERKOCH_SK_9DX1,
		"SysKonnect Gigabit Ethernet" },

	{ 0, 0, NULL }
};

#define BGE_IS_JUMBO_CAPABLE(sc)	((sc)->bge_flags & BGE_FLAG_JUMBO)
#define BGE_IS_5700_FAMILY(sc)		((sc)->bge_flags & BGE_FLAG_5700_FAMILY)
#define BGE_IS_5705_PLUS(sc)		((sc)->bge_flags & BGE_FLAG_5705_PLUS)
#define BGE_IS_5714_FAMILY(sc)		((sc)->bge_flags & BGE_FLAG_5714_FAMILY)
#define BGE_IS_575X_PLUS(sc)		((sc)->bge_flags & BGE_FLAG_575X_PLUS)
#define BGE_IS_5755_PLUS(sc)		((sc)->bge_flags & BGE_FLAG_5755_PLUS)
#define BGE_IS_5788(sc)			((sc)->bge_flags & BGE_FLAG_5788)

#define BGE_IS_CRIPPLED(sc)		\
	(BGE_IS_5788((sc)) || (sc)->bge_asicrev == BGE_ASICREV_BCM5700)

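/*
 * "Crippled" here appears to refer to chips that cannot use the
 * streamlined status-block based interrupt handling: the original
 * BCM5700 and the 5788 are serviced by the legacy interrupt path
 * (bge_intr_crippled) instead.  This reading is inferred from how the
 * macro is used in this driver; consult the Broadcom documentation
 * for the authoritative list.
 */
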
typedef int	(*bge_eaddr_fcn_t)(struct bge_softc *, uint8_t[]);

static int	bge_probe(device_t);
static int	bge_attach(device_t);
static int	bge_detach(device_t);
static void	bge_txeof(struct bge_softc *, uint16_t);
static void	bge_rxeof(struct bge_softc *, uint16_t, int);

static void	bge_tick(void *);
static void	bge_stats_update(struct bge_softc *);
static void	bge_stats_update_regs(struct bge_softc *);
static struct mbuf *
		bge_defrag_shortdma(struct mbuf *);
static int	bge_encap(struct bge_softc *, struct mbuf **,
		    uint32_t *, int *);
static void	bge_xmit(struct bge_softc *, uint32_t);
static int	bge_setup_tso(struct bge_softc *, struct mbuf **,
		    uint16_t *, uint16_t *);

#ifdef IFPOLL_ENABLE
static void	bge_npoll(struct ifnet *, struct ifpoll_info *);
static void	bge_npoll_compat(struct ifnet *, void *, int);
#endif
static void	bge_intr_crippled(void *);
static void	bge_intr_legacy(void *);
static void	bge_msi(void *);
static void	bge_msi_oneshot(void *);
static void	bge_intr(struct bge_softc *);
static void	bge_enable_intr(struct bge_softc *);
static void	bge_disable_intr(struct bge_softc *);
static void	bge_start(struct ifnet *, struct ifaltq_subque *);
static int	bge_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	bge_init(void *);
static void	bge_stop(struct bge_softc *);
static void	bge_watchdog(struct ifnet *);
static void	bge_shutdown(device_t);
static int	bge_suspend(device_t);
static int	bge_resume(device_t);
static int	bge_ifmedia_upd(struct ifnet *);
static void	bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);

static uint8_t	bge_nvram_getbyte(struct bge_softc *, int, uint8_t *);
static int	bge_read_nvram(struct bge_softc *, caddr_t, int, int);

static uint8_t	bge_eeprom_getbyte(struct bge_softc *, uint32_t, uint8_t *);
static int	bge_read_eeprom(struct bge_softc *, caddr_t, uint32_t, size_t);

static void	bge_setmulti(struct bge_softc *);
static void	bge_setpromisc(struct bge_softc *);
static void	bge_enable_msi(struct bge_softc *sc);

static int	bge_alloc_jumbo_mem(struct bge_softc *);
static void	bge_free_jumbo_mem(struct bge_softc *);
static struct bge_jslot
		*bge_jalloc(struct bge_softc *);
static void	bge_jfree(void *);
static void	bge_jref(void *);
static int	bge_newbuf_std(struct bge_softc *, int, int);
static int	bge_newbuf_jumbo(struct bge_softc *, int, int);
static void	bge_setup_rxdesc_std(struct bge_softc *, int);
static void	bge_setup_rxdesc_jumbo(struct bge_softc *, int);
static int	bge_init_rx_ring_std(struct bge_softc *);
static void	bge_free_rx_ring_std(struct bge_softc *);
static int	bge_init_rx_ring_jumbo(struct bge_softc *);
static void	bge_free_rx_ring_jumbo(struct bge_softc *);
static void	bge_free_tx_ring(struct bge_softc *);
static int	bge_init_tx_ring(struct bge_softc *);

static int	bge_chipinit(struct bge_softc *);
static int	bge_blockinit(struct bge_softc *);
static void	bge_stop_block(struct bge_softc *, bus_size_t, uint32_t);

static uint32_t	bge_readmem_ind(struct bge_softc *, uint32_t);
static void	bge_writemem_ind(struct bge_softc *, uint32_t, uint32_t);
#ifdef notdef
static uint32_t	bge_readreg_ind(struct bge_softc *, uint32_t);
#endif
static void	bge_writereg_ind(struct bge_softc *, uint32_t, uint32_t);
static void	bge_writemem_direct(struct bge_softc *, uint32_t, uint32_t);
static void	bge_writembx(struct bge_softc *, int, int);

static int	bge_miibus_readreg(device_t, int, int);
static int	bge_miibus_writereg(device_t, int, int, int);
static void	bge_miibus_statchg(device_t);
static void	bge_bcm5700_link_upd(struct bge_softc *, uint32_t);
static void	bge_tbi_link_upd(struct bge_softc *, uint32_t);
static void	bge_copper_link_upd(struct bge_softc *, uint32_t);
static void	bge_autopoll_link_upd(struct bge_softc *, uint32_t);
static void	bge_link_poll(struct bge_softc *);

static void	bge_reset(struct bge_softc *);

static int	bge_dma_alloc(struct bge_softc *);
static void	bge_dma_free(struct bge_softc *);
static int	bge_dma_block_alloc(struct bge_softc *, bus_size_t,
		    bus_dma_tag_t *, bus_dmamap_t *,
		    void **, bus_addr_t *);
static void	bge_dma_block_free(bus_dma_tag_t, bus_dmamap_t, void *);

static int	bge_get_eaddr_mem(struct bge_softc *, uint8_t[]);
static int	bge_get_eaddr_nvram(struct bge_softc *, uint8_t[]);
static int	bge_get_eaddr_eeprom(struct bge_softc *, uint8_t[]);
static int	bge_get_eaddr(struct bge_softc *, uint8_t[]);

static void	bge_coal_change(struct bge_softc *);
static int	bge_sysctl_rx_coal_ticks(SYSCTL_HANDLER_ARGS);
static int	bge_sysctl_tx_coal_ticks(SYSCTL_HANDLER_ARGS);
static int	bge_sysctl_rx_coal_bds(SYSCTL_HANDLER_ARGS);
static int	bge_sysctl_tx_coal_bds(SYSCTL_HANDLER_ARGS);
static int	bge_sysctl_rx_coal_ticks_int(SYSCTL_HANDLER_ARGS);
static int	bge_sysctl_tx_coal_ticks_int(SYSCTL_HANDLER_ARGS);
static int	bge_sysctl_rx_coal_bds_int(SYSCTL_HANDLER_ARGS);
static int	bge_sysctl_tx_coal_bds_int(SYSCTL_HANDLER_ARGS);
static int	bge_sysctl_coal_chg(SYSCTL_HANDLER_ARGS, uint32_t *,
		    int, int, uint32_t);

static void	bge_sig_post_reset(struct bge_softc *, int);
static void	bge_sig_legacy(struct bge_softc *, int);
static void	bge_sig_pre_reset(struct bge_softc *, int);
static void	bge_stop_fw(struct bge_softc *);
static void	bge_asf_driver_up(struct bge_softc *);

static void	bge_ape_lock_init(struct bge_softc *);
static void	bge_ape_read_fw_ver(struct bge_softc *);
static int	bge_ape_lock(struct bge_softc *, int);
static void	bge_ape_unlock(struct bge_softc *, int);
static void	bge_ape_send_event(struct bge_softc *, uint32_t);
static void	bge_ape_driver_state_change(struct bge_softc *, int);

/*
 * Set following tunable to 1 for some IBM blade servers with the DNLK
 * switch module.  Auto negotiation is broken for those configurations.
 */
static int	bge_fake_autoneg = 0;
TUNABLE_INT("hw.bge.fake_autoneg", &bge_fake_autoneg);

static int	bge_msi_enable = 1;
TUNABLE_INT("hw.bge.msi.enable", &bge_msi_enable);

static int	bge_allow_asf = 1;
TUNABLE_INT("hw.bge.allow_asf", &bge_allow_asf);

#if !defined(KTR_IF_BGE)
#define KTR_IF_BGE	KTR_ALL
#endif
KTR_INFO_MASTER(if_bge);
KTR_INFO(KTR_IF_BGE, if_bge, intr, 0, "intr");
KTR_INFO(KTR_IF_BGE, if_bge, rx_pkt, 1, "rx_pkt");
KTR_INFO(KTR_IF_BGE, if_bge, tx_pkt, 2, "tx_pkt");
#define logif(name)	KTR_LOG(if_bge_ ## name)

static device_method_t bge_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		bge_probe),
	DEVMETHOD(device_attach,	bge_attach),
	DEVMETHOD(device_detach,	bge_detach),
	DEVMETHOD(device_shutdown,	bge_shutdown),
	DEVMETHOD(device_suspend,	bge_suspend),
	DEVMETHOD(device_resume,	bge_resume),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	bge_miibus_readreg),
	DEVMETHOD(miibus_writereg,	bge_miibus_writereg),
	DEVMETHOD(miibus_statchg,	bge_miibus_statchg),

	DEVMETHOD_END
};

static DEFINE_CLASS_0(bge, bge_driver, bge_methods, sizeof(struct bge_softc));
static devclass_t bge_devclass;

DECLARE_DUMMY_MODULE(if_bge);
MODULE_DEPEND(if_bge, miibus, 1, 1, 1);
DRIVER_MODULE(if_bge, pci, bge_driver, bge_devclass, NULL, NULL);
DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, NULL, NULL);

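/*
 * The chip exposes its internal memory through a sliding window in
 * PCI configuration space: write the target offset to
 * BGE_PCI_MEMWIN_BASEADDR, then move the data through
 * BGE_PCI_MEMWIN_DATA.  The helpers below wrap that sequence and
 * restore the window base to 0 afterwards (a summary of the code
 * that follows, not of chip documentation).
 */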
static uint32_t
bge_readmem_ind(struct bge_softc *sc, uint32_t off)
{
	device_t dev = sc->bge_dev;
	uint32_t val;

	if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
	    off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4)
		return 0;

	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
	val = pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4);
	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
	return (val);
}

static void
bge_writemem_ind(struct bge_softc *sc, uint32_t off, uint32_t val)
{
	device_t dev = sc->bge_dev;

	if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
	    off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4)
		return;

	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
	pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
}

#ifdef notdef
static uint32_t
bge_readreg_ind(struct bge_softc *sc, uint32_t off)
{
	device_t dev = sc->bge_dev;

	pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
	return(pci_read_config(dev, BGE_PCI_REG_DATA, 4));
}
#endif

static void
bge_writereg_ind(struct bge_softc *sc, uint32_t off, uint32_t val)
{
	device_t dev = sc->bge_dev;

	pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
	pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);
}

static void
bge_writemem_direct(struct bge_softc *sc, uint32_t off, uint32_t val)
{
	CSR_WRITE_4(sc, off, val);
}

static void
bge_writembx(struct bge_softc *sc, int off, int val)
{
	if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
		off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI;

	CSR_WRITE_4(sc, off, val);
	if (sc->bge_mbox_reorder)
		CSR_READ_4(sc, off);
}
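
/*
 * The dummy CSR_READ_4() above forces the preceding mailbox write out
 * to the device; on host bridges that reorder or post writes (flagged
 * by bge_mbox_reorder), skipping the read-back could let the doorbell
 * arrive late.  This rationale is inferred from the flag name and is
 * not verified against chip documentation.
 */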

static uint8_t
bge_nvram_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
{
	uint32_t access, byte = 0;
	int i;

	/* Lock. */
	CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
	for (i = 0; i < 8000; i++) {
		if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1)
			break;
		DELAY(20);
	}
	if (i == 8000)
		return (1);

	/* Enable access. */
	access = CSR_READ_4(sc, BGE_NVRAM_ACCESS);
	CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access | BGE_NVRAMACC_ENABLE);

	CSR_WRITE_4(sc, BGE_NVRAM_ADDR, addr & 0xfffffffc);
	CSR_WRITE_4(sc, BGE_NVRAM_CMD, BGE_NVRAM_READCMD);
	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
		DELAY(10);
		if (CSR_READ_4(sc, BGE_NVRAM_CMD) & BGE_NVRAMCMD_DONE) {
			DELAY(10);
			break;
		}
	}

	if (i == BGE_TIMEOUT * 10) {
		if_printf(&sc->arpcom.ac_if, "nvram read timed out\n");
		return (1);
	}

	/* Get result. */
	byte = CSR_READ_4(sc, BGE_NVRAM_RDDATA);

	*dest = (bswap32(byte) >> ((addr % 4) * 8)) & 0xFF;

	/* Disable access. */
	CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access);

	/* Unlock. */
	CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_CLR1);
	CSR_READ_4(sc, BGE_NVRAM_SWARB);

	return (0);
}
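
/*
 * Worked example of the byte extraction above: the NVRAM interface
 * returns a full 32-bit word for the aligned address (addr & ~3).
 * For addr = 0x7 (addr % 4 == 3), the word is byte-swapped and then
 * shifted right by 3 * 8 = 24 bits, leaving the requested byte in the
 * low 8 bits before the & 0xFF mask.
 */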

/*
 * Read a sequence of bytes from NVRAM.
 */
static int
bge_read_nvram(struct bge_softc *sc, caddr_t dest, int off, int cnt)
{
	int err = 0, i;
	uint8_t byte = 0;

	if (sc->bge_asicrev != BGE_ASICREV_BCM5906)
		return (1);

	for (i = 0; i < cnt; i++) {
		err = bge_nvram_getbyte(sc, off + i, &byte);
		if (err)
			break;
		*(dest + i) = byte;
	}

	return (err ? 1 : 0);
}

/*
 * Read a byte of data stored in the EEPROM at address 'addr.'  The
 * BCM570x supports both the traditional bitbang interface and an
 * auto access interface for reading the EEPROM.  We use the auto
 * access method.
 */
static uint8_t
bge_eeprom_getbyte(struct bge_softc *sc, uint32_t addr, uint8_t *dest)
{
	int i;
	uint32_t byte = 0;

	/*
	 * Enable use of auto EEPROM access so we can avoid
	 * having to use the bitbang method.
	 */
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);

	/* Reset the EEPROM, load the clock period. */
	CSR_WRITE_4(sc, BGE_EE_ADDR,
	    BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
	DELAY(20);

	/* Issue the read EEPROM command. */
	CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);

	/* Wait for completion */
	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
		DELAY(10);
		if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
			break;
	}

	/*
	 * The poll loop above runs to BGE_TIMEOUT * 10, so the timeout
	 * check must compare against the same bound (the original code
	 * compared against BGE_TIMEOUT and could never detect a timeout).
	 */
	if (i == BGE_TIMEOUT * 10) {
		if_printf(&sc->arpcom.ac_if, "eeprom read timed out\n");
		return(1);
	}

	/* Get result. */
	byte = CSR_READ_4(sc, BGE_EE_DATA);

	*dest = (byte >> ((addr % 4) * 8)) & 0xFF;

	return(0);
}

/*
 * Read a sequence of bytes from the EEPROM.
 */
static int
bge_read_eeprom(struct bge_softc *sc, caddr_t dest, uint32_t off, size_t len)
{
	size_t i;
	int err;
	uint8_t byte;

	for (byte = 0, err = 0, i = 0; i < len; i++) {
		err = bge_eeprom_getbyte(sc, off + i, &byte);
		if (err)
			break;
		*(dest + i) = byte;
	}

	return(err ? 1 : 0);
}

static int
bge_miibus_readreg(device_t dev, int phy, int reg)
{
	struct bge_softc *sc = device_get_softc(dev);
	uint32_t val;
	int i;

	KASSERT(phy == sc->bge_phyno,
	    ("invalid phyno %d, should be %d", phy, sc->bge_phyno));

	if (bge_ape_lock(sc, sc->bge_phy_ape_lock) != 0)
		return 0;

	/* Clear the autopoll bit if set, otherwise may trigger PCI errors. */
	if (sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) {
		CSR_WRITE_4(sc, BGE_MI_MODE,
		    sc->bge_mi_mode & ~BGE_MIMODE_AUTOPOLL);
		DELAY(80);
	}

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ | BGE_MICOMM_BUSY |
	    BGE_MIPHY(phy) | BGE_MIREG(reg));

	/* Poll for the PHY register access to complete. */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		DELAY(10);
		val = CSR_READ_4(sc, BGE_MI_COMM);
		if ((val & BGE_MICOMM_BUSY) == 0) {
			DELAY(5);
			val = CSR_READ_4(sc, BGE_MI_COMM);
			break;
		}
	}
	if (i == BGE_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if, "PHY read timed out "
		    "(phy %d, reg %d, val 0x%08x)\n", phy, reg, val);
		val = 0;
	}

	/* Restore the autopoll bit if necessary. */
	if (sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) {
		CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode);
		DELAY(80);
	}

	bge_ape_unlock(sc, sc->bge_phy_ape_lock);

	if (val & BGE_MICOMM_READFAIL)
		return 0;

	return (val & 0xFFFF);
}

static int
bge_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct bge_softc *sc = device_get_softc(dev);
	int i;

	KASSERT(phy == sc->bge_phyno,
	    ("invalid phyno %d, should be %d", phy, sc->bge_phyno));

	if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
	    (reg == BRGPHY_MII_1000CTL || reg == BRGPHY_MII_AUXCTL))
		return 0;

	if (bge_ape_lock(sc, sc->bge_phy_ape_lock) != 0)
		return 0;

	/* Clear the autopoll bit if set, otherwise may trigger PCI errors. */
	if (sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) {
		CSR_WRITE_4(sc, BGE_MI_MODE,
		    sc->bge_mi_mode & ~BGE_MIMODE_AUTOPOLL);
		DELAY(80);
	}

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE | BGE_MICOMM_BUSY |
	    BGE_MIPHY(phy) | BGE_MIREG(reg) | val);

	for (i = 0; i < BGE_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) {
			DELAY(5);
			CSR_READ_4(sc, BGE_MI_COMM); /* dummy read */
			break;
		}
	}
	if (i == BGE_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if, "PHY write timed out "
		    "(phy %d, reg %d, val %d)\n", phy, reg, val);
	}

	/* Restore the autopoll bit if necessary. */
	if (sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) {
		CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode);
		DELAY(80);
	}

	bge_ape_unlock(sc, sc->bge_phy_ape_lock);

	return 0;
}

static void
bge_miibus_statchg(device_t dev)
{
	struct bge_softc *sc;
	struct mii_data *mii;
	uint32_t mac_mode;

	sc = device_get_softc(dev);
	if ((sc->arpcom.ac_if.if_flags & IFF_RUNNING) == 0)
		return;

	mii = device_get_softc(sc->bge_miibus);

	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->bge_link = 1;
			break;
		case IFM_1000_T:
		case IFM_1000_SX:
		case IFM_2500_SX:
			if (sc->bge_asicrev != BGE_ASICREV_BCM5906)
				sc->bge_link = 1;
			else
				sc->bge_link = 0;
			break;
		default:
			sc->bge_link = 0;
			break;
		}
	} else {
		sc->bge_link = 0;
	}
	if (sc->bge_link == 0)
		return;

	/*
	 * APE firmware touches these registers to keep the MAC
	 * connected to the outside world.  Try to keep the
	 * accesses atomic.
	 */

	mac_mode = CSR_READ_4(sc, BGE_MAC_MODE) &
	    ~(BGE_MACMODE_PORTMODE | BGE_MACMODE_HALF_DUPLEX);

	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
	    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
		mac_mode |= BGE_PORTMODE_GMII;
	else
		mac_mode |= BGE_PORTMODE_MII;

	if ((mii->mii_media_active & IFM_GMASK) != IFM_FDX)
		mac_mode |= BGE_MACMODE_HALF_DUPLEX;

	CSR_WRITE_4(sc, BGE_MAC_MODE, mac_mode);
	DELAY(40);
}

/*
 * Memory management for jumbo frames.
 */
static int
bge_alloc_jumbo_mem(struct bge_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct bge_jslot *entry;
	uint8_t *ptr;
	bus_addr_t paddr;
	int i, error;

	/*
	 * Create tag for jumbo mbufs.
	 * This is really a bit of a kludge.  We allocate a special
	 * jumbo buffer pool which (thanks to the way our DMA
	 * memory allocation works) will consist of contiguous
	 * pages.  This means that even though a jumbo buffer might
	 * be larger than a page size, we don't really need to
	 * map it into more than one DMA segment.  However, the
	 * default mbuf tag will result in multi-segment mappings,
	 * so we have to create a special jumbo mbuf tag that
	 * lets us get away with mapping the jumbo buffers as
	 * a single segment.  I think eventually the driver should
	 * be changed so that it uses ordinary mbufs and cluster
	 * buffers, i.e. jumbo frames can span multiple DMA
	 * descriptors.  But that's a project for another day.
	 */

	/*
	 * Create DMA stuffs for jumbo RX ring.
	 */
	error = bge_dma_block_alloc(sc, BGE_JUMBO_RX_RING_SZ,
				    &sc->bge_cdata.bge_rx_jumbo_ring_tag,
				    &sc->bge_cdata.bge_rx_jumbo_ring_map,
				    (void *)&sc->bge_ldata.bge_rx_jumbo_ring,
				    &sc->bge_ldata.bge_rx_jumbo_ring_paddr);
	if (error) {
		if_printf(ifp, "could not create jumbo RX ring\n");
		return error;
	}

	/*
	 * Create DMA stuffs for jumbo buffer block.
	 */
	error = bge_dma_block_alloc(sc, BGE_JMEM,
				    &sc->bge_cdata.bge_jumbo_tag,
				    &sc->bge_cdata.bge_jumbo_map,
				    (void **)&sc->bge_ldata.bge_jumbo_buf,
				    &paddr);
	if (error) {
		if_printf(ifp, "could not create jumbo buffer\n");
		return error;
	}

	SLIST_INIT(&sc->bge_jfree_listhead);

	/*
	 * Now divide it up into 9K pieces and save the addresses
	 * in an array.  Note that we play an evil trick here by using
	 * the first few bytes in the buffer to hold the address
	 * of the softc structure for this interface.  This is because
	 * bge_jfree() needs it, but it is called by the mbuf management
	 * code which will not pass it to us explicitly.
	 */
	for (i = 0, ptr = sc->bge_ldata.bge_jumbo_buf; i < BGE_JSLOTS; i++) {
		entry = &sc->bge_cdata.bge_jslots[i];
		entry->bge_sc = sc;
		entry->bge_buf = ptr;
		entry->bge_paddr = paddr;
		entry->bge_inuse = 0;
		entry->bge_slot = i;
		SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, entry, jslot_link);

		ptr += BGE_JLEN;
		paddr += BGE_JLEN;
	}
	return 0;
}
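
/*
 * Pool layout: the jumbo buffer block is carved into BGE_JSLOTS
 * fixed-size slots of BGE_JLEN bytes each, walked by a KVA pointer
 * (ptr) and a bus address (paddr) in lockstep, so entry->bge_buf and
 * entry->bge_paddr always describe the same slot.  BGE_JMEM is
 * presumably sized as BGE_JSLOTS * BGE_JLEN in if_bgevar.h
 * (assumption; see that header for the real definition).
 */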

static void
bge_free_jumbo_mem(struct bge_softc *sc)
{
	/* Destroy jumbo RX ring. */
	bge_dma_block_free(sc->bge_cdata.bge_rx_jumbo_ring_tag,
			   sc->bge_cdata.bge_rx_jumbo_ring_map,
			   sc->bge_ldata.bge_rx_jumbo_ring);

	/* Destroy jumbo buffer block. */
	bge_dma_block_free(sc->bge_cdata.bge_jumbo_tag,
			   sc->bge_cdata.bge_jumbo_map,
			   sc->bge_ldata.bge_jumbo_buf);
}

/*
 * Allocate a jumbo buffer.
 */
static struct bge_jslot *
bge_jalloc(struct bge_softc *sc)
{
	struct bge_jslot *entry;

	lwkt_serialize_enter(&sc->bge_jslot_serializer);
	entry = SLIST_FIRST(&sc->bge_jfree_listhead);
	if (entry) {
		SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jslot_link);
		entry->bge_inuse = 1;
	} else {
		if_printf(&sc->arpcom.ac_if, "no free jumbo buffers\n");
	}
	lwkt_serialize_exit(&sc->bge_jslot_serializer);
	return(entry);
}

/*
 * Adjust usage count on a jumbo buffer.
 */
static void
bge_jref(void *arg)
{
	struct bge_jslot *entry = (struct bge_jslot *)arg;
	struct bge_softc *sc = entry->bge_sc;

	if (sc == NULL)
		panic("bge_jref: can't find softc pointer!");

	if (&sc->bge_cdata.bge_jslots[entry->bge_slot] != entry) {
		panic("bge_jref: asked to reference buffer "
		    "that we don't manage!");
	} else if (entry->bge_inuse == 0) {
		panic("bge_jref: buffer already free!");
	} else {
		atomic_add_int(&entry->bge_inuse, 1);
	}
}

/*
 * Release a jumbo buffer.
 */
static void
bge_jfree(void *arg)
{
	struct bge_jslot *entry = (struct bge_jslot *)arg;
	struct bge_softc *sc = entry->bge_sc;

	if (sc == NULL)
		panic("bge_jfree: can't find softc pointer!");

	if (&sc->bge_cdata.bge_jslots[entry->bge_slot] != entry) {
		panic("bge_jfree: asked to free buffer that we don't manage!");
	} else if (entry->bge_inuse == 0) {
		panic("bge_jfree: buffer already free!");
	} else {
		/*
		 * Possible MP race to 0, use the serializer.  The atomic insn
		 * is still needed for races against bge_jref().
		 */
		lwkt_serialize_enter(&sc->bge_jslot_serializer);
		atomic_subtract_int(&entry->bge_inuse, 1);
		if (entry->bge_inuse == 0) {
			SLIST_INSERT_HEAD(&sc->bge_jfree_listhead,
					  entry, jslot_link);
		}
		lwkt_serialize_exit(&sc->bge_jslot_serializer);
	}
}
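
/*
 * Lifecycle note: bge_jref()/bge_jfree() back the mbuf external
 * storage hooks (ext_ref/ext_free) installed by bge_newbuf_jumbo(),
 * so a slot returns to bge_jfree_listhead only when the last mbuf
 * reference drops.  The atomics keep the refcount itself consistent;
 * the serializer closes the race between the final decrement and the
 * free-list insertion.
 */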

/*
 * Initialize a standard receive ring descriptor.
 */
static int
bge_newbuf_std(struct bge_softc *sc, int i, int init)
{
	struct mbuf *m_new = NULL;
	bus_dma_segment_t seg;
	bus_dmamap_t map;
	int error, nsegs;

	m_new = m_getcl(init ? M_WAITOK : M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m_new == NULL)
		return ENOBUFS;
	m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;

	if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
		m_adj(m_new, ETHER_ALIGN);

	error = bus_dmamap_load_mbuf_segment(sc->bge_cdata.bge_rx_mtag,
			sc->bge_cdata.bge_rx_tmpmap, m_new,
			&seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m_new);
		return error;
	}

	if (!init) {
		bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
				sc->bge_cdata.bge_rx_std_dmamap[i],
				BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag,
				sc->bge_cdata.bge_rx_std_dmamap[i]);
	}

	map = sc->bge_cdata.bge_rx_tmpmap;
	sc->bge_cdata.bge_rx_tmpmap = sc->bge_cdata.bge_rx_std_dmamap[i];
	sc->bge_cdata.bge_rx_std_dmamap[i] = map;

	sc->bge_cdata.bge_rx_std_chain[i].bge_mbuf = m_new;
	sc->bge_cdata.bge_rx_std_chain[i].bge_paddr = seg.ds_addr;

	bge_setup_rxdesc_std(sc, i);
	return 0;
}
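
/*
 * Note the map-swap trick above: the new mbuf is loaded into the
 * spare bge_rx_tmpmap first, so a DMA load failure leaves the ring
 * slot's existing mbuf and mapping untouched.  Only after a
 * successful load are the temporary and per-slot maps exchanged.
 */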

static void
bge_setup_rxdesc_std(struct bge_softc *sc, int i)
{
	struct bge_rxchain *rc;
	struct bge_rx_bd *r;

	rc = &sc->bge_cdata.bge_rx_std_chain[i];
	r = &sc->bge_ldata.bge_rx_std_ring[i];

	r->bge_addr.bge_addr_lo = BGE_ADDR_LO(rc->bge_paddr);
	r->bge_addr.bge_addr_hi = BGE_ADDR_HI(rc->bge_paddr);
	r->bge_len = rc->bge_mbuf->m_len;
	r->bge_idx = i;
	r->bge_flags = BGE_RXBDFLAG_END;
}
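
/*
 * bge_setup_rxdesc_std() is split out from bge_newbuf_std() so the RX
 * path can re-arm a descriptor with its current mbuf (e.g. when a
 * replacement allocation fails) without reloading any DMA maps.  That
 * usage lives in bge_rxeof(), outside this excerpt (assumption based
 * on the newbuf/setup split).
 */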
1074
984263bc
MD
1075/*
1076 * Initialize a jumbo receive ring descriptor. This allocates
1077 * a jumbo buffer from the pool managed internally by the driver.
1078 */
1079static int
1436f9a0 1080bge_newbuf_jumbo(struct bge_softc *sc, int i, int init)
984263bc
MD
1081{
1082 struct mbuf *m_new = NULL;
20c9a969 1083 struct bge_jslot *buf;
20c9a969 1084 bus_addr_t paddr;
984263bc 1085
1436f9a0 1086 /* Allocate the mbuf. */
b5523eac 1087 MGETHDR(m_new, init ? M_WAITOK : M_NOWAIT, MT_DATA);
1436f9a0
SZ
1088 if (m_new == NULL)
1089 return ENOBUFS;
2aa9b12f 1090
1436f9a0
SZ
1091 /* Allocate the jumbo buffer */
1092 buf = bge_jalloc(sc);
1093 if (buf == NULL) {
1094 m_freem(m_new);
1095 return ENOBUFS;
984263bc 1096 }
1436f9a0
SZ
1097
1098 /* Attach the buffer to the mbuf. */
1099 m_new->m_ext.ext_arg = buf;
1100 m_new->m_ext.ext_buf = buf->bge_buf;
1101 m_new->m_ext.ext_free = bge_jfree;
1102 m_new->m_ext.ext_ref = bge_jref;
1103 m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN;
1104
1105 m_new->m_flags |= M_EXT;
1106
20c9a969
SZ
1107 m_new->m_data = m_new->m_ext.ext_buf;
1108 m_new->m_len = m_new->m_pkthdr.len = m_new->m_ext.ext_size;
984263bc 1109
20c9a969 1110 paddr = buf->bge_paddr;
0ecb11d7 1111 if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0) {
984263bc 1112 m_adj(m_new, ETHER_ALIGN);
20c9a969
SZ
1113 paddr += ETHER_ALIGN;
1114 }
1115
1436f9a0
SZ
1116 /* Save necessary information */
1117 sc->bge_cdata.bge_rx_jumbo_chain[i].bge_mbuf = m_new;
1118 sc->bge_cdata.bge_rx_jumbo_chain[i].bge_paddr = paddr;
1119
984263bc 1120 /* Set up the descriptor. */
1436f9a0
SZ
1121 bge_setup_rxdesc_jumbo(sc, i);
1122 return 0;
1123}
1124
1125static void
1126bge_setup_rxdesc_jumbo(struct bge_softc *sc, int i)
1127{
1128 struct bge_rx_bd *r;
1129 struct bge_rxchain *rc;
20c9a969
SZ
1130
1131 r = &sc->bge_ldata.bge_rx_jumbo_ring[i];
1436f9a0 1132 rc = &sc->bge_cdata.bge_rx_jumbo_chain[i];
984263bc 1133
1436f9a0
SZ
1134 r->bge_addr.bge_addr_lo = BGE_ADDR_LO(rc->bge_paddr);
1135 r->bge_addr.bge_addr_hi = BGE_ADDR_HI(rc->bge_paddr);
1136 r->bge_len = rc->bge_mbuf->m_len;
1137 r->bge_idx = i;
1138 r->bge_flags = BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING;
984263bc
MD
1139}
1140
984263bc 1141static int
33c39a69 1142bge_init_rx_ring_std(struct bge_softc *sc)
984263bc 1143{
1436f9a0 1144 int i, error;
984263bc 1145
1436f9a0
SZ
1146 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1147 error = bge_newbuf_std(sc, i, 1);
1148 if (error)
1149 return error;
87c7a7cf 1150 }
984263bc 1151
1436f9a0 1152 sc->bge_std = BGE_STD_RX_RING_CNT - 1;
591dfc77 1153 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
984263bc
MD
1154
1155 return(0);
1156}
1157
1158static void
33c39a69 1159bge_free_rx_ring_std(struct bge_softc *sc)
984263bc
MD
1160{
1161 int i;
1162
1163 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1436f9a0
SZ
1164 struct bge_rxchain *rc = &sc->bge_cdata.bge_rx_std_chain[i];
1165
1166 if (rc->bge_mbuf != NULL) {
ddca511d 1167 bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag,
20c9a969 1168 sc->bge_cdata.bge_rx_std_dmamap[i]);
1436f9a0
SZ
1169 m_freem(rc->bge_mbuf);
1170 rc->bge_mbuf = NULL;
984263bc 1171 }
20c9a969 1172 bzero(&sc->bge_ldata.bge_rx_std_ring[i],
984263bc
MD
1173 sizeof(struct bge_rx_bd));
1174 }
984263bc
MD
1175}
1176
1177static int
33c39a69 1178bge_init_rx_ring_jumbo(struct bge_softc *sc)
984263bc 1179{
984263bc 1180 struct bge_rcb *rcb;
1436f9a0 1181 int i, error;
984263bc
MD
1182
1183 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1436f9a0
SZ
1184 error = bge_newbuf_jumbo(sc, i, 1);
1185 if (error)
1186 return error;
87c7a7cf 1187 }
984263bc 1188
1436f9a0 1189 sc->bge_jumbo = BGE_JUMBO_RX_RING_CNT - 1;
984263bc 1190
20c9a969 1191 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
984263bc
MD
1192 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 0);
1193 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1194
591dfc77 1195 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
984263bc
MD
1196
1197 return(0);
1198}
1199
1200static void
33c39a69 1201bge_free_rx_ring_jumbo(struct bge_softc *sc)
984263bc
MD
1202{
1203 int i;
1204
1205 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1436f9a0
SZ
1206 struct bge_rxchain *rc = &sc->bge_cdata.bge_rx_jumbo_chain[i];
1207
1208 if (rc->bge_mbuf != NULL) {
1209 m_freem(rc->bge_mbuf);
1210 rc->bge_mbuf = NULL;
984263bc 1211 }
20c9a969 1212 bzero(&sc->bge_ldata.bge_rx_jumbo_ring[i],
984263bc
MD
1213 sizeof(struct bge_rx_bd));
1214 }
984263bc
MD
1215}
1216
1217static void
33c39a69 1218bge_free_tx_ring(struct bge_softc *sc)
984263bc
MD
1219{
1220 int i;
1221
984263bc
MD
1222 for (i = 0; i < BGE_TX_RING_CNT; i++) {
1223 if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
ddca511d 1224 bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag,
20c9a969 1225 sc->bge_cdata.bge_tx_dmamap[i]);
984263bc
MD
1226 m_freem(sc->bge_cdata.bge_tx_chain[i]);
1227 sc->bge_cdata.bge_tx_chain[i] = NULL;
1228 }
20c9a969 1229 bzero(&sc->bge_ldata.bge_tx_ring[i],
984263bc
MD
1230 sizeof(struct bge_tx_bd));
1231 }
984263bc
MD
1232}
1233
1234static int
33c39a69 1235bge_init_tx_ring(struct bge_softc *sc)
984263bc
MD
1236{
1237 sc->bge_txcnt = 0;
1238 sc->bge_tx_saved_considx = 0;
94db8384
SZ
1239 sc->bge_tx_prodidx = 0;
1240
1241 /* Initialize transmit producer index for host-memory send ring. */
591dfc77 1242 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
984263bc 1243
984263bc
MD
1244 /* 5700 b2 errata */
1245 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
591dfc77 1246 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
984263bc 1247
591dfc77 1248 bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
984263bc
MD
1249 /* 5700 b2 errata */
1250 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
591dfc77 1251 bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
984263bc
MD
1252
1253 return(0);
1254}
1255
984263bc 1256static void
33c39a69 1257bge_setmulti(struct bge_softc *sc)
984263bc
MD
1258{
1259 struct ifnet *ifp;
1260 struct ifmultiaddr *ifma;
33c39a69 1261 uint32_t hashes[4] = { 0, 0, 0, 0 };
984263bc
MD
1262 int h, i;
1263
1264 ifp = &sc->arpcom.ac_if;
1265
1266 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
1267 for (i = 0; i < 4; i++)
1268 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
1269 return;
1270 }
1271
1272 /* First, zot all the existing filters. */
1273 for (i = 0; i < 4; i++)
1274 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);
1275
1276 /* Now program new ones. */
441d34b2 1277 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
984263bc
MD
1278 if (ifma->ifma_addr->sa_family != AF_LINK)
1279 continue;
3b4ec5b8
JS
1280 h = ether_crc32_le(
1281 LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
1282 ETHER_ADDR_LEN) & 0x7f;
984263bc
MD
1283 hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
1284 }
1285
1286 for (i = 0; i < 4; i++)
1287 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
984263bc
MD
1288}
1289
1290/*
1291 * Do endian, PCI and DMA initialization. Also check the on-board ROM
1292 * self-test results.
1293 */
1294static int
33c39a69 1295bge_chipinit(struct bge_softc *sc)
984263bc 1296{
33c39a69 1297 int i;
57b62224 1298 uint32_t dma_rw_ctl, mode_ctl;
c5a5f269 1299 uint16_t val;
984263bc 1300
20c9a969 1301 /* Set endian type before we access any non-PCI registers. */
90ad1c96
SZ
1302 pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL,
1303 BGE_INIT | sc->bge_pci_miscctl, 4);
984263bc 1304
984263bc
MD
1305 /*
1306 * Clear the MAC statistics block in the NIC's
1307 * internal memory.
1308 */
1309 for (i = BGE_STATS_BLOCK;
33c39a69 1310 i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t))
984263bc
MD
1311 BGE_MEMWIN_WRITE(sc, i, 0);
1312
1313 for (i = BGE_STATUS_BLOCK;
33c39a69 1314 i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t))
984263bc
MD
1315 BGE_MEMWIN_WRITE(sc, i, 0);
1316
c5a5f269
SZ
1317 if (sc->bge_chiprev == BGE_CHIPREV_5704_BX) {
1318 /*
1319 * Fix data corruption caused by non-qword write with WB.
1320 * Fix master abort in PCI mode.
1321 * Fix PCI latency timer.
1322 */
1323 val = pci_read_config(sc->bge_dev, BGE_PCI_MSI_DATA + 2, 2);
1324 val |= (1 << 10) | (1 << 12) | (1 << 13);
1325 pci_write_config(sc->bge_dev, BGE_PCI_MSI_DATA + 2, val, 2);
1326 }
1327
984263bc 1328 /* Set up the PCI DMA control register. */
b42cdad7 1329 dma_rw_ctl = BGE_PCI_READ_CMD | BGE_PCI_WRITE_CMD;
0ecb11d7 1330 if (sc->bge_flags & BGE_FLAG_PCIE) {
b42cdad7
SZ
1331 /* PCI-E bus */
1332 /* DMA read watermark not used on PCI-E */
1333 dma_rw_ctl |= (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
0ecb11d7 1334 } else if (sc->bge_flags & BGE_FLAG_PCIX) {
984263bc 1335 /* PCI-X bus */
b42cdad7
SZ
1336 if (sc->bge_asicrev == BGE_ASICREV_BCM5780) {
1337 dma_rw_ctl |= (0x4 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1338 (0x2 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
1339 dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL;
1340 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5714) {
1341 dma_rw_ctl |= (0x4 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1342 (0x2 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
1343 dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE_LOCAL;
1344 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
984263bc 1345 sc->bge_asicrev == BGE_ASICREV_BCM5704) {
b42cdad7
SZ
1346 uint32_t rd_wat = 0x7;
1347 uint32_t clkctl;
1348
1349 clkctl = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1f;
1350 if ((sc->bge_flags & BGE_FLAG_MAXADDR_40BIT) &&
1351 sc->bge_asicrev == BGE_ASICREV_BCM5704) {
1352 dma_rw_ctl |=
1353 BGE_PCIDMARWCTL_ONEDMA_ATONCE_LOCAL;
1354 } else if (clkctl == 0x6 || clkctl == 0x7) {
1355 dma_rw_ctl |=
1356 BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL;
1357 }
1358 if (sc->bge_asicrev == BGE_ASICREV_BCM5703)
1359 rd_wat = 0x4;
984263bc 1360
b42cdad7
SZ
1361 dma_rw_ctl |= (rd_wat << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1362 (3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
1363 dma_rw_ctl |= BGE_PCIDMARWCTL_ASRT_ALL_BE;
1364 } else {
1365 dma_rw_ctl |= (0x3 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1366 (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
1367 dma_rw_ctl |= 0xf;
984263bc 1368 }
0ecb11d7
SZ
1369 } else {
1370 /* Conventional PCI bus */
b42cdad7
SZ
1371 dma_rw_ctl |= (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1372 (0x7 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
1373 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1374 sc->bge_asicrev != BGE_ASICREV_BCM5750)
1375 dma_rw_ctl |= 0xf;
984263bc
MD
1376 }
1377
1378 if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
b42cdad7 1379 sc->bge_asicrev == BGE_ASICREV_BCM5704) {
984263bc 1380 dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
b42cdad7
SZ
1381 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
1382 sc->bge_asicrev == BGE_ASICREV_BCM5701) {
1383 dma_rw_ctl |= BGE_PCIDMARWCTL_USE_MRM |
1384 BGE_PCIDMARWCTL_ASRT_ALL_BE;
1385 }
984263bc
MD
1386 pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);
1387
1388 /*
1389 * Set up general mode register.
1390 */
57b62224 1391 mode_ctl = BGE_DMA_SWAP_OPTIONS|
984263bc 1392 BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS|
57b62224 1393 BGE_MODECTL_TX_NO_PHDR_CSUM;
984263bc 1394
33dd4678
SZ
1395 /*
1396 * BCM5701 B5 have a bug causing data corruption when using
1397 * 64-bit DMA reads, which can be terminated early and then
1398 * completed later as 32-bit accesses, in combination with
1399 * certain bridges.
1400 */
1401 if (sc->bge_asicrev == BGE_ASICREV_BCM5701 &&
1402 sc->bge_chipid == BGE_CHIPID_BCM5701_B5)
57b62224
SZ
1403 mode_ctl |= BGE_MODECTL_FORCE_PCI32;
1404
1405 /*
1406 * Tell the firmware the driver is running
1407 */
1408 if (sc->bge_asf_mode & ASF_STACKUP)
1409 mode_ctl |= BGE_MODECTL_STACKUP;
1410
1411 CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl);
33dd4678 1412
984263bc
MD
1413 /*
1414 * Disable memory write invalidate. Apparently it is not supported
308dcd8e
SZ
1415 * properly by these devices. Also ensure that INTx isn't disabled,
1416 * as these chips need it even when using MSI.
984263bc 1417 */
308dcd8e
SZ
1418 PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD,
1419 (PCIM_CMD_MWRICEN | PCIM_CMD_INTxDIS), 4);
984263bc 1420
984263bc
MD
1421 /* Set the timer prescaler (always 66Mhz) */
1422 CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/);
1423
591dfc77
SZ
1424 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
1425 DELAY(40); /* XXX */
1426
1427 /* Put PHY into ready state */
1428 BGE_CLRBIT(sc, BGE_MISC_CFG, BGE_MISCCFG_EPHY_IDDQ);
1429 CSR_READ_4(sc, BGE_MISC_CFG); /* Flush */
1430 DELAY(40);
1431 }
1432
984263bc
MD
1433 return(0);
1434}
1435
1436static int
33c39a69 1437bge_blockinit(struct bge_softc *sc)
984263bc
MD
1438{
1439 struct bge_rcb *rcb;
20c9a969
SZ
1440 bus_size_t vrcb;
1441 bge_hostaddr taddr;
0ecb11d7 1442 uint32_t val;
d287a587 1443 int i, limit;
984263bc
MD
1444
1445 /*
1446 * Initialize the memory window pointer register so that
1447 * we can access the first 32K of internal NIC RAM. This will
1448 * allow us to set up the TX send ring RCBs and the RX return
1449 * ring RCBs, plus other things which live in NIC memory.
1450 */
1451 CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);
1452
7e40b8c5
HP
1453 /* Note: the BCM5704 has a smaller mbuf space than other chips. */
1454
0ecb11d7 1455 if (!BGE_IS_5705_PLUS(sc)) {
7e40b8c5 1456 /* Configure mbuf memory pool */
0ecb11d7
SZ
1457 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_BUFFPOOL_1);
1458 if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
1459 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
1460 else
1461 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
984263bc 1462
7e40b8c5
HP
1463 /* Configure DMA resource pool */
1464 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
1465 BGE_DMA_DESCRIPTORS);
1466 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
1467 }
984263bc
MD
1468
1469 /* Configure mbuf pool watermarks */
591dfc77 1470 if (!BGE_IS_5705_PLUS(sc)) {
7e40b8c5
HP
1471 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
1472 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
591dfc77
SZ
1473 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1474 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
1475 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1476 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x04);
1477 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x10);
1478 } else {
1479 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1480 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
1481 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
7e40b8c5 1482 }
984263bc
MD
1483
1484 /* Configure DMA resource watermarks */
1485 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
1486 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);
1487
1488 /* Enable buffer manager */
6ac6e1b9
SZ
1489 CSR_WRITE_4(sc, BGE_BMAN_MODE,
1490 BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN);
984263bc 1491
6ac6e1b9
SZ
1492 /* Poll for buffer manager start indication */
1493 for (i = 0; i < BGE_TIMEOUT; i++) {
1494 if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
1495 break;
1496 DELAY(10);
1497 }
984263bc 1498
6ac6e1b9
SZ
1499 if (i == BGE_TIMEOUT) {
1500 if_printf(&sc->arpcom.ac_if,
1501 "buffer manager failed to start\n");
1502 return(ENXIO);
984263bc
MD
1503 }
1504
1505 /* Enable flow-through queues */
1506 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
1507 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
1508
1509 /* Wait until queue initialization is complete */
1510 for (i = 0; i < BGE_TIMEOUT; i++) {
1511 if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
1512 break;
1513 DELAY(10);
1514 }
1515
1516 if (i == BGE_TIMEOUT) {
c6fd6f3b
JS
1517 if_printf(&sc->arpcom.ac_if,
1518 "flow-through queue init failed\n");
984263bc
MD
1519 return(ENXIO);
1520 }
1521
	/*
	 * Summary of rings supported by the controller:
	 *
	 * Standard Receive Producer Ring
	 * - This ring is used to feed receive buffers for "standard"
	 *   sized frames (typically 1536 bytes) to the controller.
	 *
	 * Jumbo Receive Producer Ring
	 * - This ring is used to feed receive buffers for jumbo sized
	 *   frames (i.e. anything bigger than the "standard" frames)
	 *   to the controller.
	 *
	 * Mini Receive Producer Ring
	 * - This ring is used to feed receive buffers for "mini"
	 *   sized frames to the controller.
	 * - This feature required external memory for the controller
	 *   but was never used in a production system.  Should always
	 *   be disabled.
	 *
	 * Receive Return Ring
	 * - After the controller has placed an incoming frame into a
	 *   receive buffer, that buffer is moved into a receive return
	 *   ring.  The driver is then responsible for passing the
	 *   buffer up to the stack.  Many versions of the controller
	 *   support multiple RR rings.
	 *
	 * Send Ring
	 * - This ring is used for outgoing frames.  Many versions of
	 *   the controller support multiple send rings.
	 */

	/* Initialize the standard receive producer ring control block. */
	rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb;
	rcb->bge_hostaddr.bge_addr_lo =
	    BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr);
	rcb->bge_hostaddr.bge_addr_hi =
	    BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr);
	if (BGE_IS_5705_PLUS(sc)) {
		/*
		 * Bits 31-16: Programmable ring size (512, 256, 128, 64, 32)
		 * Bits 15-2 : Reserved (should be 0)
		 * Bit 1     : 1 = Ring Disabled, 0 = Ring Enabled
		 * Bit 0     : Reserved
		 */
		rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
	} else {
		/*
		 * Ring size is always XXX entries
		 * Bits 31-16: Maximum RX frame size
		 * Bits 15-2 : Reserved (should be 0)
		 * Bit 1     : 1 = Ring Disabled, 0 = Ring Enabled
		 * Bit 0     : Reserved
		 */
		rcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
	}
	rcb->bge_nicaddr = BGE_STD_RX_RINGS;
	/* Write the standard receive producer ring control block. */
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
	/* Reset the standard receive producer ring producer index. */
	bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0);

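	/*
	 * Each ring is described to the chip by a control block
	 * (struct bge_rcb) of this shape: the 64-bit host address of
	 * the ring, a packed max-frame-length/flags word and, for
	 * rings which also live in NIC-local memory, a NIC RAM
	 * address.  The same pattern repeats for every ring below.
	 */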
	/*
	 * Initialize the jumbo RX producer ring control
	 * block.  We set the 'ring disabled' bit in the
	 * flags field until we're actually ready to start
	 * using this ring (i.e. once we set the MTU
	 * high enough to require it).
	 */
	if (BGE_IS_JUMBO_CAPABLE(sc)) {
		rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
		/* Get the jumbo receive producer ring RCB parameters. */
		rcb->bge_hostaddr.bge_addr_lo =
		    BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
		rcb->bge_hostaddr.bge_addr_hi =
		    BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
		rcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN,
		    BGE_RCB_FLAG_RING_DISABLED);
		rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
		    rcb->bge_hostaddr.bge_addr_hi);
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
		    rcb->bge_hostaddr.bge_addr_lo);
		/* Program the jumbo receive producer ring RCB parameters. */
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
		    rcb->bge_maxlen_flags);
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
		/* Reset the jumbo receive producer ring producer index. */
		bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
	}

	/* Disable the mini receive producer ring RCB. */
	if (BGE_IS_5700_FAMILY(sc)) {
		rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb;
		rcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
		CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
		    rcb->bge_maxlen_flags);
		/* Reset the mini receive producer ring producer index. */
		bge_writembx(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
	}

	/* Choose de-pipeline mode for BCM5906 A0, A1 and A2. */
	if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
	    (sc->bge_chipid == BGE_CHIPID_BCM5906_A0 ||
	     sc->bge_chipid == BGE_CHIPID_BCM5906_A1 ||
	     sc->bge_chipid == BGE_CHIPID_BCM5906_A2)) {
		CSR_WRITE_4(sc, BGE_ISO_PKT_TX,
		    (CSR_READ_4(sc, BGE_ISO_PKT_TX) & ~3) | 2);
	}

	/*
	 * The BD ring replenish thresholds control how often the
	 * hardware fetches new BD's from the producer rings in host
	 * memory.  Setting the value too low on a busy system can
	 * starve the hardware and reduce the throughput.
	 *
	 * Set the BD ring replenish thresholds.  The recommended
	 * values are 1/8th the number of descriptors allocated to
	 * each ring.
	 */
	if (BGE_IS_5705_PLUS(sc))
		val = 8;
	else
		val = BGE_STD_RX_RING_CNT / 8;
	CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, val);
	if (BGE_IS_JUMBO_CAPABLE(sc)) {
		CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH,
		    BGE_JUMBO_RX_RING_CNT/8);
	}
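	/*
	 * Example: with the usual 512-entry standard RX ring this
	 * programs a replenish threshold of 512 / 8 = 64 BDs, i.e. the
	 * chip re-reads the host producer ring after every 64 consumed
	 * descriptors rather than on each packet.
	 */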

	/*
	 * Disable all send rings by setting the 'ring disabled' bit
	 * in the flags field of all the TX send ring control blocks,
	 * located in NIC memory.
	 */
	if (!BGE_IS_5705_PLUS(sc)) {
		/* 5700 to 5704 had 16 send rings. */
		limit = BGE_TX_RINGS_EXTSSRAM_MAX;
	} else {
		limit = 1;
	}
	vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
	for (i = 0; i < limit; i++) {
		RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
		RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
		vrcb += sizeof(struct bge_rcb);
	}

	/* Configure send ring RCB 0 (we use only the first ring) */
	vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
	BGE_HOSTADDR(taddr, sc->bge_ldata.bge_tx_ring_paddr);
	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
	RCB_WRITE_4(sc, vrcb, bge_nicaddr,
	    BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
	RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
	    BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
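	/*
	 * Unlike the RX return rings below, the send ring also has a
	 * shadow in NIC-local memory, so its RCB carries a real NIC
	 * address: BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT) computes
	 * where a ring 0 of that size lives in NIC RAM.
	 */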

	/*
	 * Disable all receive return rings by setting the
	 * 'ring disabled' bit in the flags field of all the receive
	 * return ring control blocks, located in NIC memory.
	 */
	if (!BGE_IS_5705_PLUS(sc))
		limit = BGE_RX_RINGS_MAX;
	else if (sc->bge_asicrev == BGE_ASICREV_BCM5755)
		limit = 4;
	else
		limit = 1;
	/* Disable all receive return rings. */
	vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
	for (i = 0; i < limit; i++) {
		RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0);
		RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0);
		RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
		    BGE_RCB_FLAG_RING_DISABLED);
		RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
		bge_writembx(sc, BGE_MBX_RX_CONS0_LO +
		    (i * (sizeof(uint64_t))), 0);
		vrcb += sizeof(struct bge_rcb);
	}

	/*
	 * Set up receive return ring 0.  Note that the NIC address
	 * for RX return rings is 0x0.  The return rings live entirely
	 * within the host, so the nicaddr field in the RCB isn't used.
	 */
	vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
	BGE_HOSTADDR(taddr, sc->bge_ldata.bge_rx_return_ring_paddr);
	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
	RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
	RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
	    BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));

	/* Set random backoff seed for TX */
	CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
	    (sc->arpcom.ac_enaddr[0] + sc->arpcom.ac_enaddr[1] +
	     sc->arpcom.ac_enaddr[2] + sc->arpcom.ac_enaddr[3] +
	     sc->arpcom.ac_enaddr[4] + sc->arpcom.ac_enaddr[5]) &
	    BGE_TX_BACKOFF_SEED_MASK);

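	/*
	 * Seeding the backoff generator from the station address makes
	 * it likely that NICs sharing a collision domain pick different
	 * half-duplex backoff slots; any reasonably unique per-NIC
	 * value would do here.
	 */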
	/* Set inter-packet gap */
	CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);

	/*
	 * Specify which ring to use for packets that don't match
	 * any RX rules.
	 */
	CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);

	/*
	 * Configure number of RX lists.  One interrupt distribution
	 * list, sixteen active lists, one bad frames class.
	 */
	CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);

	/* Initialize RX list placement stats mask. */
	CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
	CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);

	/* Disable host coalescing until we get it set up */
	CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);

	/* Poll to make sure it's shut down. */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
			break;
		DELAY(10);
	}

	if (i == BGE_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if,
		    "host coalescing engine failed to idle\n");
		return(ENXIO);
	}

	/* Set up host coalescing defaults */
	CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
	CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_coal_bds);
	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_coal_bds);
	if (!BGE_IS_5705_PLUS(sc)) {
		CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT,
		    sc->bge_rx_coal_ticks_int);
		CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT,
		    sc->bge_tx_coal_ticks_int);
	}
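	/*
	 * The coalescing parameters work in pairs: a status block
	 * update (and hence an interrupt) is generated once either the
	 * tick timer expires or the given number of BDs has been
	 * coalesced, whichever happens first.  The ticks bound
	 * latency, the BD counts bound per-interrupt batching.
	 */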
	/*
	 * NOTE:
	 * The datasheet (57XX-PG105-R) claims BCM5705+ do not have the
	 * following two registers; that is obviously wrong.
	 */
	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, sc->bge_rx_coal_bds_int);
	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, sc->bge_tx_coal_bds_int);

	/* Set up address of statistics block */
	if (!BGE_IS_5705_PLUS(sc)) {
		CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI,
		    BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr));
		CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
		    BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr));

		CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
		CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
		CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
	}

	/* Set up address of status block */
	bzero(sc->bge_ldata.bge_status_block, BGE_STATUS_BLK_SZ);
	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI,
	    BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr));
	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
	    BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr));

	/*
	 * Set up status block partial update size.
	 *
	 * Because only a single TX ring, RX producer ring and RX return
	 * ring are used, ask the device to update only the minimum part
	 * of the status block.  The exception is BCM5700 AX/BX, whose
	 * status block partial update size can't be configured.
	 */
	if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
	    sc->bge_chipid != BGE_CHIPID_BCM5700_C0) {
		/* XXX Actually reserved on BCM5700 AX/BX */
		val = BGE_STATBLKSZ_FULL;
	} else {
		val = BGE_STATBLKSZ_32BYTE;
	}
#if 0
	/*
	 * Does not seem to have visible effect in both
	 * bulk data (1472B UDP datagram) and tiny data
	 * (18B UDP datagram) TX tests.
	 */
	if (!BGE_IS_CRIPPLED(sc))
		val |= BGE_HCCMODE_CLRTICK_TX;
#endif

	/* Turn on host coalescing state machine */
	CSR_WRITE_4(sc, BGE_HCC_MODE, val | BGE_HCCMODE_ENABLE);

	/* Turn on RX BD completion state machine and enable attentions */
	CSR_WRITE_4(sc, BGE_RBDC_MODE,
	    BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);

	/* Turn on RX list placement state machine */
	CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);

	/* Turn on RX list selector state machine. */
	if (!BGE_IS_5705_PLUS(sc))
		CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);

	val = BGE_MACMODE_TXDMA_ENB | BGE_MACMODE_RXDMA_ENB |
	    BGE_MACMODE_RX_STATS_CLEAR | BGE_MACMODE_TX_STATS_CLEAR |
	    BGE_MACMODE_RX_STATS_ENB | BGE_MACMODE_TX_STATS_ENB |
	    BGE_MACMODE_FRMHDR_DMA_ENB;

	if (sc->bge_flags & BGE_FLAG_TBI)
		val |= BGE_PORTMODE_TBI;
	else if (sc->bge_flags & BGE_FLAG_MII_SERDES)
		val |= BGE_PORTMODE_GMII;
	else
		val |= BGE_PORTMODE_MII;

	/* Allow APE to send/receive frames. */
	if (sc->bge_mfw_flags & BGE_MFW_ON_APE)
		val |= BGE_MACMODE_APE_RX_EN | BGE_MACMODE_APE_TX_EN;

	/* Turn on DMA, clear stats */
	CSR_WRITE_4(sc, BGE_MAC_MODE, val);
	DELAY(40);

	/* Set misc. local control, enable interrupts on attentions */
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);

#ifdef notdef
	/* Assert GPIO pins for PHY reset */
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
	    BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
	    BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
#endif

	/* Turn on DMA completion state machine */
	if (!BGE_IS_5705_PLUS(sc))
		CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);

	/* Turn on write DMA state machine */
	val = BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS;
	if (BGE_IS_5755_PLUS(sc)) {
		/* Enable host coalescing bug fix. */
		val |= BGE_WDMAMODE_STATUS_TAG_FIX;
	}
	if (sc->bge_asicrev == BGE_ASICREV_BCM5785) {
		/* Request larger DMA burst size to get better performance. */
		val |= BGE_WDMAMODE_BURST_ALL_DATA;
	}
	CSR_WRITE_4(sc, BGE_WDMA_MODE, val);
	DELAY(40);

	if (sc->bge_asicrev == BGE_ASICREV_BCM5761 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM57780) {
		/*
		 * Enable fix for read DMA FIFO overruns.
		 * The fix is to limit the number of RX BDs
		 * the hardware would fetch at a time.
		 */
		val = CSR_READ_4(sc, BGE_RDMA_RSRVCTRL);
		CSR_WRITE_4(sc, BGE_RDMA_RSRVCTRL,
		    val | BGE_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
	}

	/* Turn on read DMA state machine */
	val = BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS;
	if (sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM57780)
		val |= BGE_RDMAMODE_BD_SBD_CRPT_ATTN |
		    BGE_RDMAMODE_MBUF_RBD_CRPT_ATTN |
		    BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN;
	if (sc->bge_flags & BGE_FLAG_PCIE)
		val |= BGE_RDMAMODE_FIFO_LONG_BURST;
	if (sc->bge_flags & BGE_FLAG_TSO)
		val |= BGE_RDMAMODE_TSO4_ENABLE;
	CSR_WRITE_4(sc, BGE_RDMA_MODE, val);
	DELAY(40);

	/* Turn on RX data completion state machine */
	CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);

	/* Turn on RX BD initiator state machine */
	CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);

	/* Turn on RX data and RX BD initiator state machine */
	CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);

	/* Turn on Mbuf cluster free state machine */
	if (!BGE_IS_5705_PLUS(sc))
		CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);

	/* Turn on send BD completion state machine */
	CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);

	/* Turn on send data completion state machine */
	val = BGE_SDCMODE_ENABLE;
	if (sc->bge_asicrev == BGE_ASICREV_BCM5761)
		val |= BGE_SDCMODE_CDELAY;
	CSR_WRITE_4(sc, BGE_SDC_MODE, val);

	/* Turn on send data initiator state machine */
	if (sc->bge_flags & BGE_FLAG_TSO)
		CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE |
		    BGE_SDIMODE_HW_LSO_PRE_DMA);
	else
		CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);

	/* Turn on send BD initiator state machine */
	CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);

	/* Turn on send BD selector state machine */
	CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);

	CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
	CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
	    BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);

	/* ack/clear link change events */
	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
	    BGE_MACSTAT_LINK_CHANGED);
	CSR_WRITE_4(sc, BGE_MI_STS, 0);

	/*
	 * Enable attention when the link has changed state for
	 * devices that use auto polling.
	 */
	if (sc->bge_flags & BGE_FLAG_TBI) {
		CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
	} else {
		if (sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) {
			CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode);
			DELAY(80);
		}
		if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
		    sc->bge_chipid != BGE_CHIPID_BCM5700_B2) {
			CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
			    BGE_EVTENB_MI_INTERRUPT);
		}
	}

	/*
	 * Clear any pending link state attention.
	 * Otherwise some link state change events may be lost until attention
	 * is cleared by bge_intr() -> bge_softc.bge_link_upd() sequence.
	 * It's not necessary on newer BCM chips - perhaps enabling link
	 * state change attentions implies clearing pending attention.
	 */
	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
	    BGE_MACSTAT_LINK_CHANGED);

	/* Enable link state change attentions. */
	BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);

	return(0);
}

/*
 * Probe for a Broadcom chip.  Check the PCI vendor and device IDs
 * against our list and return the device name if we find a match.
 * Note: the Broadcom controller contains VPD support, so the product
 * name string could also be read from the controller itself, but this
 * driver simply announces the compiled-in name from the match table.
 */
static int
bge_probe(device_t dev)
{
	const struct bge_type *t;
	uint16_t product, vendor;

	product = pci_get_device(dev);
	vendor = pci_get_vendor(dev);

	for (t = bge_devs; t->bge_name != NULL; t++) {
		if (vendor == t->bge_vid && product == t->bge_did)
			break;
	}
	if (t->bge_name == NULL)
		return(ENXIO);

	device_set_desc(dev, t->bge_name);
	return(0);
}

static int
bge_attach(device_t dev)
{
	struct ifnet *ifp;
	struct bge_softc *sc;
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *tree;
	uint32_t hwcfg = 0, misccfg;
	int error = 0, rid, capmask;
	uint8_t ether_addr[ETHER_ADDR_LEN];
	uint16_t product, vendor;
	driver_intr_t *intr_func;
	uintptr_t mii_priv = 0;
	u_int intr_flags;
	int msi_enable;

	sc = device_get_softc(dev);
	sc->bge_dev = dev;
	callout_init_mp(&sc->bge_stat_timer);
	lwkt_serialize_init(&sc->bge_jslot_serializer);

	sc->bge_func_addr = pci_get_function(dev);
	product = pci_get_device(dev);
	vendor = pci_get_vendor(dev);

#ifndef BURN_BRIDGES
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		uint32_t irq, mem;

		irq = pci_read_config(dev, PCIR_INTLINE, 4);
		mem = pci_read_config(dev, BGE_PCI_BAR0, 4);

		device_printf(dev, "chip is in D%d power mode "
		    "-- setting to D0\n", pci_get_powerstate(dev));

		pci_set_powerstate(dev, PCI_POWERSTATE_D0);

		pci_write_config(dev, PCIR_INTLINE, irq, 4);
		pci_write_config(dev, BGE_PCI_BAR0, mem, 4);
	}
#endif	/* !BURN_BRIDGES */

	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

	rid = BGE_PCI_BAR0;
	sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);

	if (sc->bge_res == NULL) {
		device_printf(dev, "couldn't map memory\n");
		return ENXIO;
	}

	sc->bge_btag = rman_get_bustag(sc->bge_res);
	sc->bge_bhandle = rman_get_bushandle(sc->bge_res);

	/* Save various chip information */
	sc->bge_chipid =
	    pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >>
	    BGE_PCIMISCCTL_ASICREV_SHIFT;
	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_USE_PRODID_REG) {
		/* All chips, which use BGE_PCI_PRODID_ASICREV, have CPMU */
		sc->bge_flags |= BGE_FLAG_CPMU;
		sc->bge_chipid = pci_read_config(dev, BGE_PCI_PRODID_ASICREV, 4);
	}
	sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid);
	sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid);

	/* Save chipset family. */
	switch (sc->bge_asicrev) {
	case BGE_ASICREV_BCM5755:
	case BGE_ASICREV_BCM5761:
	case BGE_ASICREV_BCM5784:
	case BGE_ASICREV_BCM5785:
	case BGE_ASICREV_BCM5787:
	case BGE_ASICREV_BCM57780:
		sc->bge_flags |= BGE_FLAG_5755_PLUS | BGE_FLAG_575X_PLUS |
		    BGE_FLAG_5705_PLUS;
		break;

	case BGE_ASICREV_BCM5700:
	case BGE_ASICREV_BCM5701:
	case BGE_ASICREV_BCM5703:
	case BGE_ASICREV_BCM5704:
		sc->bge_flags |= BGE_FLAG_5700_FAMILY | BGE_FLAG_JUMBO;
		break;

	case BGE_ASICREV_BCM5714_A0:
	case BGE_ASICREV_BCM5780:
	case BGE_ASICREV_BCM5714:
		sc->bge_flags |= BGE_FLAG_5714_FAMILY;
		/* Fall through */

	case BGE_ASICREV_BCM5750:
	case BGE_ASICREV_BCM5752:
	case BGE_ASICREV_BCM5906:
		sc->bge_flags |= BGE_FLAG_575X_PLUS;
		/* Fall through */

	case BGE_ASICREV_BCM5705:
		sc->bge_flags |= BGE_FLAG_5705_PLUS;
		break;
	}
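	/*
	 * The fall-throughs above encode a strict hierarchy:
	 * 5755_PLUS implies 575X_PLUS, which in turn implies
	 * 5705_PLUS; e.g. a BCM5752 ends up with both
	 * BGE_FLAG_575X_PLUS and BGE_FLAG_5705_PLUS set.
	 */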

	if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
		sc->bge_flags |= BGE_FLAG_NO_EEPROM;

	if (sc->bge_asicrev == BGE_ASICREV_BCM5761)
		sc->bge_flags |= BGE_FLAG_APE;

	misccfg = CSR_READ_4(sc, BGE_MISC_CFG) & BGE_MISCCFG_BOARD_ID_MASK;
	if (sc->bge_asicrev == BGE_ASICREV_BCM5705 &&
	    (misccfg == BGE_MISCCFG_BOARD_ID_5788 ||
	     misccfg == BGE_MISCCFG_BOARD_ID_5788M))
		sc->bge_flags |= BGE_FLAG_5788;

	/* BCM5755 or higher and BCM5906 have short DMA bug. */
	if (BGE_IS_5755_PLUS(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5906)
		sc->bge_flags |= BGE_FLAG_SHORTDMA;

	/*
	 * Increase STD RX ring prod index by at most 8 for BCM5750,
	 * BCM5752 and BCM5755 to workaround hardware errata.
	 */
	if (sc->bge_asicrev == BGE_ASICREV_BCM5750 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5752 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5755)
		sc->bge_rx_wreg = 8;

	/*
	 * Check if this is a PCI-X or PCI Express device.
	 */
	if (BGE_IS_5705_PLUS(sc)) {
		if (pci_is_pcie(dev)) {
			sc->bge_flags |= BGE_FLAG_PCIE;
			sc->bge_pciecap = pci_get_pciecap_ptr(sc->bge_dev);
			pcie_set_max_readrq(dev, PCIEM_DEVCTL_MAX_READRQ_4096);
		}
	} else {
		/*
		 * Check if the device is in PCI-X Mode.
		 * (This bit is not valid on PCI Express controllers.)
		 */
		if ((pci_read_config(sc->bge_dev, BGE_PCI_PCISTATE, 4) &
		    BGE_PCISTATE_PCI_BUSMODE) == 0) {
			sc->bge_flags |= BGE_FLAG_PCIX;
			sc->bge_pcixcap = pci_get_pcixcap_ptr(sc->bge_dev);
			sc->bge_mbox_reorder = device_getenv_int(sc->bge_dev,
			    "mbox_reorder", 0);
		}
	}
	device_printf(dev, "CHIP ID 0x%08x; "
	    "ASIC REV 0x%02x; CHIP REV 0x%02x; %s\n",
	    sc->bge_chipid, sc->bge_asicrev, sc->bge_chiprev,
	    (sc->bge_flags & BGE_FLAG_PCIX) ? "PCI-X"
	    : ((sc->bge_flags & BGE_FLAG_PCIE) ? "PCI-E" : "PCI"));

	/*
	 * The 40bit DMA bug applies to the 5714/5715 controllers and is
	 * not actually a MAC controller bug but an issue with the embedded
	 * PCIe to PCI-X bridge in the device.  Use the 40bit DMA workaround.
	 */
	if ((sc->bge_flags & BGE_FLAG_PCIX) &&
	    (BGE_IS_5714_FAMILY(sc) || device_getenv_int(dev, "dma40b", 0)))
		sc->bge_flags |= BGE_FLAG_MAXADDR_40BIT;

	/*
	 * When using the BCM5701 in PCI-X mode, data corruption has
	 * been observed in the first few bytes of some received packets.
	 * Aligning the packet buffer in memory eliminates the corruption.
	 * Unfortunately, this misaligns the packet payloads.  On platforms
	 * which do not support unaligned accesses, we will realign the
	 * payloads by copying the received packets.
	 */
	if (sc->bge_asicrev == BGE_ASICREV_BCM5701 &&
	    (sc->bge_flags & BGE_FLAG_PCIX))
		sc->bge_flags |= BGE_FLAG_RX_ALIGNBUG;

	if (!BGE_IS_CRIPPLED(sc)) {
		if (device_getenv_int(dev, "status_tag", 1)) {
			sc->bge_flags |= BGE_FLAG_STATUS_TAG;
			sc->bge_pci_miscctl = BGE_PCIMISCCTL_TAGGED_STATUS;
			if (bootverbose)
				device_printf(dev, "enable status tag\n");
		}
	}
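	/*
	 * With tagged status the chip stamps each status block update
	 * with a tag which the driver echoes back when acknowledging
	 * the interrupt, so stale status blocks and lost updates can
	 * be detected reliably; the MSI setup further down depends on
	 * this mode being available.
	 */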

	if (BGE_IS_5755_PLUS(sc)) {
		/*
		 * BCM5754 and BCM5787 share the same ASIC id, so an
		 * explicit device id check is required.
		 * For unknown reasons, TSO does not work on the BCM5755M.
		 */
		if (product != PCI_PRODUCT_BROADCOM_BCM5754 &&
		    product != PCI_PRODUCT_BROADCOM_BCM5754M &&
		    product != PCI_PRODUCT_BROADCOM_BCM5755M)
			sc->bge_flags |= BGE_FLAG_TSO;
	}

	/*
	 * Set various PHY quirk flags.
	 */

	if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
	     sc->bge_asicrev == BGE_ASICREV_BCM5701) &&
	    pci_get_subvendor(dev) == PCI_VENDOR_DELL)
		mii_priv |= BRGPHY_FLAG_NO_3LED;

	capmask = MII_CAPMASK_DEFAULT;
	if ((sc->bge_asicrev == BGE_ASICREV_BCM5703 &&
	     (misccfg == 0x4000 || misccfg == 0x8000)) ||
	    (sc->bge_asicrev == BGE_ASICREV_BCM5705 &&
	     vendor == PCI_VENDOR_BROADCOM &&
	     (product == PCI_PRODUCT_BROADCOM_BCM5901 ||
	      product == PCI_PRODUCT_BROADCOM_BCM5901A2 ||
	      product == PCI_PRODUCT_BROADCOM_BCM5705F)) ||
	    (vendor == PCI_VENDOR_BROADCOM &&
	     (product == PCI_PRODUCT_BROADCOM_BCM5751F ||
	      product == PCI_PRODUCT_BROADCOM_BCM5753F ||
	      product == PCI_PRODUCT_BROADCOM_BCM5787F)) ||
	    product == PCI_PRODUCT_BROADCOM_BCM57790 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5906) {
		/* 10/100 only */
		capmask &= ~BMSR_EXTSTAT;
	}

	mii_priv |= BRGPHY_FLAG_WIRESPEED;
	if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
	    (sc->bge_asicrev == BGE_ASICREV_BCM5705 &&
	     (sc->bge_chipid != BGE_CHIPID_BCM5705_A0 &&
	      sc->bge_chipid != BGE_CHIPID_BCM5705_A1)) ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5906)
		mii_priv &= ~BRGPHY_FLAG_WIRESPEED;

	if (sc->bge_chipid == BGE_CHIPID_BCM5701_A0 ||
	    sc->bge_chipid == BGE_CHIPID_BCM5701_B0)
		mii_priv |= BRGPHY_FLAG_CRC_BUG;

	if (sc->bge_chiprev == BGE_CHIPREV_5703_AX ||
	    sc->bge_chiprev == BGE_CHIPREV_5704_AX)
		mii_priv |= BRGPHY_FLAG_ADC_BUG;

	if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0)
		mii_priv |= BRGPHY_FLAG_5704_A0;

	if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
		mii_priv |= BRGPHY_FLAG_5906;

	if (BGE_IS_5705_PLUS(sc) &&
	    sc->bge_asicrev != BGE_ASICREV_BCM5906 &&
	    /* sc->bge_asicrev != BGE_ASICREV_BCM5717 && */
	    sc->bge_asicrev != BGE_ASICREV_BCM5785 &&
	    /* sc->bge_asicrev != BGE_ASICREV_BCM57765 && */
	    sc->bge_asicrev != BGE_ASICREV_BCM57780) {
		if (sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
		    sc->bge_asicrev == BGE_ASICREV_BCM5761 ||
		    sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
		    sc->bge_asicrev == BGE_ASICREV_BCM5787) {
			if (product != PCI_PRODUCT_BROADCOM_BCM5722 &&
			    product != PCI_PRODUCT_BROADCOM_BCM5756)
				mii_priv |= BRGPHY_FLAG_JITTER_BUG;
			if (product == PCI_PRODUCT_BROADCOM_BCM5755M)
				mii_priv |= BRGPHY_FLAG_ADJUST_TRIM;
		} else {
			mii_priv |= BRGPHY_FLAG_BER_BUG;
		}
	}

	/*
	 * Chips with APE need BAR2 access for APE registers/memory.
	 */
	if (sc->bge_flags & BGE_FLAG_APE) {
		uint32_t pcistate;

		rid = PCIR_BAR(2);
		sc->bge_res2 = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
		    RF_ACTIVE);
		if (sc->bge_res2 == NULL) {
			device_printf(dev, "couldn't map BAR2 memory\n");
			error = ENXIO;
			goto fail;
		}

		/* Enable APE register/memory access by host driver. */
		pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);
		pcistate |= BGE_PCISTATE_ALLOW_APE_CTLSPC_WR |
		    BGE_PCISTATE_ALLOW_APE_SHMEM_WR |
		    BGE_PCISTATE_ALLOW_APE_PSPACE_WR;
		pci_write_config(dev, BGE_PCI_PCISTATE, pcistate, 4);

		bge_ape_lock_init(sc);
		bge_ape_read_fw_ver(sc);
	}

	/*
	 * Allocate interrupt
	 */
	msi_enable = bge_msi_enable;
	if ((sc->bge_flags & BGE_FLAG_STATUS_TAG) == 0) {
		/* If "tagged status" is disabled, don't enable MSI */
		msi_enable = 0;
	} else if (msi_enable) {
		msi_enable = 0; /* Disable by default */
		if (BGE_IS_575X_PLUS(sc)) {
			msi_enable = 1;
			/* XXX we filter all 5714 chips */
			if (sc->bge_asicrev == BGE_ASICREV_BCM5714 ||
			    (sc->bge_asicrev == BGE_ASICREV_BCM5750 &&
			     (sc->bge_chiprev == BGE_CHIPREV_5750_AX ||
			      sc->bge_chiprev == BGE_CHIPREV_5750_BX)))
				msi_enable = 0;
			else if (BGE_IS_5755_PLUS(sc) ||
			    sc->bge_asicrev == BGE_ASICREV_BCM5906)
				sc->bge_flags |= BGE_FLAG_ONESHOT_MSI;
		}
	}
	if (msi_enable) {
		if (pci_find_extcap(dev, PCIY_MSI, &sc->bge_msicap)) {
			device_printf(dev, "no MSI capability\n");
			msi_enable = 0;
		}
	}

	sc->bge_irq_type = pci_alloc_1intr(dev, msi_enable, &sc->bge_irq_rid,
	    &intr_flags);

	sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->bge_irq_rid,
	    intr_flags);
	if (sc->bge_irq == NULL) {
		device_printf(dev, "couldn't map interrupt\n");
		error = ENXIO;
		goto fail;
	}

	if (sc->bge_irq_type == PCI_INTR_TYPE_MSI)
		bge_enable_msi(sc);
	else
		sc->bge_flags &= ~BGE_FLAG_ONESHOT_MSI;

	/* Initialize if_name earlier, so if_printf could be used */
	ifp = &sc->arpcom.ac_if;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));

	sc->bge_asf_mode = 0;
	/* No ASF if APE present. */
	if ((sc->bge_flags & BGE_FLAG_APE) == 0) {
		if (bge_allow_asf && (bge_readmem_ind(sc, BGE_SRAM_DATA_SIG) ==
		    BGE_SRAM_DATA_SIG_MAGIC)) {
			if (bge_readmem_ind(sc, BGE_SRAM_DATA_CFG) &
			    BGE_HWCFG_ASF) {
				sc->bge_asf_mode |= ASF_ENABLE;
				sc->bge_asf_mode |= ASF_STACKUP;
				if (BGE_IS_575X_PLUS(sc))
					sc->bge_asf_mode |= ASF_NEW_HANDSHAKE;
			}
		}
	}

	/*
	 * Try to reset the chip.
	 */
	bge_stop_fw(sc);
	bge_sig_pre_reset(sc, BGE_RESET_SHUTDOWN);
	bge_reset(sc);
	bge_sig_legacy(sc, BGE_RESET_SHUTDOWN);
	bge_sig_post_reset(sc, BGE_RESET_SHUTDOWN);

	if (bge_chipinit(sc)) {
		device_printf(dev, "chip initialization failed\n");
		error = ENXIO;
		goto fail;
	}

	/*
	 * Get station address
	 */
	error = bge_get_eaddr(sc, ether_addr);
	if (error) {
		device_printf(dev, "failed to read station address\n");
		goto fail;
	}

	/* 5705/5750 limits RX return ring to 512 entries. */
	if (BGE_IS_5705_PLUS(sc))
		sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
	else
		sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;

	error = bge_dma_alloc(sc);
	if (error)
		goto fail;

	/* Set default tuneable values. */
	sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
	sc->bge_rx_coal_ticks = BGE_RX_COAL_TICKS_DEF;
	sc->bge_tx_coal_ticks = BGE_TX_COAL_TICKS_DEF;
	sc->bge_rx_coal_bds = BGE_RX_COAL_BDS_DEF;
	sc->bge_tx_coal_bds = BGE_TX_COAL_BDS_DEF;
	if (sc->bge_flags & BGE_FLAG_STATUS_TAG) {
		sc->bge_rx_coal_ticks_int = BGE_RX_COAL_TICKS_DEF;
		sc->bge_tx_coal_ticks_int = BGE_TX_COAL_TICKS_DEF;
		sc->bge_rx_coal_bds_int = BGE_RX_COAL_BDS_DEF;
		sc->bge_tx_coal_bds_int = BGE_TX_COAL_BDS_DEF;
	} else {
		sc->bge_rx_coal_ticks_int = BGE_RX_COAL_TICKS_MIN;
		sc->bge_tx_coal_ticks_int = BGE_TX_COAL_TICKS_MIN;
		sc->bge_rx_coal_bds_int = BGE_RX_COAL_BDS_MIN;
		sc->bge_tx_coal_bds_int = BGE_TX_COAL_BDS_MIN;
	}
	sc->bge_tx_wreg = BGE_TX_WREG_NSEGS;

	/* Set up TX spare and reserved descriptor count */
	if (sc->bge_flags & BGE_FLAG_TSO) {
		sc->bge_txspare = BGE_NSEG_SPARE_TSO;
		sc->bge_txrsvd = BGE_NSEG_RSVD_TSO;
	} else {
		sc->bge_txspare = BGE_NSEG_SPARE;
		sc->bge_txrsvd = BGE_NSEG_RSVD;
	}
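	/*
	 * The spare count keeps enough TX descriptors free that an
	 * incoming frame can always be defragmented and remapped,
	 * while the reserved count stops the ring from filling up
	 * completely; TSO frames can map to many more DMA segments,
	 * hence the larger TSO values.
	 */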

	/* Set up ifnet structure */
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = bge_ioctl;
	ifp->if_start = bge_start;
#ifdef IFPOLL_ENABLE
	ifp->if_npoll = bge_npoll;
#endif
	ifp->if_watchdog = bge_watchdog;
	ifp->if_init = bge_init;
	ifp->if_mtu = ETHERMTU;
	ifp->if_capabilities = IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
	ifp->if_nmbclusters = BGE_STD_RX_RING_CNT;
	ifq_set_maxlen(&ifp->if_snd, BGE_TX_RING_CNT - 1);
	ifq_set_ready(&ifp->if_snd);

	/*
	 * 5700 B0 chips do not support checksumming correctly due
	 * to hardware bugs.
	 */
	if (sc->bge_chipid != BGE_CHIPID_BCM5700_B0) {
		ifp->if_capabilities |= IFCAP_HWCSUM;
		ifp->if_hwassist |= BGE_CSUM_FEATURES;
	}
	if (sc->bge_flags & BGE_FLAG_TSO) {
		ifp->if_capabilities |= IFCAP_TSO;
		ifp->if_hwassist |= CSUM_TSO;
	}
	ifp->if_capenable = ifp->if_capabilities;

	/*
	 * Figure out what sort of media we have by checking the
	 * hardware config word in the first 32k of NIC internal memory,
	 * or fall back to examining the EEPROM if necessary.
	 * Note: on some BCM5700 cards, this value appears to be unset.
	 * If that's the case, we have to rely on identifying the NIC
	 * by its PCI subsystem ID, as we do below for the SysKonnect
	 * SK-9D41.
	 */
	if (bge_readmem_ind(sc, BGE_SRAM_DATA_SIG) == BGE_SRAM_DATA_SIG_MAGIC) {
		hwcfg = bge_readmem_ind(sc, BGE_SRAM_DATA_CFG);
	} else {
		if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
		    sizeof(hwcfg))) {
			device_printf(dev, "failed to read EEPROM\n");
			error = ENXIO;
			goto fail;
		}
		hwcfg = ntohl(hwcfg);
	}

	/* The SysKonnect SK-9D41 is a 1000baseSX card. */
	if (pci_get_subvendor(dev) == PCI_PRODUCT_SCHNEIDERKOCH_SK_9D41 ||
	    (hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER) {
		if (BGE_IS_5714_FAMILY(sc))
			sc->bge_flags |= BGE_FLAG_MII_SERDES;
		else
			sc->bge_flags |= BGE_FLAG_TBI;
	}

	/* Setup MI MODE */
	if (sc->bge_flags & BGE_FLAG_CPMU)
		sc->bge_mi_mode = BGE_MIMODE_500KHZ_CONST;
	else
		sc->bge_mi_mode = BGE_MIMODE_BASE;
	if (BGE_IS_5700_FAMILY(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5705) {
		/* Enable auto polling for BCM570[0-5]. */
		sc->bge_mi_mode |= BGE_MIMODE_AUTOPOLL;
	}

	/* Setup link status update stuffs */
	if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
	    sc->bge_chipid != BGE_CHIPID_BCM5700_B2) {
		sc->bge_link_upd = bge_bcm5700_link_upd;
		sc->bge_link_chg = BGE_MACSTAT_MI_INTERRUPT;
	} else if (sc->bge_flags & BGE_FLAG_TBI) {
		sc->bge_link_upd = bge_tbi_link_upd;
		sc->bge_link_chg = BGE_MACSTAT_LINK_CHANGED;
	} else if (sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) {
		sc->bge_link_upd = bge_autopoll_link_upd;
		sc->bge_link_chg = BGE_MACSTAT_LINK_CHANGED;
	} else {
		sc->bge_link_upd = bge_copper_link_upd;
		sc->bge_link_chg = BGE_MACSTAT_LINK_CHANGED;
	}

	/*
	 * Broadcom's own driver always assumes the internal
	 * PHY is at GMII address 1.  On some chips, the PHY responds
	 * to accesses at all addresses, which could cause us to
	 * bogusly attach the PHY 32 times at probe time.  Always
	 * restricting the lookup to address 1 is simpler than
	 * trying to figure out which chip revisions should be
	 * special-cased.
	 */
	sc->bge_phyno = 1;

	if (sc->bge_flags & BGE_FLAG_TBI) {
		ifmedia_init(&sc->bge_ifmedia, IFM_IMASK,
		    bge_ifmedia_upd, bge_ifmedia_sts);
		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
		ifmedia_add(&sc->bge_ifmedia,
		    IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL);
		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
		ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO);
		sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
	} else {
		struct mii_probe_args mii_args;
		int tries;

		/*
		 * Do transceiver setup and tell the firmware the
		 * driver is down so we can try to get access to the
		 * PHY during the probe if ASF is running.  Retry a
		 * couple of times if we get a conflict with the ASF
		 * firmware accessing the PHY.
		 */
		tries = 0;
		BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
again:
		bge_asf_driver_up(sc);

		mii_probe_args_init(&mii_args, bge_ifmedia_upd, bge_ifmedia_sts);
		mii_args.mii_probemask = 1 << sc->bge_phyno;
		mii_args.mii_capmask = capmask;
		mii_args.mii_privtag = MII_PRIVTAG_BRGPHY;
		mii_args.mii_priv = mii_priv;

		error = mii_probe(dev, &sc->bge_miibus, &mii_args);
		if (error) {
			if (tries++ < 4) {
				device_printf(sc->bge_dev, "Probe MII again\n");
				bge_miibus_writereg(sc->bge_dev,
				    sc->bge_phyno, MII_BMCR, BMCR_RESET);
				goto again;
			}
			device_printf(dev, "MII without any PHY!\n");
			goto fail;
		}

		/*
		 * Now tell the firmware we are going up after probing the PHY
		 */
		if (sc->bge_asf_mode & ASF_STACKUP)
			BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
	}

	ctx = device_get_sysctl_ctx(sc->bge_dev);
	tree = device_get_sysctl_tree(sc->bge_dev);

	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "rx_coal_ticks",
	    CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, bge_sysctl_rx_coal_ticks, "I",
	    "Receive coalescing ticks (usec).");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "tx_coal_ticks",
	    CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, bge_sysctl_tx_coal_ticks, "I",
	    "Transmit coalescing ticks (usec).");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "rx_coal_bds",
	    CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, bge_sysctl_rx_coal_bds, "I",
	    "Receive max coalesced BD count.");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "tx_coal_bds",
	    CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, bge_sysctl_tx_coal_bds, "I",
	    "Transmit max coalesced BD count.");

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "tx_wreg", CTLFLAG_RW,
	    &sc->bge_tx_wreg, 0,
	    "# of segments before writing to hardware register");

	if (sc->bge_flags & BGE_FLAG_PCIE) {
		/*
		 * A common design characteristic for many Broadcom
		 * client controllers is that they only support a
		 * single outstanding DMA read operation on the PCIe
		 * bus.  This means that it will take twice as long to
		 * fetch a TX frame that is split into header and
		 * payload buffers as it does to fetch a single,
		 * contiguous TX frame (2 reads vs. 1 read).  For these
		 * controllers, coalescing buffers to reduce the number
		 * of memory reads is an effective way to get maximum
		 * performance (about 940Mbps).  Without collapsing TX
		 * buffers the maximum TCP bulk transfer performance
		 * is about 850Mbps.  However, forcing mbuf coalescing
		 * consumes a lot of CPU cycles, so leave it off by
		 * default.
		 */
		SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		    "force_defrag", CTLFLAG_RW,
		    &sc->bge_force_defrag, 0,
		    "Force defragment on TX path");
	}
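	/*
	 * These knobs land under the device's sysctl tree, so on a
	 * typical DragonFly install something like
	 *	sysctl dev.bge.0.force_defrag=1
	 * enables the TX defragment behaviour described above at
	 * runtime.
	 */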
	if (sc->bge_flags & BGE_FLAG_STATUS_TAG) {
		if (!BGE_IS_5705_PLUS(sc)) {
			SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
			    "rx_coal_ticks_int", CTLTYPE_INT | CTLFLAG_RW,
			    sc, 0, bge_sysctl_rx_coal_ticks_int, "I",
			    "Receive coalescing ticks "
			    "during interrupt (usec).");
			SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
			    "tx_coal_ticks_int", CTLTYPE_INT | CTLFLAG_RW,
			    sc, 0, bge_sysctl_tx_coal_ticks_int, "I",
			    "Transmit coalescing ticks "
			    "during interrupt (usec).");
		}
		SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		    "rx_coal_bds_int", CTLTYPE_INT | CTLFLAG_RW,
		    sc, 0, bge_sysctl_rx_coal_bds_int, "I",
		    "Receive max coalesced BD count during interrupt.");
		SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		    "tx_coal_bds_int", CTLTYPE_INT | CTLFLAG_RW,
		    sc, 0, bge_sysctl_tx_coal_bds_int, "I",
		    "Transmit max coalesced BD count during interrupt.");
	}

	/*
	 * Call MI attach routine.
	 */
	ether_ifattach(ifp, ether_addr, NULL);

	ifq_set_cpuid(&ifp->if_snd, rman_get_cpuid(sc->bge_irq));

#ifdef IFPOLL_ENABLE
	/* Polling setup */
	ifpoll_compat_setup(&sc->bge_npoll, ctx, tree,
	    device_get_unit(dev), ifp->if_serializer);
#endif

	if (sc->bge_irq_type == PCI_INTR_TYPE_MSI) {
		if (sc->bge_flags & BGE_FLAG_ONESHOT_MSI) {
			intr_func = bge_msi_oneshot;
			if (bootverbose)
				device_printf(dev, "oneshot MSI\n");
		} else {
			intr_func = bge_msi;
		}
	} else if (sc->bge_flags & BGE_FLAG_STATUS_TAG) {
		intr_func = bge_intr_legacy;
	} else {
		intr_func = bge_intr_crippled;
	}
	error = bus_setup_intr(dev, sc->bge_irq, INTR_MPSAFE, intr_func, sc,
	    &sc->bge_intrhand, ifp->if_serializer);
	if (error) {
		ether_ifdetach(ifp);
		device_printf(dev, "couldn't set up irq\n");
		goto fail;
	}

	return(0);
fail:
	bge_detach(dev);
	return(error);
}

static int
bge_detach(device_t dev)
{
	struct bge_softc *sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = &sc->arpcom.ac_if;

		lwkt_serialize_enter(ifp->if_serializer);
		bge_stop(sc);
		bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand);
		lwkt_serialize_exit(ifp->if_serializer);

		ether_ifdetach(ifp);
	}

	if (sc->bge_flags & BGE_FLAG_TBI)
		ifmedia_removeall(&sc->bge_ifmedia);
	if (sc->bge_miibus)
		device_delete_child(dev, sc->bge_miibus);
	bus_generic_detach(dev);

	if (sc->bge_irq != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ, sc->bge_irq_rid,
		    sc->bge_irq);
	}
	if (sc->bge_irq_type == PCI_INTR_TYPE_MSI)
		pci_release_msi(dev);

	if (sc->bge_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    BGE_PCI_BAR0, sc->bge_res);
	}
	if (sc->bge_res2 != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    PCIR_BAR(2), sc->bge_res2);
	}

	bge_dma_free(sc);

	return 0;
}

static void
bge_reset(struct bge_softc *sc)
{
	device_t dev = sc->bge_dev;
	uint32_t cachesize, command, reset, mac_mode, mac_mode_mask;
	void (*write_op)(struct bge_softc *, uint32_t, uint32_t);
	int i, val = 0;

	mac_mode_mask = BGE_MACMODE_HALF_DUPLEX | BGE_MACMODE_PORTMODE;
	if (sc->bge_mfw_flags & BGE_MFW_ON_APE)
		mac_mode_mask |= BGE_MACMODE_APE_RX_EN | BGE_MACMODE_APE_TX_EN;
	mac_mode = CSR_READ_4(sc, BGE_MAC_MODE) & mac_mode_mask;

	if (BGE_IS_575X_PLUS(sc) && !BGE_IS_5714_FAMILY(sc) &&
	    sc->bge_asicrev != BGE_ASICREV_BCM5906) {
		if (sc->bge_flags & BGE_FLAG_PCIE)
			write_op = bge_writemem_direct;
		else
			write_op = bge_writemem_ind;
	} else {
		write_op = bge_writereg_ind;
	}

	if (sc->bge_asicrev != BGE_ASICREV_BCM5700 &&
	    sc->bge_asicrev != BGE_ASICREV_BCM5701) {
		CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
		for (i = 0; i < 8000; i++) {
			if (CSR_READ_4(sc, BGE_NVRAM_SWARB) &
			    BGE_NVRAMSWARB_GNT1)
				break;
			DELAY(20);
		}
		if (i == 8000) {
			if (bootverbose) {
				if_printf(&sc->arpcom.ac_if,
				    "NVRAM lock timed out!\n");
			}
		}
	}
	/* Take APE lock when performing reset. */
	bge_ape_lock(sc, BGE_APE_LOCK_GRC);

	/* Save some important PCI state. */
	cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
	command = pci_read_config(dev, BGE_PCI_CMD, 4);

	pci_write_config(dev, BGE_PCI_MISC_CTL,
	    BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
	    BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW|
	    sc->bge_pci_miscctl, 4);

	/* Disable fastboot on controllers that support it. */
	if (sc->bge_asicrev == BGE_ASICREV_BCM5752 ||
	    BGE_IS_5755_PLUS(sc)) {
		if (bootverbose)
			if_printf(&sc->arpcom.ac_if, "Disabling fastboot\n");
		CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0x0);
	}

	/*
	 * Write the magic number to SRAM at offset 0xB50.
	 * When firmware finishes its initialization it will
	 * write ~BGE_SRAM_FW_MB_MAGIC to the same location.
	 */
	bge_writemem_ind(sc, BGE_SRAM_FW_MB, BGE_SRAM_FW_MB_MAGIC);

	reset = BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1);

	/* XXX: Broadcom Linux driver. */
	if (sc->bge_flags & BGE_FLAG_PCIE) {
		/* Force PCI-E 1.0a mode */
		if (sc->bge_asicrev != BGE_ASICREV_BCM5785 &&
		    CSR_READ_4(sc, BGE_PCIE_PHY_TSTCTL) ==
		    (BGE_PCIE_PHY_TSTCTL_PSCRAM |
		     BGE_PCIE_PHY_TSTCTL_PCIE10)) {
			CSR_WRITE_4(sc, BGE_PCIE_PHY_TSTCTL,
			    BGE_PCIE_PHY_TSTCTL_PSCRAM);
		}
		if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
			/* Prevent PCIE link training during global reset */
			CSR_WRITE_4(sc, BGE_MISC_CFG, (1<<29));
			reset |= (1<<29);
		}
	}

	if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
		uint32_t status, ctrl;

		status = CSR_READ_4(sc, BGE_VCPU_STATUS);
		CSR_WRITE_4(sc, BGE_VCPU_STATUS,
		    status | BGE_VCPU_STATUS_DRV_RESET);
		ctrl = CSR_READ_4(sc, BGE_VCPU_EXT_CTRL);
		CSR_WRITE_4(sc, BGE_VCPU_EXT_CTRL,
		    ctrl & ~BGE_VCPU_EXT_CTRL_HALT_CPU);
	}

	/*
	 * Set GPHY Power Down Override to leave GPHY
	 * powered up in D0 uninitialized.
	 */
	if (BGE_IS_5705_PLUS(sc) && (sc->bge_flags & BGE_FLAG_CPMU) == 0)
		reset |= BGE_MISCCFG_GPHY_PD_OVERRIDE;

	/* Issue global reset */
	write_op(sc, BGE_MISC_CFG, reset);

	if (sc->bge_flags & BGE_FLAG_PCIE)
		DELAY(100 * 1000);
	else
		DELAY(1000);

	/* XXX: Broadcom Linux driver. */
	if (sc->bge_flags & BGE_FLAG_PCIE) {
		uint16_t devctl;

		if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
			uint32_t v;

			DELAY(500000); /* wait for link training to complete */
			v = pci_read_config(dev, 0xc4, 4);
			pci_write_config(dev, 0xc4, v | (1<<15), 4);
		}

		devctl = pci_read_config(dev,
		    sc->bge_pciecap + PCIER_DEVCTRL, 2);

		/* Disable no snoop and disable relaxed ordering. */
		devctl &= ~(PCIEM_DEVCTL_RELAX_ORDER | PCIEM_DEVCTL_NOSNOOP);

		/* Old PCI-E chips only support 128 bytes Max PayLoad Size. */
		if ((sc->bge_flags & BGE_FLAG_CPMU) == 0) {
			devctl &= ~PCIEM_DEVCTL_MAX_PAYLOAD_MASK;
			devctl |= PCIEM_DEVCTL_MAX_PAYLOAD_128;
		}

		pci_write_config(dev, sc->bge_pciecap + PCIER_DEVCTRL,
		    devctl, 2);

		/* Clear error status. */
		pci_write_config(dev, sc->bge_pciecap + PCIER_DEVSTS,
		    PCIEM_DEVSTS_CORR_ERR |
		    PCIEM_DEVSTS_NFATAL_ERR |
		    PCIEM_DEVSTS_FATAL_ERR |
		    PCIEM_DEVSTS_UNSUPP_REQ, 2);
	}

	/* Reset some of the PCI state that got zapped by reset */
	pci_write_config(dev, BGE_PCI_MISC_CTL,
	    BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
	    BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW|
	    sc->bge_pci_miscctl, 4);
	val = BGE_PCISTATE_ROM_ENABLE | BGE_PCISTATE_ROM_RETRY_ENABLE;
	if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0 &&
	    (sc->bge_flags & BGE_FLAG_PCIX))
		val |= BGE_PCISTATE_RETRY_SAME_DMA;
	if (sc->bge_mfw_flags & BGE_MFW_ON_APE) {
		val |= BGE_PCISTATE_ALLOW_APE_CTLSPC_WR |
		    BGE_PCISTATE_ALLOW_APE_SHMEM_WR |
		    BGE_PCISTATE_ALLOW_APE_PSPACE_WR;
	}
	pci_write_config(dev, BGE_PCI_PCISTATE, val, 4);
	pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
	pci_write_config(dev, BGE_PCI_CMD, command, 4);

	/*
	 * Disable PCI-X relaxed ordering to ensure status block update
	 * comes first then packet buffer DMA. Otherwise driver may
	 * read stale status block.
	 */
	if (sc->bge_flags & BGE_FLAG_PCIX) {
		uint16_t devctl;

		devctl = pci_read_config(dev,
		    sc->bge_pcixcap + PCIXR_COMMAND, 2);
		devctl &= ~PCIXM_COMMAND_ERO;
		if (sc->bge_asicrev == BGE_ASICREV_BCM5703) {
			devctl &= ~PCIXM_COMMAND_MAX_READ;
			devctl |= PCIXM_COMMAND_MAX_READ_2048;
		} else if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
			devctl &= ~(PCIXM_COMMAND_MAX_SPLITS |
			    PCIXM_COMMAND_MAX_READ);
			devctl |= PCIXM_COMMAND_MAX_READ_2048;
		}
		pci_write_config(dev, sc->bge_pcixcap + PCIXR_COMMAND,
		    devctl, 2);
	}

	/*
	 * Enable memory arbiter and re-enable MSI if necessary.
	 */
	if (BGE_IS_5714_FAMILY(sc)) {
		uint32_t val;

		if (sc->bge_irq_type == PCI_INTR_TYPE_MSI) {
			/*
			 * Resetting BCM5714 family will clear MSI
			 * enable bit; restore it after resetting.
			 */
			PCI_SETBIT(sc->bge_dev, sc->bge_msicap + PCIR_MSI_CTRL,
			    PCIM_MSICTRL_MSI_ENABLE, 2);
			BGE_SETBIT(sc, BGE_MSI_MODE, BGE_MSIMODE_ENABLE);
		}
		val = CSR_READ_4(sc, BGE_MARB_MODE);
		CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | val);
	} else {
		CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
	}

	/* Fix up byte swapping. */
	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS |
	    BGE_MODECTL_BYTESWAP_DATA);

	val = CSR_READ_4(sc, BGE_MAC_MODE);
	val = (val & ~mac_mode_mask) | mac_mode;
	CSR_WRITE_4(sc, BGE_MAC_MODE, val);
	DELAY(40);

	bge_ape_unlock(sc, BGE_APE_LOCK_GRC);

	if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
		for (i = 0; i < BGE_TIMEOUT; i++) {
			val = CSR_READ_4(sc, BGE_VCPU_STATUS);
			if (val & BGE_VCPU_STATUS_INIT_DONE)
				break;
			DELAY(100);
		}
		if (i == BGE_TIMEOUT) {
			if_printf(&sc->arpcom.ac_if, "reset timed out\n");
			return;
		}
	} else {
		int delay_us = 10;

		if (sc->bge_asicrev == BGE_ASICREV_BCM5761)
			delay_us = 100;

		/*
		 * Poll until we see the 1's complement of the magic number.
		 * This indicates that the firmware initialization
		 * is complete.
		 */
		for (i = 0; i < BGE_FIRMWARE_TIMEOUT; i++) {
			val = bge_readmem_ind(sc, BGE_SRAM_FW_MB);
			if (val == ~BGE_SRAM_FW_MB_MAGIC)
				break;
			DELAY(delay_us);
		}
		if (i == BGE_FIRMWARE_TIMEOUT) {
			if_printf(&sc->arpcom.ac_if, "firmware handshake "
			    "timed out, found 0x%08x\n", val);
		}
	}

	/*
	 * The 5704 in TBI mode apparently needs some special
	 * adjustment to ensure the SERDES drive level is set
	 * to 1.2V.
	 */
	if (sc->bge_asicrev == BGE_ASICREV_BCM5704 &&
	    (sc->bge_flags & BGE_FLAG_TBI)) {
		uint32_t serdescfg;

		serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG);
		serdescfg = (serdescfg & ~0xFFF) | 0x880;
		CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg);
	}

	/* XXX: Broadcom Linux driver. */
	if ((sc->bge_flags & BGE_FLAG_PCIE) &&
	    sc->bge_chipid != BGE_CHIPID_BCM5750_A0 &&
	    sc->bge_asicrev != BGE_ASICREV_BCM5785) {
		uint32_t v;

		/* Enable Data FIFO protection. */
		v = CSR_READ_4(sc, BGE_PCIE_TLDLPL_PORT);
		CSR_WRITE_4(sc, BGE_PCIE_TLDLPL_PORT, v | (1 << 25));
	}

	DELAY(10000);
}

/*
 * Frame reception handling.  This is called if there's a frame
 * on the receive return list.
 *
 * Note: we have to be able to handle two possibilities here:
 * 1) the frame is from the jumbo receive ring
 * 2) the frame is from the standard receive ring
 */

static void
bge_rxeof(struct bge_softc *sc, uint16_t rx_prod, int count)
{
	struct ifnet *ifp;
	int stdcnt = 0, jumbocnt = 0;

3046 ifp = &sc->arpcom.ac_if;
3047
1c40ce90 3048 while (sc->bge_rx_saved_considx != rx_prod && count != 0) {
984263bc 3049 struct bge_rx_bd *cur_rx;
33c39a69 3050 uint32_t rxidx;
984263bc 3051 struct mbuf *m = NULL;
33c39a69 3052 uint16_t vlan_tag = 0;
984263bc
MD
3053 int have_tag = 0;
3054
1c40ce90
SZ
3055 --count;
3056
984263bc 3057 cur_rx =
20c9a969 3058 &sc->bge_ldata.bge_rx_return_ring[sc->bge_rx_saved_considx];
984263bc
MD
3059
3060 rxidx = cur_rx->bge_idx;
7e40b8c5 3061 BGE_INC(sc->bge_rx_saved_considx, sc->bge_return_ring_cnt);
6b880771 3062 logif(rx_pkt);
984263bc
MD
3063
3064 if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
3065 have_tag = 1;
3066 vlan_tag = cur_rx->bge_vlan_tag;
3067 }
3068
3069 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
3070 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
984263bc 3071 jumbocnt++;
1436f9a0
SZ
3072
3073 if (rxidx != sc->bge_jumbo) {
d40991ef 3074 IFNET_STAT_INC(ifp, ierrors, 1);
1436f9a0
SZ
3075 if_printf(ifp, "sw jumbo index(%d) "
3076 "and hw jumbo index(%d) mismatch, drop!\n",
3077 sc->bge_jumbo, rxidx);
3078 bge_setup_rxdesc_jumbo(sc, rxidx);
3079 continue;
3080 }
3081
3082 m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx].bge_mbuf;
984263bc 3083 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
d40991ef 3084 IFNET_STAT_INC(ifp, ierrors, 1);
1436f9a0 3085 bge_setup_rxdesc_jumbo(sc, sc->bge_jumbo);
984263bc
MD
3086 continue;
3087 }
1436f9a0 3088 if (bge_newbuf_jumbo(sc, sc->bge_jumbo, 0)) {
d40991ef 3089 IFNET_STAT_INC(ifp, ierrors, 1);
1436f9a0 3090 bge_setup_rxdesc_jumbo(sc, sc->bge_jumbo);
984263bc
MD
3091 continue;
3092 }
3093 } else {
4849f4a3
SZ
3094 int discard = 0;
3095
984263bc 3096 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
984263bc 3097 stdcnt++;
1436f9a0
SZ
3098
3099 if (rxidx != sc->bge_std) {
d40991ef 3100 IFNET_STAT_INC(ifp, ierrors, 1);
1436f9a0
SZ
3101 if_printf(ifp, "sw std index(%d) "
3102 "and hw std index(%d) mismatch, drop!\n",
3103 sc->bge_std, rxidx);
3104 bge_setup_rxdesc_std(sc, rxidx);
4849f4a3
SZ
3105 discard = 1;
3106 goto refresh_rx;
1436f9a0
SZ
3107 }
3108
3109 m = sc->bge_cdata.bge_rx_std_chain[rxidx].bge_mbuf;
984263bc 3110 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
d40991ef 3111 IFNET_STAT_INC(ifp, ierrors, 1);
1436f9a0 3112 bge_setup_rxdesc_std(sc, sc->bge_std);
4849f4a3
SZ
3113 discard = 1;
3114 goto refresh_rx;
984263bc 3115 }
1436f9a0 3116 if (bge_newbuf_std(sc, sc->bge_std, 0)) {
d40991ef 3117 IFNET_STAT_INC(ifp, ierrors, 1);
1436f9a0 3118 bge_setup_rxdesc_std(sc, sc->bge_std);
4849f4a3 3119 discard = 1;
984263bc 3120 }
4849f4a3
SZ
3121refresh_rx:
3122 if (sc->bge_rx_wreg > 0 && stdcnt >= sc->bge_rx_wreg) {
3123 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO,
3124 sc->bge_std);
3125 stdcnt = 0;
3126 }
3127 if (discard)
3128 continue;
984263bc
MD
3129 }
3130
d40991ef 3131 IFNET_STAT_INC(ifp, ipackets, 1);
061def6f 3132#if !defined(__i386__) && !defined(__x86_64__)
984263bc 3133 /*
061def6f 3134 * The x86 allows unaligned accesses, but for other
984263bc
MD
3135 * platforms we must make sure the payload is aligned.
3136 */
0ecb11d7 3137 if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) {
984263bc
MD
3138 bcopy(m->m_data, m->m_data + ETHER_ALIGN,
3139 cur_rx->bge_len);
3140 m->m_data += ETHER_ALIGN;
3141 }
3142#endif
160185fa 3143 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
984263bc
MD
3144 m->m_pkthdr.rcvif = ifp;
3145
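		/*
		 * A note on the RXCSUM block below: the chip reports the
		 * one's complement sum of the received IP header, and a
		 * valid header (checksum field included) sums to 0xffff,
		 * so (bge_ip_csum ^ 0xffff) == 0 means "header checksum
		 * verified".
		 */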
cb623c48
SZ
3146 if (ifp->if_capenable & IFCAP_RXCSUM) {
3147 if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
3148 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
3149 if ((cur_rx->bge_ip_csum ^ 0xffff) == 0)
3150 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3151 }
17240569 3152 if ((cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) &&
da4fe422 3153 m->m_pkthdr.len >= BGE_MIN_FRAMELEN) {
984263bc 3154 m->m_pkthdr.csum_data =
17240569 3155 cur_rx->bge_tcp_udp_csum;
bf29e666
SZ
3156 m->m_pkthdr.csum_flags |=
3157 CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
984263bc
MD
3158 }
3159 }
984263bc
MD
3160
3161 /*
3162 * If we received a packet with a vlan tag, pass it
3163 * to vlan_input() instead of ether_input().
3164 */
3165 if (have_tag) {
e6b5847c
SZ
3166 m->m_flags |= M_VLANTAG;
3167 m->m_pkthdr.ether_vlantag = vlan_tag;
984263bc 3168 }
73029d08 3169 ifp->if_input(ifp, m, NULL, -1);
984263bc
MD
3170 }
3171
591dfc77 3172 bge_writembx(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
984263bc 3173 if (stdcnt)
591dfc77 3174 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
984263bc 3175 if (jumbocnt)
591dfc77 3176 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
984263bc
MD
3177}
3178
3179static void
90ad1c96 3180bge_txeof(struct bge_softc *sc, uint16_t tx_cons)
984263bc 3181{
984263bc
MD
3182 struct ifnet *ifp;
3183
3184 ifp = &sc->arpcom.ac_if;
3185
3186 /*
3187 * Go through our tx ring and free mbufs for those
3188 * frames that have been sent.
3189 */
90ad1c96 3190 while (sc->bge_tx_saved_considx != tx_cons) {
20c9a969 3191 uint32_t idx = 0;
984263bc
MD
3192
3193 idx = sc->bge_tx_saved_considx;
984263bc 3194 if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
d40991ef 3195 IFNET_STAT_INC(ifp, opackets, 1);
ddca511d 3196 bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag,
20c9a969 3197 sc->bge_cdata.bge_tx_dmamap[idx]);
984263bc
MD
3198 m_freem(sc->bge_cdata.bge_tx_chain[idx]);
3199 sc->bge_cdata.bge_tx_chain[idx] = NULL;
3200 }
3201 sc->bge_txcnt--;
3202 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
6b880771 3203 logif(tx_pkt);
984263bc
MD
3204 }
3205
421aaa40 3206 if ((BGE_TX_RING_CNT - sc->bge_txcnt) >=
e92f005c 3207 (sc->bge_txrsvd + sc->bge_txspare))
9ed293e0 3208 ifq_clr_oactive(&ifp->if_snd);
20c9a969 3209
142ca760
SZ
3210 if (sc->bge_txcnt == 0)
3211 ifp->if_timer = 0;
3212
20c9a969 3213 if (!ifq_is_empty(&ifp->if_snd))
9db4b353 3214 if_devstart(ifp);
984263bc
MD
3215}
3216
7e1b2526 3217#ifdef IFPOLL_ENABLE
315fe0ee
MD
3218
3219static void
1c40ce90 3220bge_npoll_compat(struct ifnet *ifp, void *arg __unused, int cycles)
315fe0ee
MD
3221{
3222 struct bge_softc *sc = ifp->if_softc;
90ad1c96
SZ
3223 struct bge_status_block *sblk = sc->bge_ldata.bge_status_block;
3224 uint16_t rx_prod, tx_cons;
315fe0ee 3225
7e1b2526
SZ
3226 ASSERT_SERIALIZED(ifp->if_serializer);
3227
3228 if (sc->bge_npoll.ifpc_stcount-- == 0) {
3229 sc->bge_npoll.ifpc_stcount = sc->bge_npoll.ifpc_stfrac;
315fe0ee
MD
3230 /*
3231 * Process link state changes.
3232 */
e287b14f 3233 bge_link_poll(sc);
7e1b2526
SZ
3234 }
3235
3236 if (sc->bge_flags & BGE_FLAG_STATUS_TAG) {
3237 sc->bge_status_tag = sblk->bge_status_tag;
3238 /*
3239 * Use a load fence to ensure that status_tag
3240 * is saved before rx_prod and tx_cons.
3241 */
3242 cpu_lfence();
3243 }
3244
3245 rx_prod = sblk->bge_idx[0].bge_rx_prod_idx;
3246 if (sc->bge_rx_saved_considx != rx_prod)
1c40ce90 3247 bge_rxeof(sc, rx_prod, cycles);
7e1b2526
SZ
3248
3249 tx_cons = sblk->bge_idx[0].bge_tx_cons_idx;
3250 if (sc->bge_tx_saved_considx != tx_cons)
3251 bge_txeof(sc, tx_cons);
3252
3253 if (sc->bge_flags & BGE_FLAG_STATUS_TAG)
3254 bge_writembx(sc, BGE_MBX_IRQ0_LO, sc->bge_status_tag << 24);
c39d28b2
SZ
3255
3256 if (sc->bge_coal_chg)
3257 bge_coal_change(sc);
7e1b2526
SZ
3258}
3259
3260static void
3261bge_npoll(struct ifnet *ifp, struct ifpoll_info *info)
3262{
3263 struct bge_softc *sc = ifp->if_softc;
3264
3265 ASSERT_SERIALIZED(ifp->if_serializer);
3266
3267 if (info != NULL) {
3268 int cpuid = sc->bge_npoll.ifpc_cpuid;
3269
3270 info->ifpi_rx[cpuid].poll_func = bge_npoll_compat;
3271 info->ifpi_rx[cpuid].arg = NULL;
3272 info->ifpi_rx[cpuid].serializer = ifp->if_serializer;
3273
3274 if (ifp->if_flags & IFF_RUNNING)
3275 bge_disable_intr(sc);
dfd3b18b 3276 ifq_set_cpuid(&ifp->if_snd, cpuid);
7e1b2526
SZ
3277 } else {
3278 if (ifp->if_flags & IFF_RUNNING)
3279 bge_enable_intr(sc);
dfd3b18b 3280 ifq_set_cpuid(&ifp->if_snd, rman_get_cpuid(sc->bge_irq));
315fe0ee
MD
3281 }
3282}
3283
7e1b2526 3284#endif /* IFPOLL_ENABLE */
315fe0ee 3285
984263bc 3286static void
308dcd8e 3287bge_intr_crippled(void *xsc)
984263bc 3288{
bf522c7f 3289 struct bge_softc *sc = xsc;
33c39a69 3290 struct ifnet *ifp = &sc->arpcom.ac_if;
6b880771
SZ
3291
3292 logif(intr);
0029ccf6 3293
142ca760
SZ
3294 /*
3295 * Ack the interrupt by writing something to BGE_MBX_IRQ0_LO. Don't
3296 * disable interrupts by writing nonzero like we used to, since with
3297 * our current organization this just gives complications and
3298 * pessimizations for re-enabling interrupts. We used to have races
3299 * instead of the necessary complications. Disabling interrupts
3300 * would just reduce the chance of a status update while we are
3301 * running (by switching to the interrupt-mode coalescence
3302 * parameters), but this chance is already very low so it is more
3303 * efficient to get another interrupt than prevent it.
3304 *
3305 * We do the ack first to ensure another interrupt if there is a
3306 * status update after the ack. We don't check for the status
3307 * changing later because it is more efficient to get another
3308 * interrupt than prevent it, not quite as above (not checking is
3309 * a smaller optimization than not toggling the interrupt enable,
3310	 * since checking doesn't involve PCI accesses and toggling requires
3311 * the status check). So toggling would probably be a pessimization
3312 * even with MSI. It would only be needed for using a task queue.
3313 */
591dfc77 3314 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
142ca760 3315
984263bc
MD
3316 /*
3317 * Process link state changes.
984263bc 3318 */
e287b14f 3319 bge_link_poll(sc);
984263bc
MD
3320
3321 if (ifp->if_flags & IFF_RUNNING) {
90ad1c96
SZ
3322 struct bge_status_block *sblk = sc->bge_ldata.bge_status_block;
3323 uint16_t rx_prod, tx_cons;
3324
3325 rx_prod = sblk->bge_idx[0].bge_rx_prod_idx;
3326 if (sc->bge_rx_saved_considx != rx_prod)
1c40ce90 3327 bge_rxeof(sc, rx_prod, -1);
90ad1c96
SZ
3328
3329 tx_cons = sblk->bge_idx[0].bge_tx_cons_idx;
3330 if (sc->bge_tx_saved_considx != tx_cons)
3331 bge_txeof(sc, tx_cons);
3332 }
3333
3334 if (sc->bge_coal_chg)
3335 bge_coal_change(sc);
3336}
3337
3338static void
308dcd8e 3339bge_intr_legacy(void *xsc)
90ad1c96
SZ
3340{
3341 struct bge_softc *sc = xsc;
90ad1c96 3342 struct bge_status_block *sblk = sc->bge_ldata.bge_status_block;
90ad1c96
SZ
3343
3344 if (sc->bge_status_tag == sblk->bge_status_tag) {
e287b14f
SZ
3345 uint32_t val;
3346
90ad1c96
SZ
3347 val = pci_read_config(sc->bge_dev, BGE_PCI_PCISTATE, 4);
3348 if (val & BGE_PCISTAT_INTR_NOTACT)
3349 return;
3350 }
3351
3352 /*
3353 * NOTE:
3354 * Interrupt will have to be disabled if tagged status
3355 * is used, else interrupt will always be asserted on
3356 * certain chips (at least on BCM5750 AX/BX).
3357 */
3358 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
3359
308dcd8e
SZ
3360 bge_intr(sc);
3361}
3362
3363static void
3364bge_msi(void *xsc)
3365{
3366 struct bge_softc *sc = xsc;
3367
3368 /* Disable interrupt first */
3369 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
3370 bge_intr(sc);
3371}
3372
3373static void
3374bge_msi_oneshot(void *xsc)
3375{
3376 bge_intr(xsc);
3377}
3378
3379static void
3380bge_intr(struct bge_softc *sc)
3381{
3382 struct ifnet *ifp = &sc->arpcom.ac_if;
3383 struct bge_status_block *sblk = sc->bge_ldata.bge_status_block;
3384 uint16_t rx_prod, tx_cons;
3385 uint32_t status;
3386
90ad1c96
SZ
3387 sc->bge_status_tag = sblk->bge_status_tag;
3388 /*
3389 * Use a load fence to ensure that status_tag is saved
9fb11f90 3390 * before rx_prod, tx_cons and status.
90ad1c96
SZ
3391 */
3392 cpu_lfence();
3393
3394 rx_prod = sblk->bge_idx[0].bge_rx_prod_idx;
3395 tx_cons = sblk->bge_idx[0].bge_tx_cons_idx;
3396 status = sblk->bge_status;
3397
e287b14f
SZ
3398 if ((status & BGE_STATFLAG_LINKSTATE_CHANGED) || sc->bge_link_evt)
3399 bge_link_poll(sc);
90ad1c96
SZ
3400
3401 if (ifp->if_flags & IFF_RUNNING) {
3402 if (sc->bge_rx_saved_considx != rx_prod)
1c40ce90 3403 bge_rxeof(sc, rx_prod, -1);
984263bc 3404
90ad1c96
SZ
3405 if (sc->bge_tx_saved_considx != tx_cons)
3406 bge_txeof(sc, tx_cons);
984263bc 3407 }
055d06f0 3408
90ad1c96
SZ
3409 bge_writembx(sc, BGE_MBX_IRQ0_LO, sc->bge_status_tag << 24);
3410
055d06f0
SZ
3411 if (sc->bge_coal_chg)
3412 bge_coal_change(sc);
984263bc
MD
3413}
3414
3415static void
33c39a69 3416bge_tick(void *xsc)
78195a76
MD
3417{
3418 struct bge_softc *sc = xsc;
3419 struct ifnet *ifp = &sc->arpcom.ac_if;
3420
3421 lwkt_serialize_enter(ifp->if_serializer);
984263bc 3422
0ecb11d7 3423 if (BGE_IS_5705_PLUS(sc))
7e40b8c5
HP
3424 bge_stats_update_regs(sc);
3425 else
3426 bge_stats_update(sc);
9a717c15 3427
0ecb11d7 3428 if (sc->bge_flags & BGE_FLAG_TBI) {
db861466
SZ
3429 /*
3430 * Since in TBI mode auto-polling can't be used we should poll
3431 * link status manually. Here we register pending link event
3432 * and trigger interrupt.
3433 */
3434 sc->bge_link_evt++;
14852ccc 3435 if (BGE_IS_CRIPPLED(sc))
5225ba10
SZ
3436 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
3437 else
3438 BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
3f82ed83 3439 } else if (!sc->bge_link) {
db861466 3440 mii_tick(device_get_softc(sc->bge_miibus));
984263bc
MD
3441 }
3442
57b62224
SZ
3443 bge_asf_driver_up(sc);
3444
db861466
SZ
3445 callout_reset(&sc->bge_stat_timer, hz, bge_tick, sc);
3446
3447 lwkt_serialize_exit(ifp->if_serializer);
984263bc
MD
3448}
3449
7e40b8c5 3450static void
33c39a69 3451bge_stats_update_regs(struct bge_softc *sc)
7e40b8c5 3452{
33c39a69 3453 struct ifnet *ifp = &sc->arpcom.ac_if;
7e40b8c5 3454 struct bge_mac_stats_regs stats;
33c39a69 3455 uint32_t *s;
7e40b8c5
HP
3456 int i;
3457
33c39a69 3458 s = (uint32_t *)&stats;
7e40b8c5
HP
3459 for (i = 0; i < sizeof(struct bge_mac_stats_regs); i += 4) {
3460 *s = CSR_READ_4(sc, BGE_RX_STATS + i);
3461 s++;
3462 }
3463
d40991ef 3464 IFNET_STAT_SET(ifp, collisions,
7e40b8c5
HP
3465 (stats.dot3StatsSingleCollisionFrames +
3466 stats.dot3StatsMultipleCollisionFrames +
3467 stats.dot3StatsExcessiveCollisions +
d40991ef 3468 stats.dot3StatsLateCollisions));
7e40b8c5
HP
3469}
3470
984263bc 3471static void
33c39a69 3472bge_stats_update(struct bge_softc *sc)
984263bc 3473{
33c39a69 3474 struct ifnet *ifp = &sc->arpcom.ac_if;
20c9a969
SZ
3475 bus_size_t stats;
3476
3477 stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;
984263bc 3478
20c9a969
SZ
3479#define READ_STAT(sc, stats, stat) \
3480 CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))
984263bc 3481
d40991ef 3482 IFNET_STAT_SET(ifp, collisions,
20c9a969
SZ
3483 (READ_STAT(sc, stats,
3484 txstats.dot3StatsSingleCollisionFrames.bge_addr_lo) +
3485 READ_STAT(sc, stats,
3486 txstats.dot3StatsMultipleCollisionFrames.bge_addr_lo) +
3487 READ_STAT(sc, stats,
3488 txstats.dot3StatsExcessiveCollisions.bge_addr_lo) +
3489 READ_STAT(sc, stats,
d40991ef 3490 txstats.dot3StatsLateCollisions.bge_addr_lo)));
984263bc 3491
20c9a969
SZ
3492#undef READ_STAT
3493
984263bc 3494#ifdef notdef
d40991ef 3495 IFNET_STAT_SET(ifp, collisions,
984263bc
MD
3496 (sc->bge_rdata->bge_info.bge_stats.dot3StatsSingleCollisionFrames +
3497 sc->bge_rdata->bge_info.bge_stats.dot3StatsMultipleCollisionFrames +
3498 sc->bge_rdata->bge_info.bge_stats.dot3StatsExcessiveCollisions +
d40991ef 3499 sc->bge_rdata->bge_info.bge_stats.dot3StatsLateCollisions));
984263bc 3500#endif
984263bc
MD
3501}
3502
3503/*
3504 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
3505 * pointers to descriptors.
3506 */
3507static int
aad8b3fe
SZ
3508bge_encap(struct bge_softc *sc, struct mbuf **m_head0, uint32_t *txidx,
3509 int *segs_used)
984263bc 3510{
e92f005c
SZ
3511 struct bge_tx_bd *d = NULL, *last_d;
3512 uint16_t csum_flags = 0, mss = 0;
20c9a969
SZ
3513 bus_dma_segment_t segs[BGE_NSEG_NEW];
3514 bus_dmamap_t map;
2de621e9 3515 int error, maxsegs, nsegs, idx, i;
e0b35c1f 3516 struct mbuf *m_head = *m_head0, *m_new;
984263bc 3517
e92f005c
SZ
3518 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
3519 error = bge_setup_tso(sc, m_head0, &mss, &csum_flags);
3520 if (error)
3521 return ENOBUFS;
3522 m_head = *m_head0;
3523 } else if (m_head->m_pkthdr.csum_flags & BGE_CSUM_FEATURES) {
984263bc
MD
3524 if (m_head->m_pkthdr.csum_flags & CSUM_IP)
3525 csum_flags |= BGE_TXBDFLAG_IP_CSUM;
3526 if (m_head->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
3527 csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
3528 if (m_head->m_flags & M_LASTFRAG)
3529 csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
3530 else if (m_head->m_flags & M_FRAG)
3531 csum_flags |= BGE_TXBDFLAG_IP_FRAG;
3532 }
20c9a969
SZ
3533
3534 idx = *txidx;
3535 map = sc->bge_cdata.bge_tx_dmamap[idx];
3536
e92f005c
SZ
3537 maxsegs = (BGE_TX_RING_CNT - sc->bge_txcnt) - sc->bge_txrsvd;
3538 KASSERT(maxsegs >= sc->bge_txspare,
ed20d0e3 3539 ("not enough segments %d", maxsegs));
20c9a969
SZ
3540
3541 if (maxsegs > BGE_NSEG_NEW)
3542 maxsegs = BGE_NSEG_NEW;
3543
cb623c48 3544 /*
da4fe422
SZ
3545 * Pad outbound frame to BGE_MIN_FRAMELEN for an unusual reason.
3546 * The bge hardware will pad out Tx runts to BGE_MIN_FRAMELEN,
cb623c48
SZ
3547 * but when such padded frames employ the bge IP/TCP checksum
3548 * offload, the hardware checksum assist gives incorrect results
3549 * (possibly from incorporating its own padding into the UDP/TCP
3550 * checksum; who knows). If we pad such runts with zeros, the
2679514c 3551 * onboard checksum comes out correct.
cb623c48
SZ
3552 */
3553 if ((csum_flags & BGE_TXBDFLAG_TCP_UDP_CSUM) &&
da4fe422
SZ
3554 m_head->m_pkthdr.len < BGE_MIN_FRAMELEN) {
3555 error = m_devpad(m_head, BGE_MIN_FRAMELEN);
2679514c
SZ
3556 if (error)
3557 goto back;
cb623c48 3558 }
2679514c 3559
e0b35c1f
SZ
3560 if ((sc->bge_flags & BGE_FLAG_SHORTDMA) && m_head->m_next != NULL) {
3561 m_new = bge_defrag_shortdma(m_head);
3562 if (m_new == NULL) {
3563 error = ENOBUFS;
3564 goto back;
3565 }
3566 *m_head0 = m_head = m_new;
3567 }
e92f005c
SZ
3568 if ((m_head->m_pkthdr.csum_flags & CSUM_TSO) == 0 &&
3569 sc->bge_force_defrag && (sc->bge_flags & BGE_FLAG_PCIE) &&
c728ae98 3570 m_head->m_next != NULL) {
c728ae98
SZ
3571 /*
3572 * Forcefully defragment mbuf chain to overcome hardware
3573		 * limitation which only supports a single outstanding
3574 * DMA read operation. If it fails, keep moving on using
3575 * the original mbuf chain.
3576 */
b5523eac 3577 m_new = m_defrag(m_head, M_NOWAIT);
c728ae98
SZ
3578 if (m_new != NULL)
3579 *m_head0 = m_head = m_new;
3580 }
3581
2de621e9
SZ
3582 error = bus_dmamap_load_mbuf_defrag(sc->bge_cdata.bge_tx_mtag, map,
3583 m_head0, segs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
3584 if (error)
20c9a969 3585 goto back;
aad8b3fe 3586 *segs_used += nsegs;
984263bc 3587
2de621e9 3588 m_head = *m_head0;
ddca511d 3589 bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag, map, BUS_DMASYNC_PREWRITE);
984263bc 3590
20c9a969
SZ
3591 for (i = 0; ; i++) {
3592 d = &sc->bge_ldata.bge_tx_ring[idx];
984263bc 3593
2de621e9
SZ
3594 d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr);
3595 d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr);
20c9a969
SZ
3596 d->bge_len = segs[i].ds_len;
3597 d->bge_flags = csum_flags;
e92f005c 3598 d->bge_mss = mss;
984263bc 3599
2de621e9 3600 if (i == nsegs - 1)
20c9a969
SZ
3601 break;
3602 BGE_INC(idx, BGE_TX_RING_CNT);
3603 }
e92f005c 3604 last_d = d;
984263bc 3605
20c9a969
SZ
3606 /* Set vlan tag to the first segment of the packet. */
3607 d = &sc->bge_ldata.bge_tx_ring[*txidx];
83790f85 3608 if (m_head->m_flags & M_VLANTAG) {
20c9a969 3609 d->bge_flags |= BGE_TXBDFLAG_VLAN_TAG;
83790f85 3610 d->bge_vlan_tag = m_head->m_pkthdr.ether_vlantag;
20c9a969
SZ
3611 } else {
3612 d->bge_vlan_tag = 0;
3613 }
3614
e92f005c
SZ
3615 /* Mark the last segment as end of packet... */
3616 last_d->bge_flags |= BGE_TXBDFLAG_END;
3617
20c9a969
SZ
3618 /*
3619	 * Ensure that the map for this transmission is placed at
3620 * the array index of the last descriptor in this chain.
3621 */
3622 sc->bge_cdata.bge_tx_dmamap[*txidx] = sc->bge_cdata.bge_tx_dmamap[idx];
3623 sc->bge_cdata.bge_tx_dmamap[idx] = map;
3624 sc->bge_cdata.bge_tx_chain[idx] = m_head;
2de621e9 3625 sc->bge_txcnt += nsegs;
20c9a969
SZ
3626
3627 BGE_INC(idx, BGE_TX_RING_CNT);
3628 *txidx = idx;
3629back:
4a607ed6 3630 if (error) {
2de621e9 3631 m_freem(*m_head0);
4a607ed6
SZ
3632 *m_head0 = NULL;
3633 }
20c9a969 3634 return error;
984263bc
MD
3635}
3636
aad8b3fe
SZ
3637static void
3638bge_xmit(struct bge_softc *sc, uint32_t prodidx)
3639{
3640 /* Transmit */
3641 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
3642 /* 5700 b2 errata */
3643 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
3644 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
3645}
3646
984263bc
MD
3647/*
3648 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
3649 * to the mbuf data regions directly in the transmit descriptors.
3650 */
3651static void
f0a26983 3652bge_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
984263bc 3653{
20c9a969 3654 struct bge_softc *sc = ifp->if_softc;
984263bc 3655 struct mbuf *m_head = NULL;
20c9a969 3656 uint32_t prodidx;
aad8b3fe 3657 int nsegs = 0;
984263bc 3658
f0a26983
SZ
3659 ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq);
3660
9ed293e0 3661 if ((ifp->if_flags & IFF_RUNNING) == 0 || ifq_is_oactive(&ifp->if_snd))
984263bc
MD
3662 return;
3663
94db8384 3664 prodidx = sc->bge_tx_prodidx;
984263bc 3665
75544bcd 3666 while (sc->bge_cdata.bge_tx_chain[prodidx] == NULL) {
ac9843a1 3667 m_head = ifq_dequeue(&ifp->if_snd);
984263bc
MD
3668 if (m_head == NULL)
3669 break;
3670
3671 /*
cb623c48
SZ
3672 * XXX
3673 * The code inside the if() block is never reached since we
3674 * must mark CSUM_IP_FRAGS in our if_hwassist to start getting
3675 * requests to checksum TCP/UDP in a fragmented packet.
3676 *
984263bc
MD
3677 * XXX
3678 * safety overkill. If this is a fragmented packet chain
3679 * with delayed TCP/UDP checksums, then only encapsulate
3680 * it if we have enough descriptors to handle the entire
3681 * chain at once.
3682 * (paranoia -- may not actually be needed)
3683 */
9db4b353
SZ
3684 if ((m_head->m_flags & M_FIRSTFRAG) &&
3685 (m_head->m_pkthdr.csum_flags & CSUM_DELAY_DATA)) {
984263bc 3686 if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
e92f005c 3687 m_head->m_pkthdr.csum_data + sc->bge_txrsvd) {
9ed293e0 3688 ifq_set_oactive(&ifp->if_snd);
9db4b353 3689 ifq_prepend(&ifp->if_snd, m_head);
984263bc
MD
3690 break;
3691 }
3692 }
3693
20c9a969 3694 /*
e92f005c 3695 * Sanity check: avoid coming within bge_txrsvd
20c9a969 3696 * descriptors of the end of the ring. Also make
e92f005c 3697 * sure there are bge_txspare descriptors for
20c9a969
SZ
3698 * jumbo buffers' defragmentation.
3699 */
3700 if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
e92f005c 3701 (sc->bge_txrsvd + sc->bge_txspare)) {
9ed293e0 3702 ifq_set_oactive(&ifp->if_snd);
9db4b353 3703 ifq_prepend(&ifp->if_snd, m_head);
20c9a969
SZ
3704 break;
3705 }
3706
984263bc
MD
3707 /*
3708 * Pack the data into the transmit ring. If we
3709 * don't have room, set the OACTIVE flag and wait
3710 * for the NIC to drain the ring.
3711 */
aad8b3fe 3712 if (bge_encap(sc, &m_head, &prodidx, &nsegs)) {
9ed293e0 3713 ifq_set_oactive(&ifp->if_snd);
d40991ef 3714 IFNET_STAT_INC(ifp, oerrors, 1);
984263bc
MD
3715 break;
3716 }
3717
aad8b3fe
SZ
3718 if (nsegs >= sc->bge_tx_wreg) {
3719 bge_xmit(sc, prodidx);
3720 nsegs = 0;
3721 }
984263bc 3722
aad8b3fe 3723 ETHER_BPF_MTAP(ifp, m_head);
2f54d1d2 3724
aad8b3fe
SZ
3725 /*
3726 * Set a timeout in case the chip goes out to lunch.
3727 */
3728 ifp->if_timer = 5;
3729 }
984263bc 3730
aad8b3fe
SZ
3731 if (nsegs > 0)
3732 bge_xmit(sc, prodidx);
94db8384 3733 sc->bge_tx_prodidx = prodidx;
984263bc
MD
3734}
3735
3736static void
33c39a69 3737bge_init(void *xsc)
984263bc
MD
3738{
3739 struct bge_softc *sc = xsc;
33c39a69
JS
3740 struct ifnet *ifp = &sc->arpcom.ac_if;
3741 uint16_t *m;
35b635f6 3742 uint32_t mode;
984263bc 3743
aa65409c
SZ
3744 ASSERT_SERIALIZED(ifp->if_serializer);
3745
984263bc
MD
3746 /* Cancel pending I/O and flush buffers. */
3747 bge_stop(sc);
57b62224
SZ
3748
3749 bge_stop_fw(sc);
3750 bge_sig_pre_reset(sc, BGE_RESET_START);
984263bc 3751 bge_reset(sc);
57b62224
SZ
3752 bge_sig_legacy(sc, BGE_RESET_START);
3753 bge_sig_post_reset(sc, BGE_RESET_START);
3754
984263bc
MD
3755 bge_chipinit(sc);
3756
3757 /*
3758 * Init the various state machines, ring
3759 * control blocks and firmware.
3760 */
3761 if (bge_blockinit(sc)) {
c6fd6f3b 3762 if_printf(ifp, "initialization failure\n");
1436f9a0 3763 bge_stop(sc);
984263bc
MD
3764 return;
3765 }
3766
984263bc
MD
3767 /* Specify MTU. */
3768 CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
011c0f93 3769 ETHER_HDR_LEN + ETHER_CRC_LEN + EVL_ENCAPLEN);
984263bc
MD
3770
3771 /* Load our MAC address. */
33c39a69 3772 m = (uint16_t *)&sc->arpcom.ac_enaddr[0];
984263bc
MD
3773 CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
3774 CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
3775
3776 /* Enable or disable promiscuous mode as needed. */
6439b28a 3777 bge_setpromisc(sc);
984263bc
MD
3778
3779 /* Program multicast filter. */
3780 bge_setmulti(sc);
3781
3782 /* Init RX ring. */
1436f9a0
SZ
3783 if (bge_init_rx_ring_std(sc)) {
3784 if_printf(ifp, "RX ring initialization failed\n");
3785 bge_stop(sc);
3786 return;
3787 }
984263bc 3788
7e40b8c5
HP
3789 /*
3790 * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
3791	 * memory to ensure that the chip has in fact read the first
3792 * entry of the ring.
3793 */
3794 if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
33c39a69 3795 uint32_t v, i;
7e40b8c5
HP
3796 for (i = 0; i < 10; i++) {
3797 DELAY(20);
3798 v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
3799 if (v == (MCLBYTES - ETHER_ALIGN))
3800 break;
3801 }
3802 if (i == 10)
c6fd6f3b 3803 if_printf(ifp, "5705 A0 chip failed to load RX ring\n");
7e40b8c5
HP
3804 }
3805
984263bc 3806 /* Init jumbo RX ring. */
1436f9a0
SZ
3807 if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN)) {
3808 if (bge_init_rx_ring_jumbo(sc)) {
3809 if_printf(ifp, "Jumbo RX ring initialization failed\n");
3810 bge_stop(sc);
3811 return;
3812 }
3813 }
984263bc
MD
3814
3815 /* Init our RX return ring index */
3816 sc->bge_rx_saved_considx = 0;
3817
3818 /* Init TX ring. */
3819 bge_init_tx_ring(sc);
3820
35b635f6
SZ
3821 /* Enable TX MAC state machine lockup fix. */
3822 mode = CSR_READ_4(sc, BGE_TX_MODE);
3823 if (BGE_IS_5755_PLUS(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5906)
3824 mode |= BGE_TXMODE_MBUF_LOCKUP_FIX;
984263bc 3825 /* Turn on transmitter */
35b635f6 3826 CSR_WRITE_4(sc, BGE_TX_MODE, mode | BGE_TXMODE_ENABLE);
9062031e 3827 DELAY(100);
984263bc
MD
3828
3829 /* Turn on receiver */
ea320e53
SZ
3830 mode = CSR_READ_4(sc, BGE_RX_MODE);
3831 if (BGE_IS_5755_PLUS(sc))
3832 mode |= BGE_RXMODE_IPV6_ENABLE;
3833 CSR_WRITE_4(sc, BGE_RX_MODE, mode | BGE_RXMODE_ENABLE);
9062031e 3834 DELAY(10);
984263bc 3835
5fe22f8b
SZ
3836 /*
3837 * Set the number of good frames to receive after RX MBUF
3838 * Low Watermark has been reached. After the RX MAC receives
3839 * this number of frames, it will drop subsequent incoming
3840 * frames until the MBUF High Watermark is reached.
3841 */
3842 CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 2);
3843
308dcd8e
SZ
3844 if (sc->bge_irq_type == PCI_INTR_TYPE_MSI) {
3845 if (bootverbose) {
3846 if_printf(ifp, "MSI_MODE: %#x\n",
3847 CSR_READ_4(sc, BGE_MSI_MODE));
3848 }
3849
3850 /*
3851 * XXX
3852 * Linux driver turns it on for all chips supporting MSI?!
3853 */
3854 if (sc->bge_flags & BGE_FLAG_ONESHOT_MSI) {
3855 /*
3856 * XXX
3857 * According to 5722-PG101-R,
3858 * BGE_PCIE_TRANSACT_ONESHOT_MSI applies only to
3859 * BCM5906.
3860 */
3861 BGE_SETBIT(sc, BGE_PCIE_TRANSACT,
3862 BGE_PCIE_TRANSACT_ONESHOT_MSI);
3863 }
3864 }
3865
984263bc
MD
3866 /* Tell firmware we're alive. */
3867 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3868
ba39cc82 3869 /* Enable host interrupts if polling(4) is not enabled. */
91336132 3870 PCI_SETBIT(sc->bge_dev, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA, 4);
7e1b2526
SZ
3871#ifdef IFPOLL_ENABLE
3872 if (ifp->if_flags & IFF_NPOLLING)
ba39cc82
SZ
3873 bge_disable_intr(sc);
3874 else
315fe0ee 3875#endif
ba39cc82 3876 bge_enable_intr(sc);
984263bc 3877
984263bc 3878 ifp->if_flags |= IFF_RUNNING;
9ed293e0 3879 ifq_clr_oactive(&ifp->if_snd);
984263bc 3880
57b62224
SZ
3881 bge_ifmedia_upd(ifp);
3882
263489fb 3883 callout_reset(&sc->bge_stat_timer, hz, bge_tick, sc);
984263bc
MD
3884}
3885
3886/*
3887 * Set media options.
3888 */
3889static int
33c39a69 3890bge_ifmedia_upd(struct ifnet *ifp)
984263bc 3891{
33c39a69 3892 struct bge_softc *sc = ifp->if_softc;
984263bc
MD
3893
3894 /* If this is a 1000baseX NIC, enable the TBI port. */
0ecb11d7 3895 if (sc->bge_flags & BGE_FLAG_TBI) {
db861466
SZ
3896 struct ifmedia *ifm = &sc->bge_ifmedia;
3897
984263bc
MD
3898 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
3899 return(EINVAL);
db861466 3900
984263bc
MD
3901 switch(IFM_SUBTYPE(ifm->ifm_media)) {
3902 case IFM_AUTO:
70059b3c
JS
3903 /*
3904 * The BCM5704 ASIC appears to have a special
3905 * mechanism for programming the autoneg
3906 * advertisement registers in TBI mode.
3907 */
5c56d5d8
SZ
3908 if (!bge_fake_autoneg &&
3909 sc->bge_asicrev == BGE_ASICREV_BCM5704) {
70059b3c
JS
3910 uint32_t sgdig;
3911
3912 CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
3913 sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
3914 sgdig |= BGE_SGDIGCFG_AUTO |
3915 BGE_SGDIGCFG_PAUSE_CAP |
3916 BGE_SGDIGCFG_ASYM_PAUSE;
3917 CSR_WRITE_4(sc, BGE_SGDIG_CFG,
3918 sgdig | BGE_SGDIGCFG_SEND);
3919 DELAY(5);
3920 CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig);
3921 }
984263bc
MD
3922 break;
3923 case IFM_1000_SX:
3924 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
3925 BGE_CLRBIT(sc, BGE_MAC_MODE,
3926 BGE_MACMODE_HALF_DUPLEX);
3927 } else {
3928 BGE_SETBIT(sc, BGE_MAC_MODE,
3929 BGE_MACMODE_HALF_DUPLEX);
3930 }
b87c7034 3931 DELAY(40);
984263bc
MD
3932 break;
3933 default:
3934 return(EINVAL);
3935 }
db861466
SZ
3936 } else {
3937 struct mii_data *mii = device_get_softc(sc->bge_miibus);
984263bc 3938
db861466 3939 sc->bge_link_evt++;
3f82ed83 3940 sc->bge_link = 0;
db861466
SZ
3941 if (mii->mii_instance) {
3942 struct mii_softc *miisc;
984263bc 3943
db861466
SZ
3944 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
3945 mii_phy_reset(miisc);
3946 }
3947 mii_mediachg(mii);
5225ba10
SZ
3948
3949 /*
3950 * Force an interrupt so that we will call bge_link_upd
3951 * if needed and clear any pending link state attention.
3952		 * Without this we would not get any further interrupts
3953		 * for link state changes, would never bring the link up,
3954		 * and would be unable to send in bge_start. The only way
3955		 * to get things working then was to receive a packet and
3956		 * take an RX interrupt.
3957 *
3958		 * bge_tick should help for fiber cards, so we might not
3959		 * need to do this here when BGE_FLAG_TBI is set; but since
3960		 * we poll for fiber anyway it should do no harm.
3961 */
14852ccc 3962 if (BGE_IS_CRIPPLED(sc))
5225ba10
SZ
3963 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
3964 else
3965 BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
db861466 3966 }
984263bc
MD
3967 return(0);
3968}
3969
3970/*
3971 * Report current media status.
3972 */
3973static void
33c39a69 3974bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
984263bc 3975{
33c39a69 3976 struct bge_softc *sc = ifp->if_softc;
984263bc 3977
57b62224
SZ
3978 if ((ifp->if_flags & IFF_RUNNING) == 0)
3979 return;
3980
0ecb11d7 3981 if (sc->bge_flags & BGE_FLAG_TBI) {
984263bc
MD
3982 ifmr->ifm_status = IFM_AVALID;
3983 ifmr->ifm_active = IFM_ETHER;
3984 if (CSR_READ_4(sc, BGE_MAC_STS) &
db861466 3985 BGE_MACSTAT_TBI_PCS_SYNCHED) {
984263bc 3986 ifmr->ifm_status |= IFM_ACTIVE;
db861466
SZ
3987 } else {
3988 ifmr->ifm_active |= IFM_NONE;
3989 return;
3990 }
3991
984263bc
MD
3992 ifmr->ifm_active |= IFM_1000_SX;
3993 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
3994 ifmr->ifm_active |= IFM_HDX;
3995 else
3996 ifmr->ifm_active |= IFM_FDX;
db861466
SZ
3997 } else {
3998 struct mii_data *mii = device_get_softc(sc->bge_miibus);
984263bc 3999
db861466
SZ
4000 mii_pollstat(mii);
4001 ifmr->ifm_active = mii->mii_media_active;
4002 ifmr->ifm_status = mii->mii_media_status;
4003 }
984263bc
MD
4004}
4005
4006static int
33c39a69 4007bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
984263bc
MD
4008{
4009 struct bge_softc *sc = ifp->if_softc;
98dabdac 4010 struct ifreq *ifr = (struct ifreq *)data;
9a717c15 4011 int mask, error = 0;
984263bc 4012
aa65409c
SZ
4013 ASSERT_SERIALIZED(ifp->if_serializer);
4014
98dabdac 4015 switch (command) {
984263bc 4016 case SIOCSIFMTU:
0ecb11d7
SZ
4017 if ((!BGE_IS_JUMBO_CAPABLE(sc) && ifr->ifr_mtu > ETHERMTU) ||
4018 (BGE_IS_JUMBO_CAPABLE(sc) &&
4019 ifr->ifr_mtu > BGE_JUMBO_MTU)) {
984263bc 4020 error = EINVAL;
0ecb11d7 4021 } else if (ifp->if_mtu != ifr->ifr_mtu) {
984263bc 4022 ifp->if_mtu = ifr->ifr_mtu;
8ecafb73
SZ
4023 if (ifp->if_flags & IFF_RUNNING)
4024 bge_init(sc);
984263bc
MD
4025 }
4026 break;
4027 case SIOCSIFFLAGS:
4028 if (ifp->if_flags & IFF_UP) {
6439b28a 4029 if (ifp->if_flags & IFF_RUNNING) {
98dabdac 4030 mask = ifp->if_flags ^ sc->bge_if_flags;
6439b28a
SZ
4031
4032 /*
4033 * If only the state of the PROMISC flag
4034 * changed, then just use the 'set promisc
4035 * mode' command instead of reinitializing
4036 * the entire NIC. Doing a full re-init
4037 * means reloading the firmware and waiting
4038 * for it to start up, which may take a
4039 * second or two. Similarly for ALLMULTI.
4040 */
98dabdac 4041 if (mask & IFF_PROMISC)
6439b28a 4042 bge_setpromisc(sc);
98dabdac 4043 if (mask & IFF_ALLMULTI)
6439b28a
SZ
4044 bge_setmulti(sc);
4045 } else {
984263bc 4046 bge_init(sc);
6439b28a 4047 }
8ecafb73
SZ
4048 } else if (ifp->if_flags & IFF_RUNNING) {
4049 bge_stop(sc);
984263bc
MD
4050 }
4051 sc->bge_if_flags = ifp->if_flags;
984263bc
MD
4052 break;
4053 case SIOCADDMULTI:
4054 case SIOCDELMULTI:
98dabdac 4055 if (ifp->if_flags & IFF_RUNNING)
984263bc 4056 bge_setmulti(sc);
984263bc
MD
4057 break;
4058 case SIOCSIFMEDIA:
4059 case SIOCGIFMEDIA:
0ecb11d7 4060 if (sc->bge_flags & BGE_FLAG_TBI) {
984263bc
MD
4061 error = ifmedia_ioctl(ifp, ifr,
4062 &sc->bge_ifmedia, command);
4063 } else {
98dabdac
SZ
4064 struct mii_data *mii;
4065
984263bc
MD
4066 mii = device_get_softc(sc->bge_miibus);
4067 error = ifmedia_ioctl(ifp, ifr,
98dabdac 4068 &mii->mii_media, command);
984263bc
MD
4069 }
4070 break;
4071 case SIOCSIFCAP:
4072 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
4073 if (mask & IFCAP_HWCSUM) {
71e2c3e7 4074 ifp->if_capenable ^= (mask & IFCAP_HWCSUM);
e92f005c
SZ
4075 if (ifp->if_capenable & IFCAP_TXCSUM)
4076 ifp->if_hwassist |= BGE_CSUM_FEATURES;
984263bc 4077 else
e92f005c
SZ
4078 ifp->if_hwassist &= ~BGE_CSUM_FEATURES;
4079 }
4080 if (mask & IFCAP_TSO) {
4081 ifp->if_capenable ^= IFCAP_TSO;
4082 if (ifp->if_capenable & IFCAP_TSO)
4083 ifp->if_hwassist |= CSUM_TSO;
4084 else
4085 ifp->if_hwassist &= ~CSUM_TSO;
984263bc 4086 }
984263bc
MD
4087 break;
4088 default:
4cde4dd5 4089 error = ether_ioctl(ifp, command, data);
984263bc
MD
4090 break;
4091 }
98dabdac 4092 return error;
984263bc
MD
4093}
4094
4095static void
33c39a69 4096bge_watchdog(struct ifnet *ifp)
984263bc 4097{
33c39a69 4098 struct bge_softc *sc = ifp->if_softc;
984263bc 4099
c6fd6f3b 4100 if_printf(ifp, "watchdog timeout -- resetting\n");
984263bc 4101
984263bc
MD
4102 bge_init(sc);
4103
d40991ef 4104 IFNET_STAT_INC(ifp, oerrors, 1);
2f54d1d2
SZ
4105
4106 if (!ifq_is_empty(&ifp->if_snd))
9db4b353 4107 if_devstart(ifp);
984263bc
MD
4108}
4109
4110/*
4111 * Stop the adapter and free any mbufs allocated to the
4112 * RX and TX lists.
4113 */
4114static void
33c39a69 4115bge_stop(struct bge_softc *sc)
984263bc 4116{
33c39a69 4117 struct ifnet *ifp = &sc->arpcom.ac_if;
984263bc 4118
aa65409c
SZ
4119 ASSERT_SERIALIZED(ifp->if_serializer);
4120
263489fb 4121 callout_stop(&sc->bge_stat_timer);
984263bc 4122
57b62224
SZ
4123 /* Disable host interrupts. */
4124 bge_disable_intr(sc);
4125
4126 /*
4127 * Tell firmware we're shutting down.
4128 */
4129 bge_stop_fw(sc);
4130 bge_sig_pre_reset(sc, BGE_RESET_SHUTDOWN);
4131
984263bc
MD
4132 /*
4133 * Disable all of the receiver blocks
4134 */
6ac6e1b9
SZ
4135 bge_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
4136 bge_stop_block(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
4137 bge_stop_block(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
4138 if (BGE_IS_5700_FAMILY(sc))
4139 bge_stop_block(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
4140 bge_stop_block(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
4141 bge_stop_block(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
4142 bge_stop_block(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
984263bc
MD
4143
4144 /*
4145 * Disable all of the transmit blocks
4146 */
6ac6e1b9
SZ
4147 bge_stop_block(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
4148 bge_stop_block(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
4149 bge_stop_block(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
4150 bge_stop_block(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
4151 bge_stop_block(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
4152 if (BGE_IS_5700_FAMILY(sc))
4153 bge_stop_block(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
4154 bge_stop_block(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
984263bc
MD
4155
4156 /*
4157 * Shut down all of the memory managers and related
4158 * state machines.
4159 */
6ac6e1b9
SZ
4160 bge_stop_block(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
4161 bge_stop_block(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
4162 if (BGE_IS_5700_FAMILY(sc))
4163 bge_stop_block(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
984263bc
MD
4164 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
4165 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
0ecb11d7 4166 if (!BGE_IS_5705_PLUS(sc)) {
7e40b8c5
HP
4167 BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
4168 BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
4169 }
984263bc 4170
57b62224
SZ
4171 bge_reset(sc);
4172 bge_sig_legacy(sc, BGE_RESET_SHUTDOWN);
4173 bge_sig_post_reset(sc, BGE_RESET_SHUTDOWN);
984263bc
MD
4174
4175 /*
57b62224 4176 * Keep the ASF firmware running if up.
984263bc 4177 */
57b62224
SZ
4178 if (sc->bge_asf_mode & ASF_STACKUP)
4179 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
4180 else
4181 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
984263bc
MD
4182
4183 /* Free the RX lists. */
4184 bge_free_rx_ring_std(sc);
4185
4186 /* Free jumbo RX list. */
0ecb11d7 4187 if (BGE_IS_JUMBO_CAPABLE(sc))
7e40b8c5 4188 bge_free_rx_ring_jumbo(sc);
984263bc
MD
4189
4190 /* Free TX buffers. */
4191 bge_free_tx_ring(sc);
4192
90ad1c96 4193 sc->bge_status_tag = 0;
984263bc 4194 sc->bge_link = 0;
055d06f0 4195 sc->bge_coal_chg = 0;
984263bc
MD
4196
4197 sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
4198
9ed293e0
SZ
4199 ifp->if_flags &= ~IFF_RUNNING;
4200 ifq_clr_oactive(&ifp->if_snd);
e2c1cee4 4201 ifp->if_timer = 0;
984263bc
MD
4202}
4203
4204/*
4205 * Stop all chip I/O so that the kernel's probe routines don't
4206 * get confused by errant DMAs when rebooting.
4207 */
4208static void
33c39a69 4209bge_shutdown(device_t dev)
984263bc 4210{
33c39a69 4211 struct bge_softc *sc = device_get_softc(dev);
aa65409c 4212 struct ifnet *ifp = &sc->arpcom.ac_if;
984263bc 4213
aa65409c
SZ
4214 lwkt_serialize_enter(ifp->if_serializer);
4215 bge_stop(sc);
aa65409c
SZ
4216 lwkt_serialize_exit(ifp->if_serializer);
4217}
4218
4219static int
4220bge_suspend(device_t dev)
4221{
4222 struct bge_softc *sc = device_get_softc(dev);
4223 struct ifnet *ifp = &sc->arpcom.ac_if;
4224
4225 lwkt_serialize_enter(ifp->if_serializer);
4226 bge_stop(sc);
4227 lwkt_serialize_exit(ifp->if_serializer);
4228
4229 return 0;
4230}
4231
4232static int
4233bge_resume(device_t dev)
4234{
4235 struct bge_softc *sc = device_get_softc(dev);
4236 struct ifnet *ifp = &sc->arpcom.ac_if;
4237
4238 lwkt_serialize_enter(ifp->if_serializer);
4239
4240 if (ifp->if_flags & IFF_UP) {
4241 bge_init(sc);
4242
20c9a969 4243 if (!ifq_is_empty(&ifp->if_snd))
9db4b353 4244 if_devstart(ifp);
aa65409c
SZ
4245 }
4246
4247 lwkt_serialize_exit(ifp->if_serializer);
4248
4249 return 0;
984263bc 4250}
6439b28a
SZ
4251
4252static void
4253bge_setpromisc(struct bge_softc *sc)
4254{
4255 struct ifnet *ifp = &sc->arpcom.ac_if;
4256
4257 if (ifp->if_flags & IFF_PROMISC)
4258 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
4259 else
4260 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
4261}
20c9a969 4262
20c9a969
SZ
4263static void
4264bge_dma_free(struct bge_softc *sc)
4265{
4266 int i;
4267
ddca511d
SZ
4268	/* Destroy RX mbuf DMA structures. */
4269 if (sc->bge_cdata.bge_rx_mtag != NULL) {
20c9a969 4270 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
ddca511d
SZ
4271 bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag,
4272 sc->bge_cdata.bge_rx_std_dmamap[i]);
20c9a969 4273 }
1436f9a0
SZ
4274 bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag,
4275 sc->bge_cdata.bge_rx_tmpmap);
ddca511d
SZ
4276 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_mtag);
4277 }
20c9a969 4278
ddca511d
SZ
4279	/* Destroy TX mbuf DMA structures. */
4280 if (sc->bge_cdata.bge_tx_mtag != NULL) {
20c9a969 4281 for (i = 0; i < BGE_TX_RING_CNT; i++) {
ddca511d
SZ
4282 bus_dmamap_destroy(sc->bge_cdata.bge_tx_mtag,
4283 sc->bge_cdata.bge_tx_dmamap[i]);
20c9a969 4284 }
ddca511d 4285 bus_dma_tag_destroy(sc->bge_cdata.bge_tx_mtag);
20c9a969
SZ
4286 }
4287
4288 /* Destroy standard RX ring */
4289 bge_dma_block_free(sc->bge_cdata.bge_rx_std_ring_tag,
4290 sc->bge_cdata.bge_rx_std_ring_map,
4291 sc->bge_ldata.bge_rx_std_ring);
4292
0ecb11d7 4293 if (BGE_IS_JUMBO_CAPABLE(sc))
20c9a969
SZ
4294 bge_free_jumbo_mem(sc);
4295
4296 /* Destroy RX return ring */
4297 bge_dma_block_free(sc->bge_cdata.bge_rx_return_ring_tag,
4298 sc->bge_cdata.bge_rx_return_ring_map,
4299 sc->bge_ldata.bge_rx_return_ring);
4300
4301 /* Destroy TX ring */
4302 bge_dma_block_free(sc->bge_cdata.bge_tx_ring_tag,
4303 sc->bge_cdata.bge_tx_ring_map,
4304 sc->bge_ldata.bge_tx_ring);
4305
4306 /* Destroy status block */
4307 bge_dma_block_free(sc->bge_cdata.bge_status_tag,
4308 sc->bge_cdata.bge_status_map,
4309 sc->bge_ldata.bge_status_block);
4310
4311 /* Destroy statistics block */
4312 bge_dma_block_free(sc->bge_cdata.bge_stats_tag,
4313 sc->bge_cdata.bge_stats_map,
4314 sc->bge_ldata.bge_stats);
4315
4316 /* Destroy the parent tag */
4317 if (sc->bge_cdata.bge_parent_tag != NULL)
4318 bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag);
4319}
4320
4321static int
4322bge_dma_alloc(struct bge_softc *sc)
4323{
4324 struct ifnet *ifp = &sc->arpcom.ac_if;
98e35a04 4325 int i, error;
d723dbb5 4326 bus_addr_t lowaddr;
e92f005c 4327 bus_size_t txmaxsz;
d723dbb5
SZ
4328
4329 lowaddr = BUS_SPACE_MAXADDR;
4330 if (sc->bge_flags & BGE_FLAG_MAXADDR_40BIT)
4331 lowaddr = BGE_DMA_MAXADDR_40BIT;
20c9a969
SZ
4332
4333 /*
4334 * Allocate the parent bus DMA tag appropriate for PCI.
d79bf78f
SZ
4335 *
4336 * All of the NetExtreme/NetLink controllers have 4GB boundary
4337 * DMA bug.
4338 * Whenever an address crosses a multiple of the 4GB boundary
4339 * (including 4GB, 8Gb, 12Gb, etc.) and makes the transition
4340 * from 0xX_FFFF_FFFF to 0x(X+1)_0000_0000 an internal DMA
4341 * state machine will lockup and cause the device to hang.
20c9a969 4342 */
d79bf78f 4343 error = bus_dma_tag_create(NULL, 1, BGE_DMA_BOUNDARY_4G,
d723dbb5 4344 lowaddr, BUS_SPACE_MAXADDR,
20c9a969 4345 NULL, NULL,
98e35a04 4346 BUS_SPACE_MAXSIZE_32BIT, 0,
20c9a969
SZ
4347 BUS_SPACE_MAXSIZE_32BIT,
4348 0, &sc->bge_cdata.bge_parent_tag);
4349 if (error) {
4350 if_printf(ifp, "could not allocate parent dma tag\n");
4351 return error;
4352 }
4353
4354 /*
ddca511d 4355 * Create DMA tag and maps for RX mbufs.
20c9a969 4356 */
20c9a969
SZ
4357 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1, 0,
4358 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
ddca511d 4359 NULL, NULL, MCLBYTES, 1, MCLBYTES,
98e35a04 4360 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK,
ddca511d 4361 &sc->bge_cdata.bge_rx_mtag);
20c9a969 4362 if (error) {
ddca511d 4363 if_printf(ifp, "could not allocate RX mbuf dma tag\n");
20c9a969
SZ
4364 return error;
4365 }
4366
1436f9a0
SZ
4367 error = bus_dmamap_create(sc->bge_cdata.bge_rx_mtag,
4368 BUS_DMA_WAITOK, &sc->bge_cdata.bge_rx_tmpmap);
4369 if (error) {
4370 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_mtag);
4371 sc->bge_cdata.bge_rx_mtag = NULL;
4372 return error;
4373 }
4374
20c9a969 4375 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
ddca511d 4376 error = bus_dmamap_create(sc->bge_cdata.bge_rx_mtag,
98e35a04 4377 BUS_DMA_WAITOK,
20c9a969
SZ
4378 &sc->bge_cdata.bge_rx_std_dmamap[i]);
4379 if (error) {
4380 int j;
4381
4382 for (j = 0; j < i; ++j) {
ddca511d 4383 bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag,
20c9a969
SZ
4384 sc->bge_cdata.bge_rx_std_dmamap[j]);
4385 }
ddca511d
SZ
4386 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_mtag);
4387 sc->bge_cdata.bge_rx_mtag = NULL;
20c9a969
SZ
4388
4389 if_printf(ifp, "could not create DMA map for RX\n");
4390 return error;
4391 }
4392 }
4393
ddca511d
SZ
4394 /*
4395 * Create DMA tag and maps for TX mbufs.
4396 */
e92f005c
SZ
4397 if (sc->bge_flags & BGE_FLAG_TSO)
4398 txmaxsz = IP_MAXPACKET + sizeof(struct ether_vlan_header);
4399 else
4400 txmaxsz = BGE_JUMBO_FRAMELEN;
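	/*
	 * IP_MAXPACKET (65535) plus an Ethernet/VLAN header bounds the
	 * largest chain TSO can hand us; without TSO a jumbo frame is
	 * the worst case.
	 */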
ddca511d
SZ
4401 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1, 0,
4402 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
4403 NULL, NULL,
e92f005c 4404 txmaxsz, BGE_NSEG_NEW, PAGE_SIZE,
ddca511d
SZ
4405 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK |
4406 BUS_DMA_ONEBPAGE,
4407 &sc->bge_cdata.bge_tx_mtag);
4408 if (error) {
4409 if_printf(ifp, "could not allocate TX mbuf dma tag\n");
4410 return error;
4411 }
4412
20c9a969 4413 for (i = 0; i < BGE_TX_RING_CNT; i++) {
ddca511d
SZ
4414 error = bus_dmamap_create(sc->bge_cdata.bge_tx_mtag,
4415 BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
20c9a969
SZ
4416 &sc->bge_cdata.bge_tx_dmamap[i]);
4417 if (error) {
4418 int j;
4419
20c9a969 4420 for (j = 0; j < i; ++j) {
ddca511d 4421 bus_dmamap_destroy(sc->bge_cdata.bge_tx_mtag,
20c9a969
SZ
4422 sc->bge_cdata.bge_tx_dmamap[j]);
4423 }
ddca511d
SZ
4424 bus_dma_tag_destroy(sc->bge_cdata.bge_tx_mtag);
4425 sc->bge_cdata.bge_tx_mtag = NULL;
20c9a969
SZ
4426
4427 if_printf(ifp, "could not create DMA map for TX\n");
4428 return error;
4429 }
4430 }
4431
4432 /*
4433	 * Create DMA structures for standard RX ring.
4434 */
4435 error = bge_dma_block_alloc(sc, BGE_STD_RX_RING_SZ,
4436 &sc->bge_cdata.bge_rx_std_ring_tag,
4437 &sc->bge_cdata.bge_rx_std_ring_map,
da44240f 4438 (void *)&sc->bge_ldata.bge_rx_std_ring,
20c9a969
SZ
4439 &sc->bge_ldata.bge_rx_std_ring_paddr);
4440 if (error) {
4441 if_printf(ifp, "could not create std RX ring\n");
4442 return error;
4443 }
4444
4445 /*
4446 * Create jumbo buffer pool.
4447 */
0ecb11d7 4448 if (BGE_IS_JUMBO_CAPABLE(sc)) {
20c9a969
SZ
4449 error = bge_alloc_jumbo_mem(sc);
4450 if (error) {
4451 if_printf(ifp, "could not create jumbo buffer pool\n");
4452 return error;
4453 }
4454 }
4455
4456 /*
4457	 * Create DMA structures for RX return ring.
4458 */
8ff8bce6
SZ
4459 error = bge_dma_block_alloc(sc,
4460 BGE_RX_RTN_RING_SZ(sc->bge_return_ring_cnt),
4461 &sc->bge_cdata.bge_rx_return_ring_tag,
4462 &sc->bge_cdata.bge_rx_return_ring_map,
4463 (void *)&sc->bge_ldata.bge_rx_return_ring,
4464 &sc->bge_ldata.bge_rx_return_ring_paddr);
20c9a969
SZ
4465 if (error) {
4466 if_printf(ifp, "could not create RX ret ring\n");
4467 return error;
4468 }
4469
4470 /*
4471	 * Create DMA structures for TX ring.
4472 */
4473 error = bge_dma_block_alloc(sc, BGE_TX_RING_SZ,
4474 &sc->bge_cdata.bge_tx_ring_tag,
4475 &sc->bge_cdata.bge_tx_ring_map,
da44240f 4476 (void *)&sc->bge_ldata.bge_tx_ring,
20c9a969
SZ
4477 &sc->bge_ldata.bge_tx_ring_paddr);
4478 if (error) {
4479 if_printf(ifp, "could not create TX ring\n");
4480 return error;
4481 }
4482
4483 /*
4484	 * Create DMA structures for status block.
4485 */
4486 error = bge_dma_block_alloc(sc, BGE_STATUS_BLK_SZ,
4487 &sc->bge_cdata.bge_status_tag,
4488 &sc->bge_cdata.bge_status_map,
da44240f 4489 (void *)&sc->bge_ldata.bge_status_block,
20c9a969
SZ
4490 &sc->bge_ldata.bge_status_block_paddr);
4491 if (error) {
4492 if_printf(ifp, "could not create status block\n");
4493 return error;
4494 }
4495
4496 /*
4497	 * Create DMA structures for statistics block.
4498 */
4499 error = bge_dma_block_alloc(sc, BGE_STATS_SZ,
4500 &sc->bge_cdata.bge_stats_tag,
4501 &sc->bge_cdata.bge_stats_map,
da44240f 4502 (void *)&sc->bge_ldata.bge_stats,
20c9a969
SZ
4503 &sc->bge_ldata.bge_stats_paddr);
4504 if (error) {
4505 if_printf(ifp, "could not create stats block\n");
4506 return error;
4507 }
4508 return 0;
4509}
4510
4511static int
4512bge_dma_block_alloc(struct bge_softc *sc, bus_size_t size, bus_dma_tag_t *tag,
4513 bus_dmamap_t *map, void **addr, bus_addr_t *paddr)
4514{
b6e13fa1 4515 bus_dmamem_t dmem;
20c9a969
SZ
4516 int error;
4517
b6e13fa1
SZ
4518 error = bus_dmamem_coherent(sc->bge_cdata.bge_parent_tag, PAGE_SIZE, 0,
4519 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
4520 size, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
4521 if (error)
20c9a969 4522 return error;
20c9a969 4523
b6e13fa1
SZ
4524 *tag = dmem.dmem_tag;
4525 *map = dmem.dmem_map;
4526 *addr = dmem.dmem_addr;
4527 *paddr = dmem.dmem_busaddr;
20c9a969
SZ
4528
4529 return 0;
4530}
4531
4532static void
4533bge_dma_block_free(bus_dma_tag_t tag, bus_dmamap_t map, void *addr)
4534{
4535 if (tag != NULL) {
4536 bus_dmamap_unload(tag, map);
4537 bus_dmamem_free(tag, addr, map);
4538 bus_dma_tag_destroy(tag);
4539 }
4540}
db861466
SZ
4541
4542/*
4543 * Grrr. The link status word in the status block does
4544 * not work correctly on the BCM5700 rev AX and BX chips,
4545 * according to all available information. Hence, we have
4546 * to enable MII interrupts in order to properly obtain
4547 * async link changes. Unfortunately, this also means that
4548 * we have to read the MAC status register to detect link
4549 * changes, thereby adding an additional register access to
4550 * the interrupt handler.
4551 *
4552 * XXX: perhaps the link state detection procedure used for
4553 * BGE_CHIPID_BCM5700_B2 can be used for other BCM5700 revisions.
4554 */
4555static void
4556bge_bcm5700_link_upd(struct bge_softc *sc, uint32_t status __unused)
4557{
4558 struct ifnet *ifp = &sc->arpcom.ac_if;
4559 struct mii_data *mii = device_get_softc(sc->bge_miibus);
4560
4561 mii_pollstat(mii);
4562
4563 if (!sc->bge_link &&
4564 (mii->mii_media_status & IFM_ACTIVE) &&
4565 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
4566 sc->bge_link++;
4567 if (bootverbose)
4568 if_printf(ifp, "link UP\n");
4569 } else if (sc->bge_link &&
4570 (!(mii->mii_media_status & IFM_ACTIVE) ||
4571 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
4572 sc->bge_link = 0;
4573 if (bootverbose)
4574 if_printf(ifp, "link DOWN\n");
4575 }
4576
4577 /* Clear the interrupt. */
4578 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_MI_INTERRUPT);
4579 bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR);
4580 bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR, BRGPHY_INTRS);
4581}
4582
4583static void
4584bge_tbi_link_upd(struct bge_softc *sc, uint32_t status)
4585{
4586 struct ifnet *ifp = &sc->arpcom.ac_if;
4587
4588#define PCS_ENCODE_ERR (BGE_MACSTAT_PORT_DECODE_ERROR|BGE_MACSTAT_MI_COMPLETE)
4589
4590 /*
4591 * Sometimes PCS encoding errors are detected in
4592 * TBI mode (on fiber NICs), and for some reason
4593 * the chip will signal them as link changes.
4594 * If we get a link change event, but the 'PCS
4595 * encoding error' bit in the MAC status register
4596 * is set, don't bother doing a link check.
4597 * This avoids spurious "gigabit link up" messages
4598 * that sometimes appear on fiber NICs during
4599 * periods of heavy traffic.
4600 */
4601 if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
4602 if (!sc->bge_link) {
4603 sc->bge_link++;
4604 if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
4605 BGE_CLRBIT(sc, BGE_MAC_MODE,
4606 BGE_MACMODE_TBI_SEND_CFGS);
b87c7034 4607 DELAY(40);
db861466
SZ
4608 }
4609 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
4610
4611 if (bootverbose)
4612 if_printf(ifp, "link UP\n");
4613
4614 ifp->if_link_state = LINK_STATE_UP;
4615 if_link_state_change(ifp);
4616 }
4617 } else if ((status & PCS_ENCODE_ERR) != PCS_ENCODE_ERR) {
4618 if (sc->bge_link) {
4619 sc->bge_link = 0;
4620
4621 if (bootverbose)
4622 if_printf(ifp, "link DOWN\n");
4623
4624 ifp->if_link_state = LINK_STATE_DOWN;
4625 if_link_state_change(ifp);
4626 }
4627 }
4628
4629#undef PCS_ENCODE_ERR
4630
4631 /* Clear the attention. */
4632 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
4633 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
4634 BGE_MACSTAT_LINK_CHANGED);
4635}
4636
4637static void
4638bge_copper_link_upd(struct bge_softc *sc, uint32_t status __unused)
4639{
2dd0af35
SZ
4640 struct ifnet *ifp = &sc->arpcom.ac_if;
4641 struct mii_data *mii = device_get_softc(sc->bge_miibus);
db861466 4642
2dd0af35
SZ
4643 mii_pollstat(mii);
4644 bge_miibus_statchg(sc->bge_dev);
db861466 4645
2dd0af35
SZ
4646 if (bootverbose) {
4647 if (sc->bge_link)
4648 if_printf(ifp, "link UP\n");
4649 else
4650 if_printf(ifp, "link DOWN\n");
4651 }
4652
4653 /* Clear the attention. */
4654 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
4655 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
4656 BGE_MACSTAT_LINK_CHANGED);
4657}
4658
4659static void
4660bge_autopoll_link_upd(struct bge_softc *sc, uint32_t status __unused)
4661{
4662 struct ifnet *ifp = &sc->arpcom.ac_if;
4663 struct mii_data *mii = device_get_softc(sc->bge_miibus);
4664
4665 mii_pollstat(mii);
4666
4667 if (!sc->bge_link &&
4668 (mii->mii_media_status & IFM_ACTIVE) &&
4669 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
4670 sc->bge_link++;
4671 if (bootverbose)
4672 if_printf(ifp, "link UP\n");
4673 } else if (sc->bge_link &&
4674 (!(mii->mii_media_status & IFM_ACTIVE) ||
4675 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
4676 sc->bge_link = 0;
4677 if (bootverbose)
4678 if_printf(ifp, "link DOWN\n");
db861466
SZ
4679 }
4680
4681 /* Clear the attention. */
4682 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
4683 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
4684 BGE_MACSTAT_LINK_CHANGED);
4685}
055d06f0
SZ
4686
4687static int
4688bge_sysctl_rx_coal_ticks(SYSCTL_HANDLER_ARGS)
4689{
4690 struct bge_softc *sc = arg1;
4691
4692 return bge_sysctl_coal_chg(oidp, arg1, arg2, req,
489391fe
SZ
4693 &sc->bge_rx_coal_ticks,
4694 BGE_RX_COAL_TICKS_MIN, BGE_RX_COAL_TICKS_MAX,
4695 BGE_RX_COAL_TICKS_CHG);
055d06f0
SZ
4696}
4697
4698static int
4699bge_sysctl_tx_coal_ticks(SYSCTL_HANDLER_ARGS)
4700{
4701 struct bge_softc *sc = arg1;
4702
4703 return bge_sysctl_coal_chg(oidp, arg1, arg2, req,
489391fe
SZ
4704 &sc->bge_tx_coal_ticks,
4705 BGE_TX_COAL_TICKS_MIN, BGE_TX_COAL_TICKS_MAX,
4706 BGE_TX_COAL_TICKS_CHG);
055d06f0
SZ
4707}
4708
4709static int
90ad1c96
SZ
4710bge_sysctl_rx_coal_bds(SYSCTL_HANDLER_ARGS)
4711{
4712 struct bge_softc *sc = arg1;
4713
4714 return bge_sysctl_coal_chg(oidp, arg1, arg2, req,
489391fe
SZ
4715 &sc->bge_rx_coal_bds,
4716 BGE_RX_COAL_BDS_MIN, BGE_RX_COAL_BDS_MAX,
4717 BGE_RX_COAL_BDS_CHG);
90ad1c96
SZ
4718}
4719
4720static int
4721bge_sysctl_tx_coal_bds(SYSCTL_HANDLER_ARGS)
4722{
4723 struct bge_softc *sc = arg1;
4724
4725 return bge_sysctl_coal_chg(oidp, arg1, arg2, req,
489391fe
SZ
4726 &sc->bge_tx_coal_bds,
4727 BGE_TX_COAL_BDS_MIN, BGE_TX_COAL_BDS_MAX,
4728 BGE_TX_COAL_BDS_CHG);
90ad1c96
SZ
4729}
4730
4731static int
4732bge_sysctl_rx_coal_ticks_int(SYSCTL_HANDLER_ARGS)
4733{
4734 struct bge_softc *sc = arg1;
4735
4736 return bge_sysctl_coal_chg(oidp, arg1, arg2, req,
489391fe
SZ
4737 &sc->bge_rx_coal_ticks_int,
4738 BGE_RX_COAL_TICKS_MIN, BGE_RX_COAL_TICKS_MAX,
4739 BGE_RX_COAL_TICKS_INT_CHG);
90ad1c96
SZ
4740}
4741
4742static int
4743bge_sysctl_tx_coal_ticks_int(SYSCTL_HANDLER_ARGS)
4744{
4745 struct bge_softc *sc = arg1;
4746
4747 return bge_sysctl_coal_chg(oidp, arg1, arg2, req,
489391fe
SZ
4748 &sc->bge_tx_coal_ticks_int,
4749 BGE_TX_COAL_TICKS_MIN, BGE_TX_COAL_TICKS_MAX,
4750 BGE_TX_COAL_TICKS_INT_CHG);
90ad1c96
SZ
4751}
4752
4753static int
4754bge_sysctl_rx_coal_bds_int(SYSCTL_HANDLER_ARGS)
055d06f0
SZ
4755{
4756 struct bge_softc *sc = arg1;
4757
4758 return bge_sysctl_coal_chg(oidp, arg1, arg2, req,
489391fe
SZ
4759 &sc->bge_rx_coal_bds_int,
4760 BGE_RX_COAL_BDS_MIN, BGE_RX_COAL_BDS_MAX,
4761 BGE_RX_COAL_BDS_INT_CHG);
055d06f0
SZ
4762}
4763
4764static int
90ad1c96 4765bge_sysctl_tx_coal_bds_int(SYSCTL_HANDLER_ARGS)
055d06f0
SZ
4766{
4767 struct bge_softc *sc = arg1;
4768
4769 return bge_sysctl_coal_chg(oidp, arg1, arg2, req,
489391fe
SZ
4770 &sc->bge_tx_coal_bds_int,
4771 BGE_TX_COAL_BDS_MIN, BGE_TX_COAL_BDS_MAX,
4772 BGE_TX_COAL_BDS_INT_CHG);
055d06f0
SZ
4773}
4774
4775static int
4776bge_sysctl_coal_chg(SYSCTL_HANDLER_ARGS, uint32_t *coal,
489391fe 4777 int coal_min, int coal_max, uint32_t coal_chg_mask)
055d06f0
SZ
4778{
4779 struct bge_softc *sc = arg1;
4780 struct ifnet *ifp = &sc->arpcom.ac_if;
4781 int error = 0, v;
4782
4783 lwkt_serialize_enter(ifp->if_serializer);
4784
4785 v = *coal;
4786 error = sysctl_handle_int(oidp, &v, 0, req);
4787 if (!error && req->newptr != NULL) {
 4788 if (v < coal_min || v > coal_max) {
4789 error = EINVAL;
4790 } else {
4791 *coal = v;
4792 sc->bge_coal_chg |= coal_chg_mask;
4793 }
4794 }
4795
4796 lwkt_serialize_exit(ifp->if_serializer);
4797 return error;
4798}
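/*
 * [Editor's sketch, not in the source: these handlers back the
 * per-device coalescing sysctl nodes created at attach time, so a
 * userland tuning change such as
 *
 *	sysctl dev.bge0.rx_coal_ticks=150
 *
 * (node name assumed; the exact tree depends on how attach builds it)
 * lands in bge_sysctl_coal_chg(), which range-checks the value, stores
 * it, and latches a BGE_*_CHG bit; the chip registers are rewritten
 * only later, when bge_coal_change() runs.]
 */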
4799
4800static void
4801bge_coal_change(struct bge_softc *sc)
4802{
4803 struct ifnet *ifp = &sc->arpcom.ac_if;
4804
4805 ASSERT_SERIALIZED(ifp->if_serializer);
4806
4807 if (sc->bge_coal_chg & BGE_RX_COAL_TICKS_CHG) {
4808 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS,
4809 sc->bge_rx_coal_ticks);
4810 DELAY(10);
 4811 CSR_READ_4(sc, BGE_HCC_RX_COAL_TICKS);
4812
4813 if (bootverbose) {
4814 if_printf(ifp, "rx_coal_ticks -> %u\n",
4815 sc->bge_rx_coal_ticks);
4816 }
4817 }
4818
4819 if (sc->bge_coal_chg & BGE_TX_COAL_TICKS_CHG) {
4820 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS,
4821 sc->bge_tx_coal_ticks);
4822 DELAY(10);
 4823 CSR_READ_4(sc, BGE_HCC_TX_COAL_TICKS);
4824
4825 if (bootverbose) {
4826 if_printf(ifp, "tx_coal_ticks -> %u\n",
4827 sc->bge_tx_coal_ticks);
4828 }
4829 }
4830
 4831 if (sc->bge_coal_chg & BGE_RX_COAL_BDS_CHG) {
 4832 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS,
 4833 sc->bge_rx_coal_bds);
 4834 DELAY(10);
 4835 CSR_READ_4(sc, BGE_HCC_RX_MAX_COAL_BDS);
4836
4837 if (bootverbose) {
4838 if_printf(ifp, "rx_coal_bds -> %u\n",
4839 sc->bge_rx_coal_bds);
4840 }
4841 }
4842
 4843 if (sc->bge_coal_chg & BGE_TX_COAL_BDS_CHG) {
 4844 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS,
 4845 sc->bge_tx_coal_bds);
 4846 DELAY(10);
 4847 CSR_READ_4(sc, BGE_HCC_TX_MAX_COAL_BDS);
4848
4849 if (bootverbose) {
4850 if_printf(ifp, "tx_max_coal_bds -> %u\n",
90ad1c96
SZ
4851 sc->bge_tx_coal_bds);
4852 }
4853 }
4854
4855 if (sc->bge_coal_chg & BGE_RX_COAL_TICKS_INT_CHG) {
4856 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT,
4857 sc->bge_rx_coal_ticks_int);
4858 DELAY(10);
 4859 CSR_READ_4(sc, BGE_HCC_RX_COAL_TICKS_INT);
4860
4861 if (bootverbose) {
4862 if_printf(ifp, "rx_coal_ticks_int -> %u\n",
4863 sc->bge_rx_coal_ticks_int);
4864 }
4865 }
4866
4867 if (sc->bge_coal_chg & BGE_TX_COAL_TICKS_INT_CHG) {
4868 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT,
4869 sc->bge_tx_coal_ticks_int);
4870 DELAY(10);
 4871 CSR_READ_4(sc, BGE_HCC_TX_COAL_TICKS_INT);
4872
4873 if (bootverbose) {
4874 if_printf(ifp, "tx_coal_ticks_int -> %u\n",
4875 sc->bge_tx_coal_ticks_int);
4876 }
4877 }
4878
4879 if (sc->bge_coal_chg & BGE_RX_COAL_BDS_INT_CHG) {
4880 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT,
4881 sc->bge_rx_coal_bds_int);
4882 DELAY(10);
 4883 CSR_READ_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT);
4884
4885 if (bootverbose) {
4886 if_printf(ifp, "rx_coal_bds_int -> %u\n",
4887 sc->bge_rx_coal_bds_int);
4888 }
4889 }
4890
4891 if (sc->bge_coal_chg & BGE_TX_COAL_BDS_INT_CHG) {
4892 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT,
4893 sc->bge_tx_coal_bds_int);
4894 DELAY(10);
 4895 CSR_READ_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT);
4896
4897 if (bootverbose) {
4898 if_printf(ifp, "tx_coal_bds_int -> %u\n",
4899 sc->bge_tx_coal_bds_int);
4900 }
4901 }
4902
4903 sc->bge_coal_chg = 0;
4904}
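/*
 * [Editor's note: every register update above follows the same
 * write / DELAY(10) / read-back pattern.  The dummy CSR_READ_4()
 * forces the posted PCI write to complete, so the new coalescing
 * parameter is known to have reached the chip before the driver
 * moves on; the value read is discarded.]
 */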
4905
4906static void
4907bge_enable_intr(struct bge_softc *sc)
4908{
4909 struct ifnet *ifp = &sc->arpcom.ac_if;
4910
4911 lwkt_serialize_handler_enable(ifp->if_serializer);
4912
4913 /*
4914 * Enable interrupt.
4915 */
 4916 bge_writembx(sc, BGE_MBX_IRQ0_LO, sc->bge_status_tag << 24);
4917 if (sc->bge_flags & BGE_FLAG_ONESHOT_MSI) {
4918 /* XXX Linux driver */
4919 bge_writembx(sc, BGE_MBX_IRQ0_LO, sc->bge_status_tag << 24);
4920 }
4921
4922 /*
4923 * Unmask the interrupt when we stop polling.
4924 */
4925 PCI_CLRBIT(sc->bge_dev, BGE_PCI_MISC_CTL,
4926 BGE_PCIMISCCTL_MASK_PCI_INTR, 4);
4927
4928 /*
 4929  * Trigger another interrupt, since the above write
 4930  * to interrupt mailbox 0 may acknowledge a pending
 4931  * interrupt.
4932 */
4933 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
4934}
4935
4936static void
4937bge_disable_intr(struct bge_softc *sc)
4938{
4939 struct ifnet *ifp = &sc->arpcom.ac_if;
4940
4941 /*
4942 * Mask the interrupt when we start polling.
4943 */
4944 PCI_SETBIT(sc->bge_dev, BGE_PCI_MISC_CTL,
4945 BGE_PCIMISCCTL_MASK_PCI_INTR, 4);
4946
4947 /*
4948 * Acknowledge possible asserted interrupt.
4949 */
 4950 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
 4951
4952 sc->bge_npoll.ifpc_stcount = 0;
4953
ba39cc82
SZ
4954 lwkt_serialize_handler_disable(ifp->if_serializer);
4955}
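/*
 * [Editor's note on the pair above: interrupts are gated at two
 * levels.  BGE_PCIMISCCTL_MASK_PCI_INTR masks the INTx/MSI line
 * itself, while the BGE_MBX_IRQ0_LO mailbox write acknowledges any
 * asserted interrupt (bge_disable_intr) or re-arms the status tag
 * (bge_enable_intr).  bge_enable_intr() additionally sets
 * BGE_MLC_INTR_SET to force an interrupt, so an event that arrived
 * while the line was masked is not silently lost.]
 */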
4956
4957static int
4958bge_get_eaddr_mem(struct bge_softc *sc, uint8_t ether_addr[])
4959{
4960 uint32_t mac_addr;
4961 int ret = 1;
4962
4963 mac_addr = bge_readmem_ind(sc, 0x0c14);
4964 if ((mac_addr >> 16) == 0x484b) {
4965 ether_addr[0] = (uint8_t)(mac_addr >> 8);
4966 ether_addr[1] = (uint8_t)mac_addr;
4967 mac_addr = bge_readmem_ind(sc, 0x0c18);
4968 ether_addr[2] = (uint8_t)(mac_addr >> 24);
4969 ether_addr[3] = (uint8_t)(mac_addr >> 16);
4970 ether_addr[4] = (uint8_t)(mac_addr >> 8);
4971 ether_addr[5] = (uint8_t)mac_addr;
4972 ret = 0;
4973 }
4974 return ret;
4975}
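/*
 * [Editor's note: the 0x484b signature tested above is the ASCII
 * pair "HK"; bootcode apparently uses it to mark a valid MAC address
 * in NIC SRAM at offsets 0x0c14/0x0c18, the same convention the
 * Linux tg3 driver checks.]
 */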
4976
4977static int
4978bge_get_eaddr_nvram(struct bge_softc *sc, uint8_t ether_addr[])
4979{
4980 int mac_offset = BGE_EE_MAC_OFFSET;
4981
4982 if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
4983 mac_offset = BGE_EE_MAC_OFFSET_5906;
4984
4985 return bge_read_nvram(sc, ether_addr, mac_offset + 2, ETHER_ADDR_LEN);
4986}
4987
4988static int
4989bge_get_eaddr_eeprom(struct bge_softc *sc, uint8_t ether_addr[])
4990{
4991 if (sc->bge_flags & BGE_FLAG_NO_EEPROM)
4992 return 1;
4993
4994 return bge_read_eeprom(sc, ether_addr, BGE_EE_MAC_OFFSET + 2,
4995 ETHER_ADDR_LEN);
4996}
4997
4998static int
4999bge_get_eaddr(struct bge_softc *sc, uint8_t eaddr[])
5000{
5001 static const bge_eaddr_fcn_t bge_eaddr_funcs[] = {
5002 /* NOTE: Order is critical */
5003 bge_get_eaddr_mem,
5004 bge_get_eaddr_nvram,
5005 bge_get_eaddr_eeprom,
5006 NULL
5007 };
5008 const bge_eaddr_fcn_t *func;
5009
5010 for (func = bge_eaddr_funcs; *func != NULL; ++func) {
5011 if ((*func)(sc, eaddr) == 0)
5012 break;
5013 }
5014 return (*func == NULL ? ENXIO : 0);
5015}
5016
5017/*
5018 * NOTE: 'm' is not freed upon failure
5019 */
5020struct mbuf *
5021bge_defrag_shortdma(struct mbuf *m)
5022{
5023 struct mbuf *n;
5024 int found;
5025
5026 /*
 5027  * If the device receives two back-to-back send BDs with less
 5028  * than or equal to 8 total bytes then it may hang.  The two
 5029  * back-to-back send BDs must be in the same frame for this
 5030  * failure to occur.  Scan the mbuf chain for two back-to-back
 5031  * send BDs; if found, allocate a new mbuf and copy the frame
 5032  * into it to work around the silicon bug.
5033 */
5034 for (n = m, found = 0; n != NULL; n = n->m_next) {
5035 if (n->m_len < 8) {
5036 found++;
5037 if (found > 1)
5038 break;
5039 continue;
5040 }
5041 found = 0;
5042 }
5043
5044 if (found > 1)
 5045 n = m_defrag(m, M_NOWAIT);
5046 else
5047 n = m;
5048 return n;
5049}
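/*
 * [Editor's worked example: a chain with m_len = { 64, 4, 3, 32 }
 * has two adjacent segments shorter than 8 bytes (4 and 3), so the
 * scan above reaches found > 1 and the frame is linearized with
 * m_defrag().  A chain such as { 4, 64, 3 } is left untouched: its
 * short segments are not back-to-back, so the errata condition
 * cannot trigger.]
 */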
5050
5051static void
5052bge_stop_block(struct bge_softc *sc, bus_size_t reg, uint32_t bit)
5053{
5054 int i;
5055
5056 BGE_CLRBIT(sc, reg, bit);
5057 for (i = 0; i < BGE_TIMEOUT; i++) {
5058 if ((CSR_READ_4(sc, reg) & bit) == 0)
5059 return;
5060 DELAY(100);
5061 }
5062}
5063
5064static void
5065bge_link_poll(struct bge_softc *sc)
5066{
5067 uint32_t status;
5068
5069 status = CSR_READ_4(sc, BGE_MAC_STS);
5070 if ((status & sc->bge_link_chg) || sc->bge_link_evt) {
5071 sc->bge_link_evt = 0;
5072 sc->bge_link_upd(sc, status);
5073 }
5074}
5075
5076static void
5077bge_enable_msi(struct bge_softc *sc)
5078{
5079 uint32_t msi_mode;
5080
5081 msi_mode = CSR_READ_4(sc, BGE_MSI_MODE);
5082 msi_mode |= BGE_MSIMODE_ENABLE;
5083 if (sc->bge_flags & BGE_FLAG_ONESHOT_MSI) {
5084 /*
 5085  * According to all of the publicly available datasheets,
 5086  * bit 5 of MSI_MODE is defined as "MSI FIFO Underrun Attn"
 5087  * for BCM5755+ and BCM5906, the chips on which "oneshot
 5088  * MSI" is enabled.  However, it is always safe to clear
 5089  * the bit here.
5090 */
5091 msi_mode &= ~BGE_MSIMODE_ONESHOT_DISABLE;
5092 }
5093 CSR_WRITE_4(sc, BGE_MSI_MODE, msi_mode);
5094}
5095
5096static int
5097bge_setup_tso(struct bge_softc *sc, struct mbuf **mp,
5098 uint16_t *mss0, uint16_t *flags0)
5099{
5100 struct mbuf *m;
5101 struct ip *ip;
5102 struct tcphdr *th;
5103 int thoff, iphlen, hoff, hlen;
5104 uint16_t flags, mss;
5105
5106 m = *mp;
5107 KASSERT(M_WRITABLE(m), ("TSO mbuf not writable"));
5108
5109 hoff = m->m_pkthdr.csum_lhlen;
5110 iphlen = m->m_pkthdr.csum_iphlen;
5111 thoff = m->m_pkthdr.csum_thlen;
5112
5113 KASSERT(hoff > 0, ("invalid ether header len"));
5114 KASSERT(iphlen > 0, ("invalid ip header len"));
5115 KASSERT(thoff > 0, ("invalid tcp header len"));
5116
5117 if (__predict_false(m->m_len < hoff + iphlen + thoff)) {
5118 m = m_pullup(m, hoff + iphlen + thoff);
5119 if (m == NULL) {
5120 *mp = NULL;
5121 return ENOBUFS;
5122 }
5123 *mp = m;
5124 }
5125 ip = mtodoff(m, struct ip *, hoff);
5126 th = mtodoff(m, struct tcphdr *, hoff + iphlen);
5127
5128 mss = m->m_pkthdr.tso_segsz;
5129 flags = BGE_TXBDFLAG_CPU_PRE_DMA | BGE_TXBDFLAG_CPU_POST_DMA;
5130
5131 ip->ip_len = htons(mss + iphlen + thoff);
5132 th->th_sum = 0;
5133
5134 hlen = (iphlen + thoff) >> 2;
5135 mss |= (hlen << 11);
5136
5137 *mss0 = mss;
5138 *flags0 = flags;
5139
5140 return 0;
5141}
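/*
 * [Editor's worked example of the MSS encoding above: with a 20-byte
 * IP header and a 20-byte TCP header, hlen = (20 + 20) >> 2 = 10
 * 32-bit words, so an MSS of 1460 is handed to the chip as
 * 1460 | (10 << 11) = 21940 -- header length in the upper bits of
 * the 16-bit field, segment size in the lower bits.]
 */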
5142
5143static void
5144bge_stop_fw(struct bge_softc *sc)
5145{
5146 int i;
5147
5148 if (sc->bge_asf_mode) {
5149 bge_writemem_ind(sc, BGE_SRAM_FW_CMD_MB, BGE_FW_CMD_PAUSE);
5150 CSR_WRITE_4(sc, BGE_RX_CPU_EVENT,
5151 CSR_READ_4(sc, BGE_RX_CPU_EVENT) | BGE_RX_CPU_DRV_EVENT);
5152
 5153 for (i = 0; i < 100; i++) {
5154 if (!(CSR_READ_4(sc, BGE_RX_CPU_EVENT) &
5155 BGE_RX_CPU_DRV_EVENT))
5156 break;
5157 DELAY(10);
5158 }
5159 }
5160}
5161
5162static void
5163bge_sig_pre_reset(struct bge_softc *sc, int type)
5164{
5165 /*
 5166  * Some chips don't like this, so do it only if ASF is enabled.
5167 */
5168 if (sc->bge_asf_mode)
5169 bge_writemem_ind(sc, BGE_SRAM_FW_MB, BGE_SRAM_FW_MB_MAGIC);
5170
5171 if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
5172 switch (type) {
5173 case BGE_RESET_START:
5174 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
5175 BGE_FW_DRV_STATE_START);
5176 break;
5177 case BGE_RESET_SHUTDOWN:
5178 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
5179 BGE_FW_DRV_STATE_UNLOAD);
5180 break;
5181 case BGE_RESET_SUSPEND:
5182 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
5183 BGE_FW_DRV_STATE_SUSPEND);
5184 break;
5185 }
5186 }
5187
5188 if (type == BGE_RESET_START || type == BGE_RESET_SUSPEND)
5189 bge_ape_driver_state_change(sc, type);
5190}
5191
5192static void
5193bge_sig_legacy(struct bge_softc *sc, int type)
5194{
5195 if (sc->bge_asf_mode) {
5196 switch (type) {
5197 case BGE_RESET_START:
5198 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
5199 BGE_FW_DRV_STATE_START);
5200 break;
5201 case BGE_RESET_SHUTDOWN:
5202 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
5203 BGE_FW_DRV_STATE_UNLOAD);
5204 break;
5205 }
5206 }
5207}
5208
5209static void
5210bge_sig_post_reset(struct bge_softc *sc, int type)
5211{
5212 if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
5213 switch (type) {
5214 case BGE_RESET_START:
5215 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
5216 BGE_FW_DRV_STATE_START_DONE);
5217 /* START DONE */
5218 break;
5219 case BGE_RESET_SHUTDOWN:
5220 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
5221 BGE_FW_DRV_STATE_UNLOAD_DONE);
5222 break;
5223 }
5224 }
5225 if (type == BGE_RESET_SHUTDOWN)
5226 bge_ape_driver_state_change(sc, type);
5227}
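/*
 * [Editor's note: the bge_sig_* routines above are the driver's side
 * of the ASF firmware handshake.  Judging from their callers, the
 * expected sequence is bge_stop_fw() and bge_sig_pre_reset() before
 * a chip reset, then bge_sig_legacy() and bge_sig_post_reset()
 * afterwards, so ASF/IPMI firmware can save and restore its state
 * around the reset.]
 */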
5228
5229static void
5230bge_asf_driver_up(struct bge_softc *sc)
5231{
5232 if (sc->bge_asf_mode & ASF_STACKUP) {
 5233 /* Send an ASF heartbeat approx. every 2 seconds. */
 5234 if (sc->bge_asf_count)
 5235 sc->bge_asf_count--;
5236 else {
5237 sc->bge_asf_count = 2;
5238 bge_writemem_ind(sc, BGE_SRAM_FW_CMD_MB,
5239 BGE_FW_CMD_DRV_ALIVE);
5240 bge_writemem_ind(sc, BGE_SRAM_FW_CMD_LEN_MB, 4);
5241 bge_writemem_ind(sc, BGE_SRAM_FW_CMD_DATA_MB,
5242 BGE_FW_HB_TIMEOUT_SEC);
5243 CSR_WRITE_4(sc, BGE_RX_CPU_EVENT,
5244 CSR_READ_4(sc, BGE_RX_CPU_EVENT) |
5245 BGE_RX_CPU_DRV_EVENT);
5246 }
5247 }
5248}
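/*
 * [Editor's note, assuming the usual call site: bge_asf_driver_up()
 * is driven from the periodic tick, so with bge_asf_count reloaded
 * to 2 the DRV_ALIVE command and its BGE_FW_HB_TIMEOUT_SEC payload
 * go out roughly every other call -- the "approx. every 2 seconds"
 * of the comment above.]
 */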
5249
5250/*
5251 * Clear all stale locks and select the lock for this driver instance.
5252 */
5253static void
5254bge_ape_lock_init(struct bge_softc *sc)
5255{
5256 uint32_t bit, regbase;
5257 int i;
5258
5259 if (sc->bge_asicrev == BGE_ASICREV_BCM5761)
5260 regbase = BGE_APE_LOCK_GRANT;
5261 else
5262 regbase = BGE_APE_PER_LOCK_GRANT;
5263
5264 /* Clear any stale locks. */
5265 for (i = BGE_APE_LOCK_PHY0; i <= BGE_APE_LOCK_GPIO; i++) {
5266 switch (i) {
5267 case BGE_APE_LOCK_PHY0:
5268 case BGE_APE_LOCK_PHY1:
5269 case BGE_APE_LOCK_PHY2:
5270 case BGE_APE_LOCK_PHY3:
5271 bit = BGE_APE_LOCK_GRANT_DRIVER0;
5272 break;
5273 default:
5274 if (sc->bge_func_addr == 0)
5275 bit = BGE_APE_LOCK_GRANT_DRIVER0;
5276 else
5277 bit = (1 << sc->bge_func_addr);
5278 }
5279 APE_WRITE_4(sc, regbase + 4 * i, bit);
5280 }
5281
5282 /* Select the PHY lock based on the device's function number. */
5283 switch (sc->bge_func_addr) {
5284 case 0:
5285 sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY0;
5286 break;
5287 case 1:
5288 sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY1;
5289 break;
5290 case 2:
5291 sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY2;
5292 break;
5293 case 3:
5294 sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY3;
5295 break;
5296 default:
5297 device_printf(sc->bge_dev,
5298 "PHY lock not supported on this function\n");
5299 }
5300}
5301
5302/*
5303 * Check for APE firmware, set flags, and print version info.
5304 */
5305static void
5306bge_ape_read_fw_ver(struct bge_softc *sc)
5307{
5308 const char *fwtype;
5309 uint32_t apedata, features;
5310
5311 /* Check for a valid APE signature in shared memory. */
5312 apedata = APE_READ_4(sc, BGE_APE_SEG_SIG);
5313 if (apedata != BGE_APE_SEG_SIG_MAGIC) {
5314 device_printf(sc->bge_dev, "no APE signature\n");
5315 sc->bge_mfw_flags &= ~BGE_MFW_ON_APE;
5316 return;
5317 }
5318
5319 /* Check if APE firmware is running. */
5320 apedata = APE_READ_4(sc, BGE_APE_FW_STATUS);
5321 if ((apedata & BGE_APE_FW_STATUS_READY) == 0) {
5322 device_printf(sc->bge_dev, "APE signature found "
5323 "but FW status not ready! 0x%08x\n", apedata);
5324 return;
5325 }
5326
5327 sc->bge_mfw_flags |= BGE_MFW_ON_APE;
5328
 5329 /* Fetch the APE firmware type and version. */
5330 apedata = APE_READ_4(sc, BGE_APE_FW_VERSION);
5331 features = APE_READ_4(sc, BGE_APE_FW_FEATURES);
5332 if ((features & BGE_APE_FW_FEATURE_NCSI) != 0) {
5333 sc->bge_mfw_flags |= BGE_MFW_TYPE_NCSI;
5334 fwtype = "NCSI";
5335 } else if ((features & BGE_APE_FW_FEATURE_DASH) != 0) {
5336 sc->bge_mfw_flags |= BGE_MFW_TYPE_DASH;
5337 fwtype = "DASH";
5338 } else
5339 fwtype = "UNKN";
5340
5341 /* Print the APE firmware version. */
5342 device_printf(sc->bge_dev, "APE FW version: %s v%d.%d.%d.%d\n",
5343 fwtype,
5344 (apedata & BGE_APE_FW_VERSION_MAJMSK) >> BGE_APE_FW_VERSION_MAJSFT,
5345 (apedata & BGE_APE_FW_VERSION_MINMSK) >> BGE_APE_FW_VERSION_MINSFT,
5346 (apedata & BGE_APE_FW_VERSION_REVMSK) >> BGE_APE_FW_VERSION_REVSFT,
5347 (apedata & BGE_APE_FW_VERSION_BLDMSK));
5348}
5349
5350static int
5351bge_ape_lock(struct bge_softc *sc, int locknum)
5352{
5353 uint32_t bit, gnt, req, status;
5354 int i, off;
5355
5356 if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0)
5357 return (0);
5358
5359 /* Lock request/grant registers have different bases. */
5360 if (sc->bge_asicrev == BGE_ASICREV_BCM5761) {
5361 req = BGE_APE_LOCK_REQ;
5362 gnt = BGE_APE_LOCK_GRANT;
5363 } else {
5364 req = BGE_APE_PER_LOCK_REQ;
5365 gnt = BGE_APE_PER_LOCK_GRANT;
5366 }
5367
5368 off = 4 * locknum;
5369
5370 switch (locknum) {
5371 case BGE_APE_LOCK_GPIO:
5372 /* Lock required when using GPIO. */
5373 if (sc->bge_asicrev == BGE_ASICREV_BCM5761)
5374 return (0);
5375 if (sc->bge_func_addr == 0)
5376 bit = BGE_APE_LOCK_REQ_DRIVER0;
5377 else
5378 bit = (1 << sc->bge_func_addr);
5379 break;
5380 case BGE_APE_LOCK_GRC:
5381 /* Lock required to reset the device. */
5382 if (sc->bge_func_addr == 0)
5383 bit = BGE_APE_LOCK_REQ_DRIVER0;
5384 else
5385 bit = (1 << sc->bge_func_addr);
5386 break;
5387 case BGE_APE_LOCK_MEM:
5388 /* Lock required when accessing certain APE memory. */
5389 if (sc->bge_func_addr == 0)
5390 bit = BGE_APE_LOCK_REQ_DRIVER0;
5391 else
5392 bit = (1 << sc->bge_func_addr);
5393 break;
5394 case BGE_APE_LOCK_PHY0:
5395 case BGE_APE_LOCK_PHY1:
5396 case BGE_APE_LOCK_PHY2:
5397 case BGE_APE_LOCK_PHY3:
5398 /* Lock required when accessing PHYs. */
5399 bit = BGE_APE_LOCK_REQ_DRIVER0;
5400 break;
5401 default:
5402 return (EINVAL);
5403 }
5404
5405 /* Request a lock. */
5406 APE_WRITE_4(sc, req + off, bit);
5407
5408 /* Wait up to 1 second to acquire lock. */
5409 for (i = 0; i < 20000; i++) {
5410 status = APE_READ_4(sc, gnt + off);
5411 if (status == bit)
5412 break;
5413 DELAY(50);
5414 }
5415
5416 /* Handle any errors. */
5417 if (status != bit) {
5418 device_printf(sc->bge_dev, "APE lock %d request failed! "
5419 "request = 0x%04x[0x%04x], status = 0x%04x[0x%04x]\n",
5420 locknum, req + off, bit & 0xFFFF, gnt + off,
5421 status & 0xFFFF);
5422 /* Revoke the lock request. */
5423 APE_WRITE_4(sc, gnt + off, bit);
5424 return (EBUSY);
5425 }
5426
5427 return (0);
5428}
5429
5430static void
5431bge_ape_unlock(struct bge_softc *sc, int locknum)
5432{
5433 uint32_t bit, gnt;
5434 int off;
5435
5436 if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0)
5437 return;
5438
5439 if (sc->bge_asicrev == BGE_ASICREV_BCM5761)
5440 gnt = BGE_APE_LOCK_GRANT;
5441 else
5442 gnt = BGE_APE_PER_LOCK_GRANT;
5443
5444 off = 4 * locknum;
5445
5446 switch (locknum) {
5447 case BGE_APE_LOCK_GPIO:
5448 if (sc->bge_asicrev == BGE_ASICREV_BCM5761)
5449 return;
5450 if (sc->bge_func_addr == 0)
5451 bit = BGE_APE_LOCK_GRANT_DRIVER0;
5452 else
5453 bit = (1 << sc->bge_func_addr);
5454 break;
5455 case BGE_APE_LOCK_GRC:
5456 if (sc->bge_func_addr == 0)
5457 bit = BGE_APE_LOCK_GRANT_DRIVER0;
5458 else
5459 bit = (1 << sc->bge_func_addr);
5460 break;
5461 case BGE_APE_LOCK_MEM:
5462 if (sc->bge_func_addr == 0)
5463 bit = BGE_APE_LOCK_GRANT_DRIVER0;
5464 else
5465 bit = (1 << sc->bge_func_addr);
5466 break;
5467 case BGE_APE_LOCK_PHY0:
5468 case BGE_APE_LOCK_PHY1:
5469 case BGE_APE_LOCK_PHY2:
5470 case BGE_APE_LOCK_PHY3:
5471 bit = BGE_APE_LOCK_GRANT_DRIVER0;
5472 break;
5473 default:
5474 return;
5475 }
5476
5477 APE_WRITE_4(sc, gnt + off, bit);
5478}
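/*
 * [Editor's usage sketch, inferred from callers elsewhere in the
 * driver: PHY accesses bracket themselves with the per-function
 * lock chosen by bge_ape_lock_init(), e.g.
 *
 *	if (bge_ape_lock(sc, sc->bge_phy_ape_lock) == 0) {
 *		... MDIO register access ...
 *		bge_ape_unlock(sc, sc->bge_phy_ape_lock);
 *	}
 *
 * so the host driver and the APE firmware never race on the PHY.]
 */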
5479
5480/*
5481 * Send an event to the APE firmware.
5482 */
5483static void
5484bge_ape_send_event(struct bge_softc *sc, uint32_t event)
5485{
5486 uint32_t apedata;
5487 int i;
5488
5489 /* NCSI does not support APE events. */
5490 if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0)
5491 return;
5492
5493 /* Wait up to 1ms for APE to service previous event. */
5494 for (i = 10; i > 0; i--) {
5495 if (bge_ape_lock(sc, BGE_APE_LOCK_MEM) != 0)
5496 break;
5497 apedata = APE_READ_4(sc, BGE_APE_EVENT_STATUS);
5498 if ((apedata & BGE_APE_EVENT_STATUS_EVENT_PENDING) == 0) {
5499 APE_WRITE_4(sc, BGE_APE_EVENT_STATUS, event |
5500 BGE_APE_EVENT_STATUS_EVENT_PENDING);
5501 bge_ape_unlock(sc, BGE_APE_LOCK_MEM);
5502 APE_WRITE_4(sc, BGE_APE_EVENT, BGE_APE_EVENT_1);
5503 break;
5504 }
5505 bge_ape_unlock(sc, BGE_APE_LOCK_MEM);
5506 DELAY(100);
5507 }
5508 if (i == 0)
5509 device_printf(sc->bge_dev, "APE event 0x%08x send timed out\n",
5510 event);
5511}
5512
5513static void
5514bge_ape_driver_state_change(struct bge_softc *sc, int kind)
5515{
5516 uint32_t apedata, event;
5517
5518 if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0)
5519 return;
5520
5521 switch (kind) {
5522 case BGE_RESET_START:
5523 /* If this is the first load, clear the load counter. */
5524 apedata = APE_READ_4(sc, BGE_APE_HOST_SEG_SIG);
5525 if (apedata != BGE_APE_HOST_SEG_SIG_MAGIC)
5526 APE_WRITE_4(sc, BGE_APE_HOST_INIT_COUNT, 0);
5527 else {
5528 apedata = APE_READ_4(sc, BGE_APE_HOST_INIT_COUNT);
5529 APE_WRITE_4(sc, BGE_APE_HOST_INIT_COUNT, ++apedata);
5530 }
5531 APE_WRITE_4(sc, BGE_APE_HOST_SEG_SIG,
5532 BGE_APE_HOST_SEG_SIG_MAGIC);
5533 APE_WRITE_4(sc, BGE_APE_HOST_SEG_LEN,
5534 BGE_APE_HOST_SEG_LEN_MAGIC);
5535
5536 /* Add some version info if bge(4) supports it. */
5537 APE_WRITE_4(sc, BGE_APE_HOST_DRIVER_ID,
5538 BGE_APE_HOST_DRIVER_ID_MAGIC(1, 0));
5539 APE_WRITE_4(sc, BGE_APE_HOST_BEHAVIOR,
5540 BGE_APE_HOST_BEHAV_NO_PHYLOCK);
5541 APE_WRITE_4(sc, BGE_APE_HOST_HEARTBEAT_INT_MS,
5542 BGE_APE_HOST_HEARTBEAT_INT_DISABLE);
5543 APE_WRITE_4(sc, BGE_APE_HOST_DRVR_STATE,
5544 BGE_APE_HOST_DRVR_STATE_START);
5545 event = BGE_APE_EVENT_STATUS_STATE_START;
5546 break;
5547 case BGE_RESET_SHUTDOWN:
5548 APE_WRITE_4(sc, BGE_APE_HOST_DRVR_STATE,
5549 BGE_APE_HOST_DRVR_STATE_UNLOAD);
5550 event = BGE_APE_EVENT_STATUS_STATE_UNLOAD;
5551 break;
5552 case BGE_RESET_SUSPEND:
5553 event = BGE_APE_EVENT_STATUS_STATE_SUSPEND;
5554 break;
5555 default:
5556 return;
5557 }
5558
5559 bge_ape_send_event(sc, event | BGE_APE_EVENT_STATUS_DRIVER_EVNT |
5560 BGE_APE_EVENT_STATUS_STATE_CHNGE);
5561}