- Return error if reading eeprom times out.
[dragonfly.git] / sys / dev / netif / bge / if_bge.c
CommitLineData
984263bc
MD
1/*
2 * Copyright (c) 2001 Wind River Systems
3 * Copyright (c) 1997, 1998, 1999, 2001
4 * Bill Paul <wpaul@windriver.com>. All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 * must display the following acknowledgement:
16 * This product includes software developed by Bill Paul.
17 * 4. Neither the name of the author nor the names of any co-contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31 * THE POSSIBILITY OF SUCH DAMAGE.
32 *
011c0f93 33 * $FreeBSD: src/sys/dev/bge/if_bge.c,v 1.3.2.39 2005/07/03 03:41:18 silby Exp $
7b47d9c2 34 * $DragonFly: src/sys/dev/netif/bge/if_bge.c,v 1.66 2007/04/14 04:35:10 sephe Exp $
1de703da 35 *
984263bc
MD
36 */
37
38/*
39 * Broadcom BCM570x family gigabit ethernet driver for FreeBSD.
40 *
41 * Written by Bill Paul <wpaul@windriver.com>
42 * Senior Engineer, Wind River Systems
43 */
44
45/*
46 * The Broadcom BCM5700 is based on technology originally developed by
47 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
48 * MAC chips. The BCM5700, sometimes refered to as the Tigon III, has
49 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
50 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
51 * frames, highly configurable RX filtering, and 16 RX and TX queues
52 * (which, along with RX filter rules, can be used for QOS applications).
53 * Other features, such as TCP segmentation, may be available as part
54 * of value-added firmware updates. Unlike the Tigon I and Tigon II,
55 * firmware images can be stored in hardware and need not be compiled
56 * into the driver.
57 *
58 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
59 * function in a 32-bit/64-bit 33/66Mhz bus, or a 64-bit/133Mhz bus.
60 *
61 * The BCM5701 is a single-chip solution incorporating both the BCM5700
62 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
63 * does not support external SSRAM.
64 *
65 * Broadcom also produces a variation of the BCM5700 under the "Altima"
66 * brand name, which is functionally similar but lacks PCI-X support.
67 *
68 * Without external SSRAM, you can only have at most 4 TX rings,
69 * and the use of the mini RX ring is disabled. This seems to imply
70 * that these features are simply not available on the BCM5701. As a
71 * result, this driver does not implement any support for the mini RX
72 * ring.
73 */
74
75#include <sys/param.h>
76#include <sys/systm.h>
77#include <sys/sockio.h>
78#include <sys/mbuf.h>
79#include <sys/malloc.h>
80#include <sys/kernel.h>
81#include <sys/socket.h>
82#include <sys/queue.h>
16dca0df 83#include <sys/serialize.h>
9a717c15 84#include <sys/thread2.h>
984263bc
MD
85
86#include <net/if.h>
936ff230 87#include <net/ifq_var.h>
984263bc
MD
88#include <net/if_arp.h>
89#include <net/ethernet.h>
90#include <net/if_dl.h>
91#include <net/if_media.h>
92
93#include <net/bpf.h>
94
95#include <net/if_types.h>
1f2de5d4 96#include <net/vlan/if_vlan_var.h>
984263bc
MD
97
98#include <netinet/in_systm.h>
99#include <netinet/in.h>
100#include <netinet/ip.h>
101
102#include <vm/vm.h> /* for vtophys */
103#include <vm/pmap.h> /* for vtophys */
984263bc
MD
104#include <sys/bus.h>
105#include <sys/rman.h>
106
1f2de5d4
MD
107#include <dev/netif/mii_layer/mii.h>
108#include <dev/netif/mii_layer/miivar.h>
109#include <dev/netif/mii_layer/miidevs.h>
110#include <dev/netif/mii_layer/brgphyreg.h>
984263bc 111
f952ab63 112#include <bus/pci/pcidevs.h>
1f2de5d4
MD
113#include <bus/pci/pcireg.h>
114#include <bus/pci/pcivar.h>
984263bc 115
1f2de5d4 116#include "if_bgereg.h"
984263bc
MD
117
118#define BGE_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP)
119
120/* "controller miibus0" required. See GENERIC if you get errors here. */
121#include "miibus_if.h"
122
984263bc
MD
123/*
124 * Various supported device vendors/types and their names. Note: the
125 * spec seems to indicate that the hardware still has Alteon's vendor
126 * ID burned into it, though it will always be overriden by the vendor
127 * ID in the EEPROM. Just to be safe, we cover all possibilities.
128 */
129#define BGE_DEVDESC_MAX 64 /* Maximum device description length */
130
/*
 * PCI vendor/device match table used by bge_probe().  Each entry pairs
 * a vendor/product id with the human-readable description reported at
 * attach time.  The list is terminated by the { 0, 0, NULL } sentinel.
 * NOTE(review): the AC1001 entry carries the description "Altima
 * AC1002" — presumably intentional (same silicon), but worth confirming.
 */
static struct bge_type bge_devs[] = {
	{ PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_BCM5700,
		"Alteon BCM5700 Gigabit Ethernet" },
	{ PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_BCM5701,
		"Alteon BCM5701 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5700,
		"Broadcom BCM5700 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5701,
		"Broadcom BCM5701 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702X,
		"Broadcom BCM5702X Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702_ALT,
		"Broadcom BCM5702 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703X,
		"Broadcom BCM5703X Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703A3,
		"Broadcom BCM5703 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704C,
		"Broadcom BCM5704C Dual Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704S,
		"Broadcom BCM5704S Dual Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705,
		"Broadcom BCM5705 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705K,
		"Broadcom BCM5705K Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705M,
		"Broadcom BCM5705M Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705M_ALT,
		"Broadcom BCM5705M Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5714,
		"Broadcom BCM5714C Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5721,
		"Broadcom BCM5721 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5750,
		"Broadcom BCM5750 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5750M,
		"Broadcom BCM5750M Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751,
		"Broadcom BCM5751 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751M,
		"Broadcom BCM5751M Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5752,
		"Broadcom BCM5752 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5782,
		"Broadcom BCM5782 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5788,
		"Broadcom BCM5788 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5789,
		"Broadcom BCM5789 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5901,
		"Broadcom BCM5901 Fast Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5901A2,
		"Broadcom BCM5901A2 Fast Ethernet" },
	{ PCI_VENDOR_SCHNEIDERKOCH, PCI_PRODUCT_SCHNEIDERKOCH_SK_9DX1,
		"SysKonnect Gigabit Ethernet" },
	{ PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1000,
		"Altima AC1000 Gigabit Ethernet" },
	{ PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1001,
		"Altima AC1002 Gigabit Ethernet" },
	{ PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC9100,
		"Altima AC9100 Gigabit Ethernet" },
	{ 0, 0, NULL }
};
194
33c39a69
JS
195static int bge_probe(device_t);
196static int bge_attach(device_t);
197static int bge_detach(device_t);
198static void bge_release_resources(struct bge_softc *);
199static void bge_txeof(struct bge_softc *);
200static void bge_rxeof(struct bge_softc *);
201
202static void bge_tick(void *);
78195a76 203static void bge_tick_serialized(void *);
33c39a69
JS
204static void bge_stats_update(struct bge_softc *);
205static void bge_stats_update_regs(struct bge_softc *);
206static int bge_encap(struct bge_softc *, struct mbuf *, uint32_t *);
207
208static void bge_intr(void *);
209static void bge_start(struct ifnet *);
210static int bge_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
211static void bge_init(void *);
212static void bge_stop(struct bge_softc *);
213static void bge_watchdog(struct ifnet *);
214static void bge_shutdown(device_t);
aa65409c
SZ
215static int bge_suspend(device_t);
216static int bge_resume(device_t);
33c39a69
JS
217static int bge_ifmedia_upd(struct ifnet *);
218static void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
219
220static uint8_t bge_eeprom_getbyte(struct bge_softc *, uint32_t, uint8_t *);
221static int bge_read_eeprom(struct bge_softc *, caddr_t, uint32_t, size_t);
222
33c39a69 223static void bge_setmulti(struct bge_softc *);
6439b28a 224static void bge_setpromisc(struct bge_softc *);
33c39a69
JS
225
226static void bge_handle_events(struct bge_softc *);
227static int bge_alloc_jumbo_mem(struct bge_softc *);
228static void bge_free_jumbo_mem(struct bge_softc *);
2aa9b12f
JS
229static struct bge_jslot
230 *bge_jalloc(struct bge_softc *);
231static void bge_jfree(void *);
232static void bge_jref(void *);
33c39a69
JS
233static int bge_newbuf_std(struct bge_softc *, int, struct mbuf *);
234static int bge_newbuf_jumbo(struct bge_softc *, int, struct mbuf *);
235static int bge_init_rx_ring_std(struct bge_softc *);
236static void bge_free_rx_ring_std(struct bge_softc *);
237static int bge_init_rx_ring_jumbo(struct bge_softc *);
238static void bge_free_rx_ring_jumbo(struct bge_softc *);
239static void bge_free_tx_ring(struct bge_softc *);
240static int bge_init_tx_ring(struct bge_softc *);
241
242static int bge_chipinit(struct bge_softc *);
243static int bge_blockinit(struct bge_softc *);
984263bc
MD
244
245#ifdef notdef
33c39a69
JS
246static uint8_t bge_vpd_readbyte(struct bge_softc *, uint32_t);
247static void bge_vpd_read_res(struct bge_softc *, struct vpd_res *, uint32_t);
248static void bge_vpd_read(struct bge_softc *);
984263bc
MD
249#endif
250
33c39a69
JS
251static uint32_t bge_readmem_ind(struct bge_softc *, uint32_t);
252static void bge_writemem_ind(struct bge_softc *, uint32_t, uint32_t);
984263bc 253#ifdef notdef
33c39a69 254static uint32_t bge_readreg_ind(struct bge_softc *, uint32_t);
984263bc 255#endif
33c39a69 256static void bge_writereg_ind(struct bge_softc *, uint32_t, uint32_t);
984263bc 257
33c39a69
JS
258static int bge_miibus_readreg(device_t, int, int);
259static int bge_miibus_writereg(device_t, int, int, int);
260static void bge_miibus_statchg(device_t);
984263bc 261
33c39a69 262static void bge_reset(struct bge_softc *);
984263bc 263
5c56d5d8
SZ
/*
 * Set following tunable to 1 for some IBM blade servers with the DNLK
 * switch module. Auto negotiation is broken for those configurations.
 * Exposed as the boot-time tunable "hw.bge.fake_autoneg".
 */
static int bge_fake_autoneg = 0;
TUNABLE_INT("hw.bge.fake_autoneg", &bge_fake_autoneg);
270
984263bc
MD
/*
 * Newbus method dispatch table: hooks this driver into the device,
 * bus, and MII-bus frameworks.  Terminated by the { 0, 0 } sentinel.
 */
static device_method_t bge_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, bge_probe),
	DEVMETHOD(device_attach, bge_attach),
	DEVMETHOD(device_detach, bge_detach),
	DEVMETHOD(device_shutdown, bge_shutdown),
	DEVMETHOD(device_suspend, bge_suspend),
	DEVMETHOD(device_resume, bge_resume),

	/* bus interface */
	DEVMETHOD(bus_print_child, bus_generic_print_child),
	DEVMETHOD(bus_driver_added, bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg, bge_miibus_readreg),
	DEVMETHOD(miibus_writereg, bge_miibus_writereg),
	DEVMETHOD(miibus_statchg, bge_miibus_statchg),

	{ 0, 0 }
};
291
33c39a69 292static DEFINE_CLASS_0(bge, bge_driver, bge_methods, sizeof(struct bge_softc));
984263bc
MD
293static devclass_t bge_devclass;
294
32832096 295DECLARE_DUMMY_MODULE(if_bge);
984263bc
MD
296DRIVER_MODULE(if_bge, pci, bge_driver, bge_devclass, 0, 0);
297DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0);
298
33c39a69
JS
299static uint32_t
300bge_readmem_ind(struct bge_softc *sc, uint32_t off)
984263bc 301{
33c39a69 302 device_t dev = sc->bge_dev;
984263bc
MD
303
304 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
305 return(pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4));
306}
307
308static void
33c39a69 309bge_writemem_ind(struct bge_softc *sc, uint32_t off, uint32_t val)
984263bc 310{
33c39a69 311 device_t dev = sc->bge_dev;
984263bc
MD
312
313 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
314 pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
984263bc
MD
315}
316
317#ifdef notdef
33c39a69
JS
318static uint32_t
319bge_readreg_ind(struct bge_softc *sc, uin32_t off)
984263bc 320{
33c39a69 321 device_t dev = sc->bge_dev;
984263bc
MD
322
323 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
324 return(pci_read_config(dev, BGE_PCI_REG_DATA, 4));
325}
326#endif
327
328static void
33c39a69 329bge_writereg_ind(struct bge_softc *sc, uint32_t off, uint32_t val)
984263bc 330{
33c39a69 331 device_t dev = sc->bge_dev;
984263bc
MD
332
333 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
334 pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);
984263bc
MD
335}
336
337#ifdef notdef
33c39a69
JS
338static uint8_t
339bge_vpd_readbyte(struct bge_softc *sc, uint32_t addr)
984263bc 340{
33c39a69
JS
341 device_t dev = sc->bge_dev;
342 uint32_t val;
984263bc 343 int i;
984263bc 344
984263bc
MD
345 pci_write_config(dev, BGE_PCI_VPD_ADDR, addr, 2);
346 for (i = 0; i < BGE_TIMEOUT * 10; i++) {
347 DELAY(10);
348 if (pci_read_config(dev, BGE_PCI_VPD_ADDR, 2) & BGE_VPD_FLAG)
349 break;
350 }
351
352 if (i == BGE_TIMEOUT) {
c6fd6f3b 353 device_printf(sc->bge_dev, "VPD read timed out\n");
984263bc
MD
354 return(0);
355 }
356
357 val = pci_read_config(dev, BGE_PCI_VPD_DATA, 4);
358
359 return((val >> ((addr % 4) * 8)) & 0xFF);
360}
361
362static void
33c39a69 363bge_vpd_read_res(struct bge_softc *sc, struct vpd_res *res, uint32_t addr)
984263bc 364{
33c39a69
JS
365 size_t i;
366 uint8_t *ptr;
984263bc 367
33c39a69 368 ptr = (uint8_t *)res;
984263bc
MD
369 for (i = 0; i < sizeof(struct vpd_res); i++)
370 ptr[i] = bge_vpd_readbyte(sc, i + addr);
371
372 return;
373}
374
375static void
33c39a69 376bge_vpd_read(struct bge_softc *sc)
984263bc
MD
377{
378 int pos = 0, i;
379 struct vpd_res res;
380
381 if (sc->bge_vpd_prodname != NULL)
efda3bd0 382 kfree(sc->bge_vpd_prodname, M_DEVBUF);
984263bc 383 if (sc->bge_vpd_readonly != NULL)
efda3bd0 384 kfree(sc->bge_vpd_readonly, M_DEVBUF);
984263bc
MD
385 sc->bge_vpd_prodname = NULL;
386 sc->bge_vpd_readonly = NULL;
387
388 bge_vpd_read_res(sc, &res, pos);
389
390 if (res.vr_id != VPD_RES_ID) {
c6fd6f3b
JS
391 device_printf(sc->bge_dev,
392 "bad VPD resource id: expected %x got %x\n",
393 VPD_RES_ID, res.vr_id);
984263bc
MD
394 return;
395 }
396
397 pos += sizeof(res);
efda3bd0 398 sc->bge_vpd_prodname = kmalloc(res.vr_len + 1, M_DEVBUF, M_INTWAIT);
984263bc
MD
399 for (i = 0; i < res.vr_len; i++)
400 sc->bge_vpd_prodname[i] = bge_vpd_readbyte(sc, i + pos);
401 sc->bge_vpd_prodname[i] = '\0';
402 pos += i;
403
404 bge_vpd_read_res(sc, &res, pos);
405
406 if (res.vr_id != VPD_RES_READ) {
c6fd6f3b
JS
407 device_printf(sc->bge_dev,
408 "bad VPD resource id: expected %x got %x\n",
409 VPD_RES_READ, res.vr_id);
984263bc
MD
410 return;
411 }
412
413 pos += sizeof(res);
efda3bd0 414 sc->bge_vpd_readonly = kmalloc(res.vr_len, M_DEVBUF, M_INTWAIT);
984263bc
MD
415 for (i = 0; i < res.vr_len + 1; i++)
416 sc->bge_vpd_readonly[i] = bge_vpd_readbyte(sc, i + pos);
984263bc
MD
417}
418#endif
419
420/*
421 * Read a byte of data stored in the EEPROM at address 'addr.' The
422 * BCM570x supports both the traditional bitbang interface and an
423 * auto access interface for reading the EEPROM. We use the auto
424 * access method.
425 */
33c39a69
JS
426static uint8_t
427bge_eeprom_getbyte(struct bge_softc *sc, uint32_t addr, uint8_t *dest)
984263bc
MD
428{
429 int i;
33c39a69 430 uint32_t byte = 0;
984263bc
MD
431
432 /*
433 * Enable use of auto EEPROM access so we can avoid
434 * having to use the bitbang method.
435 */
436 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);
437
438 /* Reset the EEPROM, load the clock period. */
439 CSR_WRITE_4(sc, BGE_EE_ADDR,
440 BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
441 DELAY(20);
442
443 /* Issue the read EEPROM command. */
444 CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);
445
446 /* Wait for completion */
447 for(i = 0; i < BGE_TIMEOUT * 10; i++) {
448 DELAY(10);
449 if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
450 break;
451 }
452
453 if (i == BGE_TIMEOUT) {
c6fd6f3b 454 if_printf(&sc->arpcom.ac_if, "eeprom read timed out\n");
7b47d9c2 455 return(1);
984263bc
MD
456 }
457
458 /* Get result. */
459 byte = CSR_READ_4(sc, BGE_EE_DATA);
460
461 *dest = (byte >> ((addr % 4) * 8)) & 0xFF;
462
463 return(0);
464}
465
466/*
467 * Read a sequence of bytes from the EEPROM.
468 */
469static int
33c39a69 470bge_read_eeprom(struct bge_softc *sc, caddr_t dest, uint32_t off, size_t len)
984263bc 471{
33c39a69
JS
472 size_t i;
473 int err;
474 uint8_t byte;
984263bc 475
33c39a69 476 for (byte = 0, err = 0, i = 0; i < len; i++) {
984263bc
MD
477 err = bge_eeprom_getbyte(sc, off + i, &byte);
478 if (err)
479 break;
480 *(dest + i) = byte;
481 }
482
483 return(err ? 1 : 0);
484}
485
/*
 * MII bus read method: read PHY register 'reg' on PHY 'phy' through
 * the chip's MI communication interface.  Returns the 16-bit register
 * value, or 0 on timeout or read failure (0 is also a legal register
 * value, so callers cannot distinguish failure from a zero read).
 */
static int
bge_miibus_readreg(device_t dev, int phy, int reg)
{
	struct bge_softc *sc;
	struct ifnet *ifp;
	uint32_t val, autopoll;
	int i;

	sc = device_get_softc(dev);
	ifp = &sc->arpcom.ac_if;

	/*
	 * Broadcom's own driver always assumes the internal
	 * PHY is at GMII address 1. On some chips, the PHY responds
	 * to accesses at all addresses, which could cause us to
	 * bogusly attach the PHY 32 times at probe type. Always
	 * restricting the lookup to address 1 is simpler than
	 * trying to figure out which chips revisions should be
	 * special-cased.
	 */
	if (phy != 1)
		return(0);

	/* Reading with autopolling on may trigger PCI errors */
	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	/* Kick off the read transaction. */
	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY|
	    BGE_MIPHY(phy)|BGE_MIREG(reg));

	/* Poll until the interface clears the BUSY bit. */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		val = CSR_READ_4(sc, BGE_MI_COMM);
		if (!(val & BGE_MICOMM_BUSY))
			break;
	}

	if (i == BGE_TIMEOUT) {
		if_printf(ifp, "PHY read timed out\n");
		val = 0;
		goto done;
	}

	/* Re-read MI_COMM to pick up the final data/status word. */
	val = CSR_READ_4(sc, BGE_MI_COMM);

done:
	/* Restore autopoll mode if we disabled it above. */
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	if (val & BGE_MICOMM_READFAIL)
		return(0);

	return(val & 0xFFFF);
}
544
545static int
33c39a69 546bge_miibus_writereg(device_t dev, int phy, int reg, int val)
984263bc
MD
547{
548 struct bge_softc *sc;
33c39a69 549 uint32_t autopoll;
984263bc
MD
550 int i;
551
552 sc = device_get_softc(dev);
553
554 /* Reading with autopolling on may trigger PCI errors */
555 autopoll = CSR_READ_4(sc, BGE_MI_MODE);
556 if (autopoll & BGE_MIMODE_AUTOPOLL) {
557 BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
558 DELAY(40);
559 }
560
561 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY|
562 BGE_MIPHY(phy)|BGE_MIREG(reg)|val);
563
564 for (i = 0; i < BGE_TIMEOUT; i++) {
565 if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY))
566 break;
567 }
568
569 if (autopoll & BGE_MIMODE_AUTOPOLL) {
570 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
571 DELAY(40);
572 }
573
574 if (i == BGE_TIMEOUT) {
c6fd6f3b 575 if_printf(&sc->arpcom.ac_if, "PHY read timed out\n");
984263bc
MD
576 return(0);
577 }
578
579 return(0);
580}
581
582static void
33c39a69 583bge_miibus_statchg(device_t dev)
984263bc
MD
584{
585 struct bge_softc *sc;
586 struct mii_data *mii;
587
588 sc = device_get_softc(dev);
589 mii = device_get_softc(sc->bge_miibus);
590
591 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
7f259627 592 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
984263bc
MD
593 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
594 } else {
595 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
596 }
597
598 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
599 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
600 } else {
601 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
602 }
984263bc
MD
603}
604
605/*
606 * Handle events that have triggered interrupts.
607 */
/*
 * Handle events that have triggered interrupts.
 * Intentionally empty: no event handling is implemented here.
 */
static void
bge_handle_events(struct bge_softc *sc)
{
}
612
613/*
614 * Memory management for jumbo frames.
615 */
/*
 * Allocate the contiguous backing store for the jumbo-frame buffer
 * pool and carve it into BGE_JSLOTS fixed-size slots, all placed on
 * the free list.  Returns 0 on success or ENOBUFS if the contiguous
 * allocation failed.
 */
static int
bge_alloc_jumbo_mem(struct bge_softc *sc)
{
	struct bge_jslot *entry;
	caddr_t ptr;
	int i;

	/* Grab a big chunk o' storage. */
	sc->bge_cdata.bge_jumbo_buf = contigmalloc(BGE_JMEM, M_DEVBUF,
	    M_WAITOK, 0, 0xffffffff, PAGE_SIZE, 0);

	if (sc->bge_cdata.bge_jumbo_buf == NULL) {
		if_printf(&sc->arpcom.ac_if, "no memory for jumbo buffers!\n");
		return(ENOBUFS);
	}

	SLIST_INIT(&sc->bge_jfree_listhead);

	/*
	 * Now divide it up into BGE_JLEN pieces.  Each bge_jslot records
	 * the softc pointer, its buffer address, its slot index and an
	 * in-use count; bge_jfree()/bge_jref() get the jslot back as
	 * their ext_arg, which is how they find the softc.
	 */
	ptr = sc->bge_cdata.bge_jumbo_buf;
	for (i = 0; i < BGE_JSLOTS; i++) {
		entry = &sc->bge_cdata.bge_jslots[i];
		entry->bge_sc = sc;
		entry->bge_buf = ptr;
		entry->bge_inuse = 0;
		entry->bge_slot = i;
		SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, entry, jslot_link);
		ptr += BGE_JLEN;
	}

	return(0);
}
655
656static void
33c39a69 657bge_free_jumbo_mem(struct bge_softc *sc)
984263bc 658{
9a717c15
JS
659 if (sc->bge_cdata.bge_jumbo_buf)
660 contigfree(sc->bge_cdata.bge_jumbo_buf, BGE_JMEM, M_DEVBUF);
984263bc
MD
661}
662
663/*
664 * Allocate a jumbo buffer.
665 */
2aa9b12f 666static struct bge_jslot *
33c39a69 667bge_jalloc(struct bge_softc *sc)
984263bc 668{
2aa9b12f 669 struct bge_jslot *entry;
33c39a69 670
16dca0df 671 lwkt_serialize_enter(&sc->bge_jslot_serializer);
984263bc 672 entry = SLIST_FIRST(&sc->bge_jfree_listhead);
16dca0df
MD
673 if (entry) {
674 SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jslot_link);
675 entry->bge_inuse = 1;
676 } else {
c6fd6f3b 677 if_printf(&sc->arpcom.ac_if, "no free jumbo buffers\n");
984263bc 678 }
16dca0df 679 lwkt_serialize_exit(&sc->bge_jslot_serializer);
2aa9b12f 680 return(entry);
984263bc
MD
681}
682
683/*
684 * Adjust usage count on a jumbo buffer.
685 */
/*
 * Adjust usage count on a jumbo buffer.  Installed as the mbuf
 * ext_ref hook, so it runs whenever an mbuf referencing the buffer is
 * duplicated.  Panics on any inconsistency (foreign slot, or a slot
 * that is not currently in use).
 */
static void
bge_jref(void *arg)
{
	struct bge_jslot *entry = (struct bge_jslot *)arg;
	struct bge_softc *sc = entry->bge_sc;

	if (sc == NULL)
		panic("bge_jref: can't find softc pointer!");

	if (&sc->bge_cdata.bge_jslots[entry->bge_slot] != entry) {
		panic("bge_jref: asked to reference buffer "
		    "that we don't manage!");
	} else if (entry->bge_inuse == 0) {
		panic("bge_jref: buffer already free!");
	} else {
		/* Atomic increment: may race bge_jfree() on another cpu. */
		atomic_add_int(&entry->bge_inuse, 1);
	}
}
704
705/*
706 * Release a jumbo buffer.
707 */
/*
 * Release a jumbo buffer.  Installed as the mbuf ext_free hook.
 * Drops one reference; when the count reaches zero the slot goes back
 * on the free list.  Panics on any inconsistency (foreign slot, or a
 * slot that is already free).
 */
static void
bge_jfree(void *arg)
{
	struct bge_jslot *entry = (struct bge_jslot *)arg;
	struct bge_softc *sc = entry->bge_sc;

	if (sc == NULL)
		panic("bge_jfree: can't find softc pointer!");

	if (&sc->bge_cdata.bge_jslots[entry->bge_slot] != entry) {
		panic("bge_jfree: asked to free buffer that we don't manage!");
	} else if (entry->bge_inuse == 0) {
		panic("bge_jfree: buffer already free!");
	} else {
		/*
		 * Possible MP race to 0, use the serializer. The atomic insn
		 * is still needed for races against bge_jref().
		 */
		lwkt_serialize_enter(&sc->bge_jslot_serializer);
		atomic_subtract_int(&entry->bge_inuse, 1);
		if (entry->bge_inuse == 0) {
			SLIST_INSERT_HEAD(&sc->bge_jfree_listhead,
			    entry, jslot_link);
		}
		lwkt_serialize_exit(&sc->bge_jslot_serializer);
	}
}
735
736
737/*
738 * Intialize a standard receive ring descriptor.
739 */
/*
 * Initialize a standard receive ring descriptor.  If 'm' is NULL a
 * fresh mbuf cluster is allocated; otherwise the caller's mbuf is
 * recycled in place.  Returns 0 on success or ENOBUFS if no cluster
 * could be obtained.
 */
static int
bge_newbuf_std(struct bge_softc *sc, int i, struct mbuf *m)
{
	struct mbuf *m_new = NULL;
	struct bge_rx_bd *r;

	if (m == NULL) {
		m_new = m_getcl(MB_DONTWAIT, MT_DATA, M_PKTHDR);
		if (m_new == NULL)
			return (ENOBUFS);
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
	} else {
		/* Recycle: reset lengths and point data at the cluster. */
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
		m_new->m_data = m_new->m_ext.ext_buf;
	}

	/*
	 * Unless this chip has the RX alignment bug, shift the payload
	 * by ETHER_ALIGN so the IP header is 32-bit aligned.
	 */
	if (!sc->bge_rx_alignment_bug)
		m_adj(m_new, ETHER_ALIGN);
	sc->bge_cdata.bge_rx_std_chain[i] = m_new;
	r = &sc->bge_rdata->bge_rx_std_ring[i];
	/* The descriptor carries the physical address of the data. */
	BGE_HOSTADDR(r->bge_addr, vtophys(mtod(m_new, caddr_t)));
	r->bge_flags = BGE_RXBDFLAG_END;
	r->bge_len = m_new->m_len;
	r->bge_idx = i;

	return(0);
}
768
769/*
770 * Initialize a jumbo receive ring descriptor. This allocates
771 * a jumbo buffer from the pool managed internally by the driver.
772 */
/*
 * Initialize a jumbo receive ring descriptor. This allocates
 * a jumbo buffer from the pool managed internally by the driver.
 * If 'm' is NULL a new mbuf header and jumbo slot are obtained;
 * otherwise the caller's mbuf (which already owns a slot) is
 * recycled.  Returns 0 on success or ENOBUFS.
 */
static int
bge_newbuf_jumbo(struct bge_softc *sc, int i, struct mbuf *m)
{
	struct mbuf *m_new = NULL;
	struct bge_rx_bd *r;

	if (m == NULL) {
		struct bge_jslot *buf;

		/* Allocate the mbuf. */
		MGETHDR(m_new, MB_DONTWAIT, MT_DATA);
		if (m_new == NULL)
			return(ENOBUFS);

		/* Allocate the jumbo buffer */
		buf = bge_jalloc(sc);
		if (buf == NULL) {
			m_freem(m_new);
			if_printf(&sc->arpcom.ac_if, "jumbo allocation failed "
			    "-- packet dropped!\n");
			return(ENOBUFS);
		}

		/*
		 * Attach the buffer to the mbuf as external storage;
		 * bge_jfree/bge_jref get 'buf' back via ext_arg.
		 */
		m_new->m_ext.ext_arg = buf;
		m_new->m_ext.ext_buf = buf->bge_buf;
		m_new->m_ext.ext_free = bge_jfree;
		m_new->m_ext.ext_ref = bge_jref;
		m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN;

		m_new->m_data = m_new->m_ext.ext_buf;
		m_new->m_flags |= M_EXT;
		m_new->m_len = m_new->m_pkthdr.len = m_new->m_ext.ext_size;
	} else {
		/* Recycle: rewind data pointer and restore ext size. */
		m_new = m;
		m_new->m_data = m_new->m_ext.ext_buf;
		m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN;
	}

	/* Align payload unless this chip has the RX alignment bug. */
	if (!sc->bge_rx_alignment_bug)
		m_adj(m_new, ETHER_ALIGN);
	/* Set up the descriptor. */
	r = &sc->bge_rdata->bge_rx_jumbo_ring[i];
	sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new;
	BGE_HOSTADDR(r->bge_addr, vtophys(mtod(m_new, caddr_t)));
	r->bge_flags = BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING;
	r->bge_len = m_new->m_len;
	r->bge_idx = i;

	return(0);
}
824
825/*
826 * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
827 * that's 1MB or memory, which is a lot. For now, we fill only the first
828 * 256 ring entries and hope that our CPU is fast enough to keep up with
829 * the NIC.
830 */
831static int
33c39a69 832bge_init_rx_ring_std(struct bge_softc *sc)
984263bc
MD
833{
834 int i;
835
836 for (i = 0; i < BGE_SSLOTS; i++) {
837 if (bge_newbuf_std(sc, i, NULL) == ENOBUFS)
838 return(ENOBUFS);
839 };
840
841 sc->bge_std = i - 1;
842 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
843
844 return(0);
845}
846
847static void
33c39a69 848bge_free_rx_ring_std(struct bge_softc *sc)
984263bc
MD
849{
850 int i;
851
852 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
853 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
854 m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
855 sc->bge_cdata.bge_rx_std_chain[i] = NULL;
856 }
33c39a69 857 bzero(&sc->bge_rdata->bge_rx_std_ring[i],
984263bc
MD
858 sizeof(struct bge_rx_bd));
859 }
984263bc
MD
860}
861
862static int
33c39a69 863bge_init_rx_ring_jumbo(struct bge_softc *sc)
984263bc
MD
864{
865 int i;
866 struct bge_rcb *rcb;
867
868 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
869 if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
870 return(ENOBUFS);
871 };
872
873 sc->bge_jumbo = i - 1;
874
875 rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
876 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 0);
877 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
878
879 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
880
881 return(0);
882}
883
884static void
33c39a69 885bge_free_rx_ring_jumbo(struct bge_softc *sc)
984263bc
MD
886{
887 int i;
888
889 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
890 if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
891 m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
892 sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
893 }
33c39a69 894 bzero(&sc->bge_rdata->bge_rx_jumbo_ring[i],
984263bc
MD
895 sizeof(struct bge_rx_bd));
896 }
984263bc
MD
897}
898
899static void
33c39a69 900bge_free_tx_ring(struct bge_softc *sc)
984263bc
MD
901{
902 int i;
903
904 if (sc->bge_rdata->bge_tx_ring == NULL)
905 return;
906
907 for (i = 0; i < BGE_TX_RING_CNT; i++) {
908 if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
909 m_freem(sc->bge_cdata.bge_tx_chain[i]);
910 sc->bge_cdata.bge_tx_chain[i] = NULL;
911 }
33c39a69 912 bzero(&sc->bge_rdata->bge_tx_ring[i],
984263bc
MD
913 sizeof(struct bge_tx_bd));
914 }
984263bc
MD
915}
916
/*
 * Reset the software TX ring state and push zero producer indices to
 * the chip.  The duplicated mailbox writes are required by the 5700
 * B2 errata — do not reorder or remove them.  Always returns 0.
 */
static int
bge_init_tx_ring(struct bge_softc *sc)
{
	sc->bge_txcnt = 0;
	sc->bge_tx_saved_considx = 0;
	sc->bge_tx_prodidx = 0;

	/* Initialize transmit producer index for host-memory send ring. */
	CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);

	/* 5700 b2 errata */
	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
		CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0);

	CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
	/* 5700 b2 errata */
	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
		CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);

	return(0);
}
938
984263bc 939static void
33c39a69 940bge_setmulti(struct bge_softc *sc)
984263bc
MD
941{
942 struct ifnet *ifp;
943 struct ifmultiaddr *ifma;
33c39a69 944 uint32_t hashes[4] = { 0, 0, 0, 0 };
984263bc
MD
945 int h, i;
946
947 ifp = &sc->arpcom.ac_if;
948
949 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
950 for (i = 0; i < 4; i++)
951 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
952 return;
953 }
954
955 /* First, zot all the existing filters. */
956 for (i = 0; i < 4; i++)
957 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);
958
959 /* Now program new ones. */
33c39a69 960 LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
984263bc
MD
961 if (ifma->ifma_addr->sa_family != AF_LINK)
962 continue;
3b4ec5b8
JS
963 h = ether_crc32_le(
964 LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
965 ETHER_ADDR_LEN) & 0x7f;
984263bc
MD
966 hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
967 }
968
969 for (i = 0; i < 4; i++)
970 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
984263bc
MD
971}
972
973/*
974 * Do endian, PCI and DMA initialization. Also check the on-board ROM
975 * self-test results.
976 */
/*
 * bge_chipinit: endianness, PCI DMA and general-mode initialization.
 *
 * Must run before any other non-PCI register access.  Verifies the
 * RX CPU's on-board ROM self-test, clears the NIC-internal stats and
 * status blocks, and programs the PCI DMA read/write watermarks
 * according to bus type (PCI Express / conventional PCI / PCI-X).
 *
 * Returns 0 on success, ENODEV if the ROM self-test failed.
 */
static int
bge_chipinit(struct bge_softc *sc)
{
	int i;
	uint32_t dma_rw_ctl;

	/* Set endianness before we access any non-PCI registers. */
#if BYTE_ORDER == BIG_ENDIAN
	pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL,
	    BGE_BIGENDIAN_INIT, 4);
#else
	pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL,
	    BGE_LITTLEENDIAN_INIT, 4);
#endif

	/*
	 * Check the 'ROM failed' bit on the RX CPU to see if
	 * self-tests passed.
	 */
	if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL) {
		if_printf(&sc->arpcom.ac_if,
			  "RX CPU self-diagnostics failed!\n");
		return(ENODEV);
	}

	/* Clear the MAC control register */
	CSR_WRITE_4(sc, BGE_MAC_MODE, 0);

	/*
	 * Clear the MAC statistics block in the NIC's
	 * internal memory.
	 */
	for (i = BGE_STATS_BLOCK;
	    i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t))
		BGE_MEMWIN_WRITE(sc, i, 0);

	/* Likewise for the status block. */
	for (i = BGE_STATUS_BLOCK;
	    i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t))
		BGE_MEMWIN_WRITE(sc, i, 0);

	/* Set up the PCI DMA control register. */
	if (sc->bge_pcie) {
		/* PCI Express */
		dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
		    (0xf << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
		    (0x2 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
	} else if (pci_read_config(sc->bge_dev, BGE_PCI_PCISTATE, 4) &
	    BGE_PCISTATE_PCI_BUSMODE) {
		/* Conventional PCI bus */
		dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
		    (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
		    (0x7 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
		    (0x0F);
	} else {
		/* PCI-X bus */
		/*
		 * The 5704 uses a different encoding of read/write
		 * watermarks.
		 */
		if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
			dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
			    (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
			    (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
		else
			dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
			    (0x3 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
			    (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
			    (0x0F);

		/*
		 * 5703 and 5704 need ONEDMA_AT_ONCE as a workaround
		 * for hardware bugs.
		 */
		if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
		    sc->bge_asicrev == BGE_ASICREV_BCM5704) {
			uint32_t tmp;

			/* Low 5 bits of CLKCTL select the clock mode. */
			tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1f;
			if (tmp == 0x6 || tmp == 0x7)
				dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE;
		}
	}

	/* These chips do not want the minimum-DMA heuristic. */
	if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5704 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5750)
		dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
	pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);

	/*
	 * Set up general mode register.
	 */
	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_MODECTL_WORDSWAP_NONFRAME|
	    BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA|
	    BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS|
	    BGE_MODECTL_TX_NO_PHDR_CSUM|BGE_MODECTL_RX_NO_PHDR_CSUM);

	/*
	 * Disable memory write invalidate.  Apparently it is not supported
	 * properly by these devices.
	 */
	PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD, PCIM_CMD_MWIEN, 4);

	/* Set the timer prescaler (always 66Mhz) */
	CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/);

	return(0);
}
1086
/*
 * bge_blockinit: bring up every functional block of the controller.
 *
 * Programs the buffer manager, flow-through queues, RX/TX ring control
 * blocks (RCBs, both in host and NIC memory), host coalescing engine,
 * and finally enables the chip's many DMA/completion state machines.
 * Several steps are skipped on 5705/5750-class parts, which lack the
 * jumbo/mini rings, external SSRAM support and some state machines.
 *
 * The order of register writes below follows the chip's documented
 * bring-up sequence — do not reorder.
 *
 * Returns 0 on success, ENXIO if a block fails to start within
 * BGE_TIMEOUT polls.
 */
static int
bge_blockinit(struct bge_softc *sc)
{
	struct bge_rcb *rcb;
	volatile struct bge_rcb *vrcb;	/* points into NIC memory window */
	int i;

	/*
	 * Initialize the memory window pointer register so that
	 * we can access the first 32K of internal NIC RAM. This will
	 * allow us to set up the TX send ring RCBs and the RX return
	 * ring RCBs, plus other things which live in NIC memory.
	 */
	CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);

	/* Note: the BCM5704 has a smaller mbuf space than other chips. */

	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
	    sc->bge_asicrev != BGE_ASICREV_BCM5750) {
		/* Configure mbuf memory pool */
		if (sc->bge_extram) {
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
			    BGE_EXT_SSRAM);
			if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
				CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
			else
				CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
		} else {
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
			    BGE_BUFFPOOL_1);
			if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
				CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
			else
				CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
		}

		/* Configure DMA resource pool */
		CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
		    BGE_DMA_DESCRIPTORS);
		CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
	}

	/* Configure mbuf pool watermarks */
	if (sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5750) {
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
	} else {
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
	}
	CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);

	/* Configure DMA resource watermarks */
	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);

	/* Enable buffer manager */
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
	    sc->bge_asicrev != BGE_ASICREV_BCM5750) {
		CSR_WRITE_4(sc, BGE_BMAN_MODE,
		    BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN);

		/* Poll for buffer manager start indication */
		for (i = 0; i < BGE_TIMEOUT; i++) {
			if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
				break;
			DELAY(10);
		}

		if (i == BGE_TIMEOUT) {
			if_printf(&sc->arpcom.ac_if,
				  "buffer manager failed to start\n");
			return(ENXIO);
		}
	}

	/* Enable flow-through queues */
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);

	/* Wait until queue initialization is complete */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
			break;
		DELAY(10);
	}

	if (i == BGE_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if,
			  "flow-through queue init failed\n");
		return(ENXIO);
	}

	/* Initialize the standard RX ring control block */
	rcb = &sc->bge_rdata->bge_info.bge_std_rx_rcb;
	BGE_HOSTADDR(rcb->bge_hostaddr,
	    vtophys(&sc->bge_rdata->bge_rx_std_ring));
	if (sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5750)
		rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
	else
		rcb->bge_maxlen_flags =
			BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
	if (sc->bge_extram)
		rcb->bge_nicaddr = BGE_EXT_STD_RX_RINGS;
	else
		rcb->bge_nicaddr = BGE_STD_RX_RINGS;
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);

	/*
	 * Initialize the jumbo RX ring control block
	 * We set the 'ring disabled' bit in the flags
	 * field until we're actually ready to start
	 * using this ring (i.e. once we set the MTU
	 * high enough to require it).
	 */
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
	    sc->bge_asicrev != BGE_ASICREV_BCM5750) {
		rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
		BGE_HOSTADDR(rcb->bge_hostaddr,
		    vtophys(&sc->bge_rdata->bge_rx_jumbo_ring));
		rcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN,
		    BGE_RCB_FLAG_RING_DISABLED);
		if (sc->bge_extram)
			rcb->bge_nicaddr = BGE_EXT_JUMBO_RX_RINGS;
		else
			rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
		    rcb->bge_hostaddr.bge_addr_hi);
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
		    rcb->bge_hostaddr.bge_addr_lo);
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
		    rcb->bge_maxlen_flags);
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);

		/* Set up dummy disabled mini ring RCB */
		rcb = &sc->bge_rdata->bge_info.bge_mini_rx_rcb;
		rcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
		CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
		    rcb->bge_maxlen_flags);
	}

	/*
	 * Set the BD ring replentish thresholds. The recommended
	 * values are 1/8th the number of descriptors allocated to
	 * each ring.
	 */
	CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, BGE_STD_RX_RING_CNT/8);
	CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8);

	/*
	 * Disable all unused send rings by setting the 'ring disabled'
	 * bit in the flags field of all the TX send ring control blocks.
	 * These are located in NIC memory.
	 */
	vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START +
	    BGE_SEND_RING_RCB);
	for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) {
		vrcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
		vrcb->bge_nicaddr = 0;
		vrcb++;
	}

	/* Configure TX RCB 0 (we use only the first ring) */
	vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START +
	    BGE_SEND_RING_RCB);
	vrcb->bge_hostaddr.bge_addr_hi = 0;
	BGE_HOSTADDR(vrcb->bge_hostaddr, vtophys(&sc->bge_rdata->bge_tx_ring));
	vrcb->bge_nicaddr = BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT);
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
	    sc->bge_asicrev != BGE_ASICREV_BCM5750)
		vrcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0);

	/* Disable all unused RX return rings */
	vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START +
	    BGE_RX_RETURN_RING_RCB);
	for (i = 0; i < BGE_RX_RINGS_MAX; i++) {
		vrcb->bge_hostaddr.bge_addr_hi = 0;
		vrcb->bge_hostaddr.bge_addr_lo = 0;
		vrcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt,
		    BGE_RCB_FLAG_RING_DISABLED);
		vrcb->bge_nicaddr = 0;
		/* Also zero the matching consumer mailbox. */
		CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO +
		    (i * (sizeof(uint64_t))), 0);
		vrcb++;
	}

	/* Initialize RX ring indexes */
	CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, 0);
	CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
	CSR_WRITE_4(sc, BGE_MBX_RX_MINI_PROD_LO, 0);

	/*
	 * Set up RX return ring 0
	 * Note that the NIC address for RX return rings is 0x00000000.
	 * The return rings live entirely within the host, so the
	 * nicaddr field in the RCB isn't used.
	 */
	vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START +
	    BGE_RX_RETURN_RING_RCB);
	vrcb->bge_hostaddr.bge_addr_hi = 0;
	BGE_HOSTADDR(vrcb->bge_hostaddr,
	    vtophys(&sc->bge_rdata->bge_rx_return_ring));
	vrcb->bge_nicaddr = 0x00000000;
	vrcb->bge_maxlen_flags =
	    BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0);

	/* Set random backoff seed for TX */
	CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
	    sc->arpcom.ac_enaddr[0] + sc->arpcom.ac_enaddr[1] +
	    sc->arpcom.ac_enaddr[2] + sc->arpcom.ac_enaddr[3] +
	    sc->arpcom.ac_enaddr[4] + sc->arpcom.ac_enaddr[5] +
	    BGE_TX_BACKOFF_SEED_MASK);

	/* Set inter-packet gap */
	CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);

	/*
	 * Specify which ring to use for packets that don't match
	 * any RX rules.
	 */
	CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);

	/*
	 * Configure number of RX lists. One interrupt distribution
	 * list, sixteen active lists, one bad frames class.
	 */
	CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);

	/* Inialize RX list placement stats mask. */
	CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
	CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);

	/* Disable host coalescing until we get it set up */
	CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);

	/* Poll to make sure it's shut down. */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
			break;
		DELAY(10);
	}

	if (i == BGE_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if,
			  "host coalescing engine failed to idle\n");
		return(ENXIO);
	}

	/* Set up host coalescing defaults */
	CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
	CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
	    sc->bge_asicrev != BGE_ASICREV_BCM5750) {
		CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
		CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
	}
	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0);
	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0);

	/* Set up address of statistics block */
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
	    sc->bge_asicrev != BGE_ASICREV_BCM5750) {
		CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI, 0);
		CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
		    vtophys(&sc->bge_rdata->bge_info.bge_stats));

		CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
		CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
		CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
	}

	/* Set up address of status block */
	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI, 0);
	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
	    vtophys(&sc->bge_rdata->bge_status_block));

	sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx = 0;
	sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx = 0;

	/* Turn on host coalescing state machine */
	CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);

	/* Turn on RX BD completion state machine and enable attentions */
	CSR_WRITE_4(sc, BGE_RBDC_MODE,
	    BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);

	/* Turn on RX list placement state machine */
	CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);

	/* Turn on RX list selector state machine. */
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
	    sc->bge_asicrev != BGE_ASICREV_BCM5750)
		CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);

	/* Turn on DMA, clear stats */
	CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB|
	    BGE_MACMODE_RXDMA_ENB|BGE_MACMODE_RX_STATS_CLEAR|
	    BGE_MACMODE_TX_STATS_CLEAR|BGE_MACMODE_RX_STATS_ENB|
	    BGE_MACMODE_TX_STATS_ENB|BGE_MACMODE_FRMHDR_DMA_ENB|
	    (sc->bge_tbi ? BGE_PORTMODE_TBI : BGE_PORTMODE_MII));

	/* Set misc. local control, enable interrupts on attentions */
	CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);

#ifdef notdef
	/* Assert GPIO pins for PHY reset */
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
	    BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
	    BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
#endif

	/* Turn on DMA completion state machine */
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
	    sc->bge_asicrev != BGE_ASICREV_BCM5750)
		CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);

	/* Turn on write DMA state machine */
	CSR_WRITE_4(sc, BGE_WDMA_MODE,
	    BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS);

	/* Turn on read DMA state machine */
	CSR_WRITE_4(sc, BGE_RDMA_MODE,
	    BGE_RDMAMODE_ENABLE|BGE_RDMAMODE_ALL_ATTNS);

	/* Turn on RX data completion state machine */
	CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);

	/* Turn on RX BD initiator state machine */
	CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);

	/* Turn on RX data and RX BD initiator state machine */
	CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);

	/* Turn on Mbuf cluster free state machine */
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
	    sc->bge_asicrev != BGE_ASICREV_BCM5750)
		CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);

	/* Turn on send BD completion state machine */
	CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);

	/* Turn on send data completion state machine */
	CSR_WRITE_4(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);

	/* Turn on send data initiator state machine */
	CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);

	/* Turn on send BD initiator state machine */
	CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);

	/* Turn on send BD selector state machine */
	CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);

	/* Enable send-data-initiator statistics. */
	CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
	CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
	    BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);

	/* ack/clear link change events */
	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
	    BGE_MACSTAT_LINK_CHANGED);

	/* Enable PHY auto polling (for MII/GMII only) */
	if (sc->bge_tbi) {
		CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
	} else {
		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL|10<<16);
		if (sc->bge_asicrev == BGE_ASICREV_BCM5700)
			CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
			    BGE_EVTENB_MI_INTERRUPT);
	}

	/* Enable link state change attentions. */
	BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);

	return(0);
}
1477
1478/*
1479 * Probe for a Broadcom chip. Check the PCI vendor and device IDs
1480 * against our list and return its name if we find a match. Note
1481 * that since the Broadcom controller contains VPD support, we
1482 * can get the device name string from the controller itself instead
1483 * of the compiled-in string. This is a little slow, but it guarantees
1484 * we'll always announce the right product name.
1485 */
1486static int
33c39a69 1487bge_probe(device_t dev)
984263bc 1488{
984263bc 1489 struct bge_softc *sc;
33c39a69 1490 struct bge_type *t;
984263bc 1491 char *descbuf;
33c39a69
JS
1492 uint16_t product, vendor;
1493
1494 product = pci_get_device(dev);
1495 vendor = pci_get_vendor(dev);
1496
1497 for (t = bge_devs; t->bge_name != NULL; t++) {
1498 if (vendor == t->bge_vid && product == t->bge_did)
1499 break;
1500 }
984263bc 1501
33c39a69
JS
1502 if (t->bge_name == NULL)
1503 return(ENXIO);
984263bc
MD
1504
1505 sc = device_get_softc(dev);
33c39a69 1506#ifdef notdef
984263bc
MD
1507 sc->bge_dev = dev;
1508
33c39a69
JS
1509 bge_vpd_read(sc);
1510 device_set_desc(dev, sc->bge_vpd_prodname);
984263bc 1511#endif
efda3bd0 1512 descbuf = kmalloc(BGE_DEVDESC_MAX, M_TEMP, M_WAITOK);
f8c7a42d 1513 ksnprintf(descbuf, BGE_DEVDESC_MAX, "%s, ASIC rev. %#04x", t->bge_name,
33c39a69
JS
1514 pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >> 16);
1515 device_set_desc_copy(dev, descbuf);
1516 if (pci_get_subvendor(dev) == PCI_VENDOR_DELL)
1517 sc->bge_no_3_led = 1;
efda3bd0 1518 kfree(descbuf, M_TEMP);
33c39a69 1519 return(0);
984263bc
MD
1520}
1521
/*
 * bge_attach: device attach entry point.
 *
 * Maps BAR0 and the IRQ, identifies the chip revision (folding 5714/5752
 * into the 5750 class and sniffing PCI Express capability), resets and
 * initializes the chip, reads the station address from NIC memory or
 * EEPROM, allocates the ring data area (and jumbo buffers on chips that
 * support them), sets up the ifnet, determines the media type (copper
 * MII vs. TBI fiber), attaches the ethernet layer and hooks the
 * interrupt.  On any failure after the memory mapping succeeds, cleanup
 * is delegated to bge_detach() via the 'fail' label.
 *
 * Returns 0 on success or an errno on failure.
 */
static int
bge_attach(device_t dev)
{
	struct ifnet *ifp;
	struct bge_softc *sc;
	uint32_t hwcfg = 0;
	uint32_t mac_addr = 0;
	int error = 0, rid;
	uint8_t ether_addr[ETHER_ADDR_LEN];

	sc = device_get_softc(dev);
	sc->bge_dev = dev;
	callout_init(&sc->bge_stat_timer);
	lwkt_serialize_init(&sc->bge_jslot_serializer);

	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

	rid = BGE_PCI_BAR0;
	sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);

	if (sc->bge_res == NULL) {
		device_printf(dev, "couldn't map memory\n");
		error = ENXIO;
		/* Nothing acquired yet; safe to return without detach. */
		return(error);
	}

	sc->bge_btag = rman_get_bustag(sc->bge_res);
	sc->bge_bhandle = rman_get_bushandle(sc->bge_res);
	sc->bge_vhandle = (vm_offset_t)rman_get_virtual(sc->bge_res);

	/* Allocate interrupt */
	rid = 0;

	sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);

	if (sc->bge_irq == NULL) {
		device_printf(dev, "couldn't map interrupt\n");
		error = ENXIO;
		goto fail;
	}

	/* Save ASIC rev. */
	sc->bge_chipid =
	    pci_read_config(dev, BGE_PCI_MISC_CTL, 4) &
	    BGE_PCIMISCCTL_ASICREV;
	sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid);
	sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid);

	/*
	 * Treat the 5714 and the 5752 like the 5750 until we have more info
	 * on this chip.
	 */
	if (sc->bge_asicrev == BGE_ASICREV_BCM5714 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5752)
		sc->bge_asicrev = BGE_ASICREV_BCM5750;

	/*
	 * XXX: Broadcom Linux driver.  Not in specs or eratta.
	 * PCI-Express?
	 */
	if (sc->bge_asicrev == BGE_ASICREV_BCM5750) {
		uint32_t v;

		v = pci_read_config(dev, BGE_PCI_MSI_CAPID, 4);
		if (((v >> 8) & 0xff) == BGE_PCIE_MSI_CAPID) {
			v = pci_read_config(dev, BGE_PCIE_MSI_CAPID, 4);
			if ((v & 0xff) == BGE_PCIE_MSI_CAPID_VAL)
				sc->bge_pcie = 1;
		}
	}

	ifp = &sc->arpcom.ac_if;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));

	/* Try to reset the chip. */
	bge_reset(sc);

	if (bge_chipinit(sc)) {
		device_printf(dev, "chip initialization failed\n");
		error = ENXIO;
		goto fail;
	}

	/*
	 * Get station address from the EEPROM.
	 */
	mac_addr = bge_readmem_ind(sc, 0x0c14);
	/* 0x484b == "HK": magic marker that a MAC is stored in NIC RAM. */
	if ((mac_addr >> 16) == 0x484b) {
		ether_addr[0] = (uint8_t)(mac_addr >> 8);
		ether_addr[1] = (uint8_t)mac_addr;
		mac_addr = bge_readmem_ind(sc, 0x0c18);
		ether_addr[2] = (uint8_t)(mac_addr >> 24);
		ether_addr[3] = (uint8_t)(mac_addr >> 16);
		ether_addr[4] = (uint8_t)(mac_addr >> 8);
		ether_addr[5] = (uint8_t)mac_addr;
	} else if (bge_read_eeprom(sc, ether_addr,
	    BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) {
		device_printf(dev, "failed to read station address\n");
		error = ENXIO;
		goto fail;
	}

	/* Allocate the general information block and ring buffers. */
	sc->bge_rdata = contigmalloc(sizeof(struct bge_ring_data), M_DEVBUF,
	    M_WAITOK, 0, 0xffffffff, PAGE_SIZE, 0);

	if (sc->bge_rdata == NULL) {
		error = ENXIO;
		device_printf(dev, "no memory for list buffers!\n");
		goto fail;
	}

	bzero(sc->bge_rdata, sizeof(struct bge_ring_data));

	/*
	 * Try to allocate memory for jumbo buffers.
	 * The 5705/5750 does not appear to support jumbo frames.
	 */
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
	    sc->bge_asicrev != BGE_ASICREV_BCM5750) {
		if (bge_alloc_jumbo_mem(sc)) {
			device_printf(dev, "jumbo buffer allocation failed\n");
			error = ENXIO;
			goto fail;
		}
	}

	/* Set default tuneable values. */
	sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
	sc->bge_rx_coal_ticks = 150;
	sc->bge_tx_coal_ticks = 150;
	sc->bge_rx_max_coal_bds = 64;
	sc->bge_tx_max_coal_bds = 128;

	/* 5705/5750 limits RX return ring to 512 entries. */
	if (sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5750)
		sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
	else
		sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;

	/* Set up ifnet structure */
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = bge_ioctl;
	ifp->if_start = bge_start;
	ifp->if_watchdog = bge_watchdog;
	ifp->if_init = bge_init;
	ifp->if_mtu = ETHERMTU;
	ifq_set_maxlen(&ifp->if_snd, BGE_TX_RING_CNT - 1);
	ifq_set_ready(&ifp->if_snd);
	ifp->if_hwassist = BGE_CSUM_FEATURES;
	ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING |
	    IFCAP_VLAN_MTU;
	ifp->if_capenable = ifp->if_capabilities;

	/*
	 * Figure out what sort of media we have by checking the
	 * hardware config word in the first 32k of NIC internal memory,
	 * or fall back to examining the EEPROM if necessary.
	 * Note: on some BCM5700 cards, this value appears to be unset.
	 * If that's the case, we have to rely on identifying the NIC
	 * by its PCI subsystem ID, as we do below for the SysKonnect
	 * SK-9D41.
	 */
	if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER)
		hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
	else {
		/* EEPROM read can time out; propagate the failure. */
		if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
		    sizeof(hwcfg))) {
			device_printf(dev, "failed to read EEPROM\n");
			error = ENXIO;
			goto fail;
		}
		hwcfg = ntohl(hwcfg);
	}

	if ((hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER)
		sc->bge_tbi = 1;

	/* The SysKonnect SK-9D41 is a 1000baseSX card. */
	if (pci_get_subvendor(dev) == PCI_PRODUCT_SCHNEIDERKOCH_SK_9D41)
		sc->bge_tbi = 1;

	if (sc->bge_tbi) {
		ifmedia_init(&sc->bge_ifmedia, IFM_IMASK,
		    bge_ifmedia_upd, bge_ifmedia_sts);
		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
		ifmedia_add(&sc->bge_ifmedia,
		    IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL);
		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
		ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO);
		sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
	} else {
		/*
		 * Do transceiver setup.
		 */
		if (mii_phy_probe(dev, &sc->bge_miibus,
		    bge_ifmedia_upd, bge_ifmedia_sts)) {
			device_printf(dev, "MII without any PHY!\n");
			error = ENXIO;
			goto fail;
		}
	}

	/*
	 * When using the BCM5701 in PCI-X mode, data corruption has
	 * been observed in the first few bytes of some received packets.
	 * Aligning the packet buffer in memory eliminates the corruption.
	 * Unfortunately, this misaligns the packet payloads.  On platforms
	 * which do not support unaligned accesses, we will realign the
	 * payloads by copying the received packets.
	 */
	switch (sc->bge_chipid) {
	case BGE_CHIPID_BCM5701_A0:
	case BGE_CHIPID_BCM5701_B0:
	case BGE_CHIPID_BCM5701_B2:
	case BGE_CHIPID_BCM5701_B5:
		/* If in PCI-X mode, work around the alignment bug. */
		if ((pci_read_config(dev, BGE_PCI_PCISTATE, 4) &
		    (BGE_PCISTATE_PCI_BUSMODE | BGE_PCISTATE_PCI_BUSSPEED)) ==
		    BGE_PCISTATE_PCI_BUSSPEED)
			sc->bge_rx_alignment_bug = 1;
		break;
	}

	/*
	 * Call MI attach routine.
	 */
	ether_ifattach(ifp, ether_addr, NULL);

	error = bus_setup_intr(dev, sc->bge_irq, INTR_NETSAFE,
			       bge_intr, sc, &sc->bge_intrhand,
			       ifp->if_serializer);
	if (error) {
		ether_ifdetach(ifp);
		device_printf(dev, "couldn't set up irq\n");
		goto fail;
	}
	return(0);
fail:
	/* bge_detach() tolerates the partially-initialized softc. */
	bge_detach(dev);
	return(error);
}
1771
/*
 * bge_detach: device detach entry point (also used by bge_attach()'s
 * failure path, so every step must tolerate partial initialization).
 *
 * Stops and resets the chip and tears down the interrupt while holding
 * the interface serializer, detaches the ethernet layer, removes any
 * media/miibus children, and releases bus resources and ring memory.
 *
 * Always returns 0.
 */
static int
bge_detach(device_t dev)
{
	struct bge_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	if (device_is_attached(dev)) {
		lwkt_serialize_enter(ifp->if_serializer);
		bge_stop(sc);
		bge_reset(sc);
		bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand);
		lwkt_serialize_exit(ifp->if_serializer);

		ether_ifdetach(ifp);
	}
	if (sc->bge_tbi)
		ifmedia_removeall(&sc->bge_ifmedia);
	if (sc->bge_miibus)
		device_delete_child(dev, sc->bge_miibus);
	bus_generic_detach(dev);

	bge_release_resources(sc);

	/* Jumbo memory only exists on chips that support jumbo frames. */
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
	    sc->bge_asicrev != BGE_ASICREV_BCM5750)
		bge_free_jumbo_mem(sc);

	return(0);
}
1801
1802static void
33c39a69 1803bge_release_resources(struct bge_softc *sc)
984263bc
MD
1804{
1805 device_t dev;
1806
1807 dev = sc->bge_dev;
1808
1809 if (sc->bge_vpd_prodname != NULL)
efda3bd0 1810 kfree(sc->bge_vpd_prodname, M_DEVBUF);
984263bc
MD
1811
1812 if (sc->bge_vpd_readonly != NULL)
efda3bd0 1813 kfree(sc->bge_vpd_readonly, M_DEVBUF);
984263bc 1814
984263bc
MD
1815 if (sc->bge_irq != NULL)
1816 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->bge_irq);
1817
1818 if (sc->bge_res != NULL)
1819 bus_release_resource(dev, SYS_RES_MEMORY,
1820 BGE_PCI_BAR0, sc->bge_res);
1821
1822 if (sc->bge_rdata != NULL)
33c39a69
JS
1823 contigfree(sc->bge_rdata, sizeof(struct bge_ring_data),
1824 M_DEVBUF);
984263bc
MD
1825
1826 return;
1827}
1828
/*
 * Perform a full chip reset: save critical PCI config state, issue the
 * global reset, wait for the on-chip firmware handshake, then restore
 * PCI state and re-apply workarounds.  The register/delay ordering here
 * follows the vendor driver and must not be rearranged.
 */
static void
bge_reset(struct bge_softc *sc)
{
	device_t dev;
	uint32_t cachesize, command, pcistate, reset;
	int i, val = 0;

	dev = sc->bge_dev;

	/* Save some important PCI state. */
	cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
	command = pci_read_config(dev, BGE_PCI_CMD, 4);
	pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);

	pci_write_config(dev, BGE_PCI_MISC_CTL,
	    BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
	    BGE_PCIMISCCTL_ENDIAN_WORDSWAP|BGE_PCIMISCCTL_PCISTATE_RW, 4);

	reset = BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1);

	/* XXX: Broadcom Linux driver. */
	if (sc->bge_pcie) {
		if (CSR_READ_4(sc, 0x7e2c) == 0x60)	/* PCIE 1.0 */
			CSR_WRITE_4(sc, 0x7e2c, 0x20);
		if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
			/* Prevent PCIE link training during global reset */
			CSR_WRITE_4(sc, BGE_MISC_CFG, (1<<29));
			reset |= (1<<29);
		}
	}

	/* Issue global reset */
	bge_writereg_ind(sc, BGE_MISC_CFG, reset);

	DELAY(1000);

	/* XXX: Broadcom Linux driver. */
	if (sc->bge_pcie) {
		if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
			uint32_t v;

			DELAY(500000); /* wait for link training to complete */
			v = pci_read_config(dev, 0xc4, 4);
			pci_write_config(dev, 0xc4, v | (1<<15), 4);
		}
		/* Set PCIE max payload size and clear error status. */
		pci_write_config(dev, 0xd8, 0xf5000, 4);
	}

	/* Reset some of the PCI state that got zapped by reset */
	pci_write_config(dev, BGE_PCI_MISC_CTL,
	    BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
	    BGE_PCIMISCCTL_ENDIAN_WORDSWAP|BGE_PCIMISCCTL_PCISTATE_RW, 4);
	pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
	pci_write_config(dev, BGE_PCI_CMD, command, 4);
	bge_writereg_ind(sc, BGE_MISC_CFG, (65 << 1));

	/* Enable memory arbiter. */
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705)
		CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);

	/*
	 * Prevent PXE restart: write a magic number to the
	 * general communications memory at 0xB50.
	 */
	bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
	/*
	 * Poll the value location we just wrote until
	 * we see the 1's complement of the magic number.
	 * This indicates that the firmware initialization
	 * is complete.
	 */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
		if (val == ~BGE_MAGIC_NUMBER)
			break;
		DELAY(10);
	}

	if (i == BGE_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if, "firmware handshake timed out\n");
		return;
	}

	/*
	 * XXX Wait for the value of the PCISTATE register to
	 * return to its original pre-reset state. This is a
	 * fairly good indicator of reset completion. If we don't
	 * wait for the reset to fully complete, trying to read
	 * from the device's non-PCI registers may yield garbage
	 * results.
	 */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate)
			break;
		DELAY(10);
	}

	/* Fix up byte swapping */
	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_MODECTL_BYTESWAP_NONFRAME|
	    BGE_MODECTL_BYTESWAP_DATA);

	CSR_WRITE_4(sc, BGE_MAC_MODE, 0);

	/*
	 * The 5704 in TBI mode apparently needs some special
	 * adjustment to insure the SERDES drive level is set
	 * to 1.2V.
	 */
	if (sc->bge_asicrev == BGE_ASICREV_BCM5704 && sc->bge_tbi) {
		uint32_t serdescfg;

		serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG);
		serdescfg = (serdescfg & ~0xFFF) | 0x880;
		CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg);
	}

	/* XXX: Broadcom Linux driver. */
	if (sc->bge_pcie && sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
		uint32_t v;

		v = CSR_READ_4(sc, 0x7c00);
		CSR_WRITE_4(sc, 0x7c00, v | (1<<25));
	}

	DELAY(10000);
}
1956
1957/*
1958 * Frame reception handling. This is called if there's a frame
1959 * on the receive return list.
1960 *
1961 * Note: we have to be able to handle two possibilities here:
1962 * 1) the frame is from the jumbo recieve ring
1963 * 2) the frame is from the standard receive ring
1964 */
1965
static void
bge_rxeof(struct bge_softc *sc)
{
	struct ifnet *ifp;
	int stdcnt = 0, jumbocnt = 0;

	ifp = &sc->arpcom.ac_if;

	/* Drain the RX return ring up to the producer index in the status block. */
	while(sc->bge_rx_saved_considx !=
	    sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx) {
		struct bge_rx_bd *cur_rx;
		uint32_t rxidx;
		struct mbuf *m = NULL;
		uint16_t vlan_tag = 0;
		int have_tag = 0;

		cur_rx =
	    &sc->bge_rdata->bge_rx_return_ring[sc->bge_rx_saved_considx];

		rxidx = cur_rx->bge_idx;
		BGE_INC(sc->bge_rx_saved_considx, sc->bge_return_ring_cnt);

		if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
			have_tag = 1;
			vlan_tag = cur_rx->bge_vlan_tag;
		}

		if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
			BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
			m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
			sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL;
			jumbocnt++;
			/* On RX error, recycle the old mbuf back into the ring. */
			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
				ifp->if_ierrors++;
				bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
				continue;
			}
			/* No replacement mbuf available: recycle the old one. */
			if (bge_newbuf_jumbo(sc,
			    sc->bge_jumbo, NULL) == ENOBUFS) {
				ifp->if_ierrors++;
				bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
				continue;
			}
		} else {
			BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
			m = sc->bge_cdata.bge_rx_std_chain[rxidx];
			sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL;
			stdcnt++;
			/* Same error/allocation-failure recycling as the jumbo path. */
			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
				ifp->if_ierrors++;
				bge_newbuf_std(sc, sc->bge_std, m);
				continue;
			}
			if (bge_newbuf_std(sc, sc->bge_std,
			    NULL) == ENOBUFS) {
				ifp->if_ierrors++;
				bge_newbuf_std(sc, sc->bge_std, m);
				continue;
			}
		}

		ifp->if_ipackets++;
#ifndef __i386__
		/*
		 * The i386 allows unaligned accesses, but for other
		 * platforms we must make sure the payload is aligned.
		 */
		if (sc->bge_rx_alignment_bug) {
			bcopy(m->m_data, m->m_data + ETHER_ALIGN,
			    cur_rx->bge_len);
			m->m_data += ETHER_ALIGN;
		}
#endif
		/* bge_len includes the CRC, which we don't hand up the stack. */
		m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
		m->m_pkthdr.rcvif = ifp;

#if 0 /* currently broken for some packets, possibly related to TCP options */
		if (ifp->if_hwassist) {
			m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
			if ((cur_rx->bge_ip_csum ^ 0xffff) == 0)
				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
			if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) {
				m->m_pkthdr.csum_data =
				    cur_rx->bge_tcp_udp_csum;
				m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
			}
		}
#endif

		/*
		 * If we received a packet with a vlan tag, pass it
		 * to vlan_input() instead of ether_input().
		 */
		if (have_tag) {
			VLAN_INPUT_TAG(m, vlan_tag);
			have_tag = vlan_tag = 0;
		} else {
			ifp->if_input(ifp, m);
		}
	}

	/* Tell the chip how far we got, and replenish the producer rings we used. */
	CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
	if (stdcnt)
		CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
	if (jumbocnt)
		CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
}
2073
2074static void
33c39a69 2075bge_txeof(struct bge_softc *sc)
984263bc
MD
2076{
2077 struct bge_tx_bd *cur_tx = NULL;
2078 struct ifnet *ifp;
2079
2080 ifp = &sc->arpcom.ac_if;
2081
2082 /*
2083 * Go through our tx ring and free mbufs for those
2084 * frames that have been sent.
2085 */
2086 while (sc->bge_tx_saved_considx !=
2087 sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx) {
33c39a69 2088 uint32_t idx = 0;
984263bc
MD
2089
2090 idx = sc->bge_tx_saved_considx;
2091 cur_tx = &sc->bge_rdata->bge_tx_ring[idx];
2092 if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
2093 ifp->if_opackets++;
2094 if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
2095 m_freem(sc->bge_cdata.bge_tx_chain[idx]);
2096 sc->bge_cdata.bge_tx_chain[idx] = NULL;
2097 }
2098 sc->bge_txcnt--;
2099 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
2100 ifp->if_timer = 0;
2101 }
2102
2103 if (cur_tx != NULL)
2104 ifp->if_flags &= ~IFF_OACTIVE;
984263bc
MD
2105}
2106
2107static void
33c39a69 2108bge_intr(void *xsc)
984263bc 2109{
bf522c7f 2110 struct bge_softc *sc = xsc;
33c39a69 2111 struct ifnet *ifp = &sc->arpcom.ac_if;
d69ff5d4 2112 uint32_t status, statusword, mimode;
0029ccf6
JS
2113
2114 /* XXX */
2115 statusword = loadandclear(&sc->bge_rdata->bge_status_block.bge_status);
984263bc
MD
2116
2117#ifdef notdef
2118 /* Avoid this for now -- checking this register is expensive. */
2119 /* Make sure this is really our interrupt. */
2120 if (!(CSR_READ_4(sc, BGE_MISC_LOCAL_CTL) & BGE_MLC_INTR_STATE))
2121 return;
2122#endif
2123 /* Ack interrupt and stop others from occuring. */
2124 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
2125
2126 /*
2127 * Process link state changes.
2128 * Grrr. The link status word in the status block does
2129 * not work correctly on the BCM5700 rev AX and BX chips,
f952ab63 2130 * according to all available information. Hence, we have
984263bc 2131 * to enable MII interrupts in order to properly obtain
f952ab63 2132 * async link changes. Unfortunately, this also means that
984263bc
MD
2133 * we have to read the MAC status register to detect link
2134 * changes, thereby adding an additional register access to
2135 * the interrupt handler.
2136 */
2137
2138 if (sc->bge_asicrev == BGE_ASICREV_BCM5700) {
984263bc
MD
2139 status = CSR_READ_4(sc, BGE_MAC_STS);
2140 if (status & BGE_MACSTAT_MI_INTERRUPT) {
2141 sc->bge_link = 0;
263489fb 2142 callout_stop(&sc->bge_stat_timer);
78195a76 2143 bge_tick_serialized(sc);
984263bc
MD
2144 /* Clear the interrupt */
2145 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
2146 BGE_EVTENB_MI_INTERRUPT);
2147 bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR);
2148 bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR,
2149 BRGPHY_INTRS);
2150 }
2151 } else {
0029ccf6 2152 if (statusword & BGE_STATFLAG_LINKSTATE_CHANGED) {
085ed1a0 2153 /*
f952ab63 2154 * Sometimes PCS encoding errors are detected in
085ed1a0
DR
2155 * TBI mode (on fiber NICs), and for some reason
2156 * the chip will signal them as link changes.
2157 * If we get a link change event, but the 'PCS
2158 * encoding error' bit in the MAC status register
2159 * is set, don't bother doing a link check.
2160 * This avoids spurious "gigabit link up" messages
f952ab63 2161 * that sometimes appear on fiber NICs during
085ed1a0
DR
2162 * periods of heavy traffic. (There should be no
2163 * effect on copper NICs.)
d69ff5d4
JS
2164 *
2165 * If we do have a copper NIC (bge_tbi == 0) then
2166 * check that the AUTOPOLL bit is set before
2167 * processing the event as a real link change.
2168 * Turning AUTOPOLL on and off in the MII read/write
2169 * functions will often trigger a link status
2170 * interrupt for no reason.
085ed1a0
DR
2171 */
2172 status = CSR_READ_4(sc, BGE_MAC_STS);
d69ff5d4
JS
2173 mimode = CSR_READ_4(sc, BGE_MI_MODE);
2174 if (!(status & (BGE_MACSTAT_PORT_DECODE_ERROR |
2175 BGE_MACSTAT_MI_COMPLETE)) &&
2176 (!sc->bge_tbi && (mimode & BGE_MIMODE_AUTOPOLL))) {
085ed1a0 2177 sc->bge_link = 0;
263489fb 2178 callout_stop(&sc->bge_stat_timer);
78195a76 2179 bge_tick_serialized(sc);
085ed1a0 2180 }
984263bc 2181 sc->bge_link = 0;
263489fb 2182 callout_stop(&sc->bge_stat_timer);
78195a76 2183 bge_tick_serialized(sc);
984263bc
MD
2184 /* Clear the interrupt */
2185 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
7e40b8c5
HP
2186 BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
2187 BGE_MACSTAT_LINK_CHANGED);
984263bc
MD
2188
2189 /* Force flush the status block cached by PCI bridge */
2190 CSR_READ_4(sc, BGE_MBX_IRQ0_LO);
2191 }
2192 }
2193
2194 if (ifp->if_flags & IFF_RUNNING) {
2195 /* Check RX return ring producer/consumer */
2196 bge_rxeof(sc);
2197
2198 /* Check TX ring producer/consumer */
2199 bge_txeof(sc);
2200 }
2201
2202 bge_handle_events(sc);
2203
2204 /* Re-enable interrupts. */
2205 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
2206
936ff230 2207 if ((ifp->if_flags & IFF_RUNNING) && !ifq_is_empty(&ifp->if_snd))
33c39a69 2208 (*ifp->if_start)(ifp);
984263bc
MD
2209}
2210
2211static void
33c39a69 2212bge_tick(void *xsc)
78195a76
MD
2213{
2214 struct bge_softc *sc = xsc;
2215 struct ifnet *ifp = &sc->arpcom.ac_if;
2216
2217 lwkt_serialize_enter(ifp->if_serializer);
2218 bge_tick_serialized(xsc);
2219 lwkt_serialize_exit(ifp->if_serializer);
2220}
2221
2222static void
2223bge_tick_serialized(void *xsc)
984263bc 2224{
33c39a69
JS
2225 struct bge_softc *sc = xsc;
2226 struct ifnet *ifp = &sc->arpcom.ac_if;
984263bc
MD
2227 struct mii_data *mii = NULL;
2228 struct ifmedia *ifm = NULL;
984263bc 2229
9a6ee7e2
JS
2230 if (sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
2231 sc->bge_asicrev == BGE_ASICREV_BCM5750)
7e40b8c5
HP
2232 bge_stats_update_regs(sc);
2233 else
2234 bge_stats_update(sc);
9a717c15 2235
263489fb 2236 callout_reset(&sc->bge_stat_timer, hz, bge_tick, sc);
9a717c15 2237
984263bc 2238 if (sc->bge_link) {
984263bc
MD
2239 return;
2240 }
2241
2242 if (sc->bge_tbi) {
2243 ifm = &sc->bge_ifmedia;
2244 if (CSR_READ_4(sc, BGE_MAC_STS) &
2245 BGE_MACSTAT_TBI_PCS_SYNCHED) {
2246 sc->bge_link++;
70059b3c
JS
2247 if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
2248 BGE_CLRBIT(sc, BGE_MAC_MODE,
2249 BGE_MACMODE_TBI_SEND_CFGS);
2250 }
984263bc 2251 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
c6fd6f3b 2252 if_printf(ifp, "gigabit link up\n");
936ff230 2253 if (!ifq_is_empty(&ifp->if_snd))
33c39a69 2254 (*ifp->if_start)(ifp);
984263bc 2255 }
984263bc
MD
2256 return;
2257 }
2258
2259 mii = device_get_softc(sc->bge_miibus);
2260 mii_tick(mii);
2261
2262 if (!sc->bge_link) {
2263 mii_pollstat(mii);
2264 if (mii->mii_media_status & IFM_ACTIVE &&
2265 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
2266 sc->bge_link++;
7f259627 2267 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
984263bc 2268 IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
c6fd6f3b 2269 if_printf(ifp, "gigabit link up\n");
936ff230 2270 if (!ifq_is_empty(&ifp->if_snd))
33c39a69 2271 (*ifp->if_start)(ifp);
984263bc
MD
2272 }
2273 }
984263bc
MD
2274}
2275
7e40b8c5 2276static void
33c39a69 2277bge_stats_update_regs(struct bge_softc *sc)
7e40b8c5 2278{
33c39a69 2279 struct ifnet *ifp = &sc->arpcom.ac_if;
7e40b8c5 2280 struct bge_mac_stats_regs stats;
33c39a69 2281 uint32_t *s;
7e40b8c5
HP
2282 int i;
2283
33c39a69 2284 s = (uint32_t *)&stats;
7e40b8c5
HP
2285 for (i = 0; i < sizeof(struct bge_mac_stats_regs); i += 4) {
2286 *s = CSR_READ_4(sc, BGE_RX_STATS + i);
2287 s++;
2288 }
2289
2290 ifp->if_collisions +=
2291 (stats.dot3StatsSingleCollisionFrames +
2292 stats.dot3StatsMultipleCollisionFrames +
2293 stats.dot3StatsExcessiveCollisions +
2294 stats.dot3StatsLateCollisions) -
2295 ifp->if_collisions;
7e40b8c5
HP
2296}
2297
/*
 * Statistics update for chips that DMA a statistics block into NIC
 * memory; we read it back through the memory window mapping.
 */
static void
bge_stats_update(struct bge_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct bge_stats *stats;

	/* The statistics block is visible through the memory window at a fixed offset. */
	stats = (struct bge_stats *)(sc->bge_vhandle +
	    BGE_MEMWIN_START + BGE_STATS_BLOCK);

	/*
	 * "+= total - if_collisions" effectively replaces if_collisions
	 * with the chip's cumulative collision total.
	 */
	ifp->if_collisions +=
	   (stats->txstats.dot3StatsSingleCollisionFrames.bge_addr_lo +
	   stats->txstats.dot3StatsMultipleCollisionFrames.bge_addr_lo +
	   stats->txstats.dot3StatsExcessiveCollisions.bge_addr_lo +
	   stats->txstats.dot3StatsLateCollisions.bge_addr_lo) -
	   ifp->if_collisions;

#ifdef notdef
	ifp->if_collisions +=
	   (sc->bge_rdata->bge_info.bge_stats.dot3StatsSingleCollisionFrames +
	   sc->bge_rdata->bge_info.bge_stats.dot3StatsMultipleCollisionFrames +
	   sc->bge_rdata->bge_info.bge_stats.dot3StatsExcessiveCollisions +
	   sc->bge_rdata->bge_info.bge_stats.dot3StatsLateCollisions) -
	   ifp->if_collisions;
#endif
}
2323
2324/*
2325 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
2326 * pointers to descriptors.
2327 */
static int
bge_encap(struct bge_softc *sc, struct mbuf *m_head, uint32_t *txidx)
{
	struct bge_tx_bd *f = NULL;
	struct mbuf *m;
	uint32_t frag, cur, cnt = 0;
	uint16_t csum_flags = 0;
	struct ifvlan *ifv = NULL;

	/* If the packet came in through a vlan interface, remember its tag. */
	if ((m_head->m_flags & (M_PROTO1|M_PKTHDR)) == (M_PROTO1|M_PKTHDR) &&
	    m_head->m_pkthdr.rcvif != NULL &&
	    m_head->m_pkthdr.rcvif->if_type == IFT_L2VLAN)
		ifv = m_head->m_pkthdr.rcvif->if_softc;

	m = m_head;
	cur = frag = *txidx;

	/* Translate checksum-offload requests into descriptor flags. */
	if (m_head->m_pkthdr.csum_flags) {
		if (m_head->m_pkthdr.csum_flags & CSUM_IP)
			csum_flags |= BGE_TXBDFLAG_IP_CSUM;
		if (m_head->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
			csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
		if (m_head->m_flags & M_LASTFRAG)
			csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
		else if (m_head->m_flags & M_FRAG)
			csum_flags |= BGE_TXBDFLAG_IP_FRAG;
	}
	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	for (m = m_head; m != NULL; m = m->m_next) {
		if (m->m_len != 0) {
			f = &sc->bge_rdata->bge_tx_ring[frag];
			if (sc->bge_cdata.bge_tx_chain[frag] != NULL)
				break;
			BGE_HOSTADDR(f->bge_addr,
			    vtophys(mtod(m, vm_offset_t)));
			f->bge_len = m->m_len;
			f->bge_flags = csum_flags;
			if (ifv != NULL) {
				f->bge_flags |= BGE_TXBDFLAG_VLAN_TAG;
				f->bge_vlan_tag = ifv->ifv_tag;
			} else {
				f->bge_vlan_tag = 0;
			}
			/*
			 * Sanity check: avoid coming within 16 descriptors
			 * of the end of the ring.
			 */
			if ((BGE_TX_RING_CNT - (sc->bge_txcnt + cnt)) < 16)
				return(ENOBUFS);
			cur = frag;
			BGE_INC(frag, BGE_TX_RING_CNT);
			cnt++;
		}
	}

	/* Ran out of free descriptors before consuming the whole chain. */
	if (m != NULL)
		return(ENOBUFS);

	/* Ring would become completely full; refuse rather than wrap. */
	if (frag == sc->bge_tx_saved_considx)
		return(ENOBUFS);

	/*
	 * Commit: mark the last descriptor as the end of the frame and
	 * record the chain so bge_txeof() can free it later.  Nothing
	 * above this point has been made visible to bge_txeof().
	 */
	sc->bge_rdata->bge_tx_ring[cur].bge_flags |= BGE_TXBDFLAG_END;
	sc->bge_cdata.bge_tx_chain[cur] = m_head;
	sc->bge_txcnt += cnt;

	/* Hand the advanced producer index back to the caller. */
	*txidx = frag;

	return(0);
}
2401
2402/*
2403 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
2404 * to the mbuf data regions directly in the transmit descriptors.
2405 */
static void
bge_start(struct ifnet *ifp)
{
	struct bge_softc *sc;
	struct mbuf *m_head = NULL;
	uint32_t prodidx = 0;
	int need_trans;

	sc = ifp->if_softc;

	/* Don't queue anything to the chip until we have a link. */
	if (!sc->bge_link)
		return;

	prodidx = sc->bge_tx_prodidx;

	need_trans = 0;
	while(sc->bge_cdata.bge_tx_chain[prodidx] == NULL) {
		/* Peek (don't dequeue) so the mbuf survives an encap failure. */
		m_head = ifq_poll(&ifp->if_snd);
		if (m_head == NULL)
			break;

		/*
		 * XXX
		 * safety overkill. If this is a fragmented packet chain
		 * with delayed TCP/UDP checksums, then only encapsulate
		 * it if we have enough descriptors to handle the entire
		 * chain at once.
		 * (paranoia -- may not actually be needed)
		 */
		if (m_head->m_flags & M_FIRSTFRAG &&
		    m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
			if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
			    m_head->m_pkthdr.csum_data + 16) {
				ifp->if_flags |= IFF_OACTIVE;
				break;
			}
		}

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (bge_encap(sc, m_head, &prodidx)) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		/* Encap succeeded: now it is safe to remove the mbuf from the queue. */
		ifq_dequeue(&ifp->if_snd, m_head);
		need_trans = 1;

		BPF_MTAP(ifp, m_head);
	}

	if (!need_trans)
		return;

	/* Transmit */
	CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
	/* 5700 b2 errata */
	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
		CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);

	sc->bge_tx_prodidx = prodidx;

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
}
2475
/*
 * Bring the interface up: reset and reinitialize the chip, program
 * addresses and filters, set up the rings, then enable the MAC and
 * interrupts.  No-op if already running.
 */
static void
bge_init(void *xsc)
{
	struct bge_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint16_t *m;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if (ifp->if_flags & IFF_RUNNING)
		return;

	/* Cancel pending I/O and flush buffers. */
	bge_stop(sc);
	bge_reset(sc);
	bge_chipinit(sc);

	/*
	 * Init the various state machines, ring
	 * control blocks and firmware.
	 */
	if (bge_blockinit(sc)) {
		if_printf(ifp, "initialization failure\n");
		return;
	}

	/* Specify MTU. */
	CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
	    ETHER_HDR_LEN + ETHER_CRC_LEN + EVL_ENCAPLEN);

	/* Load our MAC address. */
	m = (uint16_t *)&sc->arpcom.ac_enaddr[0];
	CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
	CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));

	/* Enable or disable promiscuous mode as needed. */
	bge_setpromisc(sc);

	/* Program multicast filter. */
	bge_setmulti(sc);

	/* Init RX ring. */
	bge_init_rx_ring_std(sc);

	/*
	 * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
	 * memory to insure that the chip has in fact read the first
	 * entry of the ring.
	 */
	if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
		uint32_t v, i;
		for (i = 0; i < 10; i++) {
			DELAY(20);
			v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
			if (v == (MCLBYTES - ETHER_ALIGN))
				break;
		}
		if (i == 10)
			if_printf(ifp, "5705 A0 chip failed to load RX ring\n");
	}

	/* Init jumbo RX ring. */
	if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
		bge_init_rx_ring_jumbo(sc);

	/* Init our RX return ring index */
	sc->bge_rx_saved_considx = 0;

	/* Init TX ring. */
	bge_init_tx_ring(sc);

	/* Turn on transmitter */
	BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE);

	/* Turn on receiver */
	BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);

	/* Tell firmware we're alive. */
	BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);

	/* Enable host interrupts. */
	BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
	BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);

	/* Re-apply the currently selected media. */
	bge_ifmedia_upd(ifp);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/* Start the periodic statistics/link tick. */
	callout_reset(&sc->bge_stat_timer, hz, bge_tick, sc);
}
2568
2569/*
2570 * Set media options.
2571 */
static int
bge_ifmedia_upd(struct ifnet *ifp)
{
	struct bge_softc *sc = ifp->if_softc;
	struct ifmedia *ifm = &sc->bge_ifmedia;
	struct mii_data *mii;

	/* If this is a 1000baseX NIC, enable the TBI port. */
	if (sc->bge_tbi) {
		if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
			return(EINVAL);
		switch(IFM_SUBTYPE(ifm->ifm_media)) {
		case IFM_AUTO:
			/*
			 * The BCM5704 ASIC appears to have a special
			 * mechanism for programming the autoneg
			 * advertisement registers in TBI mode.
			 */
			if (!bge_fake_autoneg &&
			    sc->bge_asicrev == BGE_ASICREV_BCM5704) {
				uint32_t sgdig;

				CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
				sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
				sgdig |= BGE_SGDIGCFG_AUTO |
					 BGE_SGDIGCFG_PAUSE_CAP |
					 BGE_SGDIGCFG_ASYM_PAUSE;
				/* Pulse SEND, then leave the config latched. */
				CSR_WRITE_4(sc, BGE_SGDIG_CFG,
					    sgdig | BGE_SGDIGCFG_SEND);
				DELAY(5);
				CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig);
			}
			break;
		case IFM_1000_SX:
			/* Manual 1000baseSX: just set the duplex bit. */
			if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
				BGE_CLRBIT(sc, BGE_MAC_MODE,
				    BGE_MACMODE_HALF_DUPLEX);
			} else {
				BGE_SETBIT(sc, BGE_MAC_MODE,
				    BGE_MACMODE_HALF_DUPLEX);
			}
			break;
		default:
			return(EINVAL);
		}
		return(0);
	}

	/* Copper: reset all PHY instances and let the MII layer switch media. */
	mii = device_get_softc(sc->bge_miibus);
	sc->bge_link = 0;
	if (mii->mii_instance) {
		struct mii_softc *miisc;
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	mii_mediachg(mii);

	return(0);
}
2631
2632/*
2633 * Report current media status.
2634 */
2635static void
33c39a69 2636bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
984263bc 2637{
33c39a69 2638 struct bge_softc *sc = ifp->if_softc;
984263bc
MD
2639 struct mii_data *mii;
2640
984263bc
MD
2641 if (sc->bge_tbi) {
2642 ifmr->ifm_status = IFM_AVALID;
2643 ifmr->ifm_active = IFM_ETHER;
2644 if (CSR_READ_4(sc, BGE_MAC_STS) &
2645 BGE_MACSTAT_TBI_PCS_SYNCHED)
2646 ifmr->ifm_status |= IFM_ACTIVE;
2647 ifmr->ifm_active |= IFM_1000_SX;
2648 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
2649 ifmr->ifm_active |= IFM_HDX;
2650 else
2651 ifmr->ifm_active |= IFM_FDX;
2652 return;
2653 }
2654
2655 mii = device_get_softc(sc->bge_miibus);
2656 mii_pollstat(mii);
2657 ifmr->ifm_active = mii->mii_media_active;
2658 ifmr->ifm_status = mii->mii_media_status;
984263bc
MD
2659}
2660
2661static int
33c39a69 2662bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
984263bc
MD
2663{
2664 struct bge_softc *sc = ifp->if_softc;
2665 struct ifreq *ifr = (struct ifreq *) data;
9a717c15 2666 int mask, error = 0;
984263bc
MD
2667 struct mii_data *mii;
2668
aa65409c
SZ
2669 ASSERT_SERIALIZED(ifp->if_serializer);
2670
984263bc 2671 switch(command) {
984263bc 2672 case SIOCSIFMTU:
9a6ee7e2
JS
2673 /* Disallow jumbo frames on 5705/5750. */
2674 if (((sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
2675 sc->bge_asicrev == BGE_ASICREV_BCM5750) &&
2676 ifr->ifr_mtu > ETHERMTU) || ifr->ifr_mtu > BGE_JUMBO_MTU)
984263bc
MD
2677 error = EINVAL;
2678 else {
2679 ifp->if_mtu = ifr->ifr_mtu;
2680 ifp->if_flags &= ~IFF_RUNNING;
2681 bge_init(sc);
2682 }
2683 break;
2684 case SIOCSIFFLAGS:
2685 if (ifp->if_flags & IFF_UP) {
6439b28a
SZ
2686 if (ifp->if_flags & IFF_RUNNING) {
2687 int flags = ifp->if_flags & sc->bge_if_flags;
2688
2689 /*
2690 * If only the state of the PROMISC flag
2691 * changed, then just use the 'set promisc
2692 * mode' command instead of reinitializing
2693 * the entire NIC. Doing a full re-init
2694 * means reloading the firmware and waiting
2695 * for it to start up, which may take a
2696 * second or two. Similarly for ALLMULTI.
2697 */
2698 if (flags & IFF_PROMISC)
2699 bge_setpromisc(sc);
2700 if (flags & IFF_ALLMULTI)
2701 bge_setmulti(sc);
2702 } else {
984263bc 2703 bge_init(sc);
6439b28a 2704 }
984263bc 2705 } else {
aa65409c 2706 if (ifp->if_flags & IFF_RUNNING)
984263bc 2707 bge_stop(sc);
984263bc
MD
2708 }
2709 sc->bge_if_flags = ifp->if_flags;
2710 error = 0;
2711 break;
2712 case SIOCADDMULTI:
2713 case SIOCDELMULTI:
2714 if (ifp->if_flags & IFF_RUNNING) {
2715 bge_setmulti(sc);
2716 error = 0;
2717 }
2718 break;
2719 case SIOCSIFMEDIA:
2720 case SIOCGIFMEDIA:
2721 if (sc->bge_tbi) {
2722 error = ifmedia_ioctl(ifp, ifr,
2723 &sc->bge_ifmedia, command);
2724 } else {
2725 mii = device_get_softc(sc->bge_miibus);
2726 error = ifmedia_ioctl(ifp, ifr,
2727 &mii->mii_media, command);
2728 }
2729 break;
2730 case SIOCSIFCAP:
2731 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
2732 if (mask & IFCAP_HWCSUM) {
2733 if (IFCAP_HWCSUM & ifp->if_capenable)
2734 ifp->if_capenable &= ~IFCAP_HWCSUM;
2735 else
2736 ifp->if_capenable |= IFCAP_HWCSUM;
2737 }
2738 error = 0;
2739 break;
2740 default:
4cde4dd5 2741 error = ether_ioctl(ifp, command, data);
984263bc
MD
2742 break;
2743 }
984263bc
MD
2744 return(error);
2745}
2746
/*
 * Transmit watchdog: fires when the TX timer (armed in bge_start())
 * expires without bge_txeof() making progress.  Recover by fully
 * re-initializing the chip and restarting output.
 */
static void
bge_watchdog(struct ifnet *ifp)
{
	struct bge_softc *sc = ifp->if_softc;

	if_printf(ifp, "watchdog timeout -- resetting\n");

	/* Clear RUNNING so bge_init() performs a full re-init. */
	ifp->if_flags &= ~IFF_RUNNING;
	bge_init(sc);

	ifp->if_oerrors++;

	/* Restart output for anything still queued. */
	if (!ifq_is_empty(&ifp->if_snd))
		ifp->if_start(ifp);
}
2762
2763/*
2764 * Stop the adapter and free any mbufs allocated to the
2765 * RX and TX lists.
2766 */
static void
bge_stop(struct bge_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct ifmedia_entry *ifm;
	struct mii_data *mii = NULL;
	int mtmp, itmp;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if (!sc->bge_tbi)
		mii = device_get_softc(sc->bge_miibus);

	/* Stop the periodic tick. */
	callout_stop(&sc->bge_stat_timer);

	/*
	 * Disable all of the receiver blocks
	 */
	BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
	/* Some blocks don't exist on the 5705/5750. */
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
	    sc->bge_asicrev != BGE_ASICREV_BCM5750)
		BGE_CLRBIT(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);

	/*
	 * Disable all of the transmit blocks
	 */
	BGE_CLRBIT(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
	    sc->bge_asicrev != BGE_ASICREV_BCM5750)
		BGE_CLRBIT(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);

	/*
	 * Shut down all of the memory managers and related
	 * state machines.
	 */
	BGE_CLRBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
	    sc->bge_asicrev != BGE_ASICREV_BCM5750)
		BGE_CLRBIT(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
	    sc->bge_asicrev != BGE_ASICREV_BCM5750) {
		BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
		BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
	}

	/* Disable host interrupts. */
	BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);

	/*
	 * Tell firmware we're shutting down.
	 */
	BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);

	/* Free the RX lists. */
	bge_free_rx_ring_std(sc);

	/* Free jumbo RX list. */
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
	    sc->bge_asicrev != BGE_ASICREV_BCM5750)
		bge_free_rx_ring_jumbo(sc);

	/* Free TX buffers. */
	bge_free_tx_ring(sc);

	/*
	 * Isolate/power down the PHY, but leave the media selection
	 * unchanged so that things will be put back to normal when
	 * we bring the interface back up.
	 */
	if (!sc->bge_tbi) {
		/* Temporarily force IFM_NONE, then restore the saved media. */
		itmp = ifp->if_flags;
		ifp->if_flags |= IFF_UP;
		ifm = mii->mii_media.ifm_cur;
		mtmp = ifm->ifm_media;
		ifm->ifm_media = IFM_ETHER|IFM_NONE;
		mii_mediachg(mii);
		ifm->ifm_media = mtmp;
		ifp->if_flags = itmp;
	}

	sc->bge_link = 0;

	sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
}
2867
/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static void
bge_shutdown(device_t dev)
{
	struct bge_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	/*
	 * All hardware access must happen under the interface
	 * serializer.  Stop the chip first (quiesce DMA, free rings),
	 * then reset it so it comes back in a known-idle state.
	 */
	lwkt_serialize_enter(ifp->if_serializer);
	bge_stop(sc);
	bge_reset(sc);
	lwkt_serialize_exit(ifp->if_serializer);
}
2883
/*
 * Device suspend method: quiesce the chip (halt DMA and free the
 * rings via bge_stop()) before the system enters a low-power state.
 * Always reports success.
 */
static int
bge_suspend(device_t dev)
{
	struct bge_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	/* Hardware access requires the interface serializer. */
	lwkt_serialize_enter(ifp->if_serializer);
	bge_stop(sc);
	lwkt_serialize_exit(ifp->if_serializer);

	return 0;
}
2896
/*
 * Device resume method: if the interface was administratively up
 * before suspend, re-initialize the chip and restart the transmit
 * path in case packets were queued while the device was down.
 * Always reports success.
 */
static int
bge_resume(device_t dev)
{
	struct bge_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);

	if (ifp->if_flags & IFF_UP) {
		bge_init(sc);

		/* bge_init() sets IFF_RUNNING on success; only then kick TX. */
		if (ifp->if_flags & IFF_RUNNING)
			ifp->if_start(ifp);
	}

	lwkt_serialize_exit(ifp->if_serializer);

	return 0;
}
6439b28a
SZ
2916
2917static void
2918bge_setpromisc(struct bge_softc *sc)
2919{
2920 struct ifnet *ifp = &sc->arpcom.ac_if;
2921
2922 if (ifp->if_flags & IFF_PROMISC)
2923 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
2924 else
2925 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
2926}