Convert to critical sections. Add a missing ether_ifdetach when
[dragonfly.git] / sys / dev / netif / bge / if_bge.c
CommitLineData
984263bc
MD
1/*
2 * Copyright (c) 2001 Wind River Systems
3 * Copyright (c) 1997, 1998, 1999, 2001
4 * Bill Paul <wpaul@windriver.com>. All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 * must display the following acknowledgement:
16 * This product includes software developed by Bill Paul.
17 * 4. Neither the name of the author nor the names of any co-contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31 * THE POSSIBILITY OF SUCH DAMAGE.
32 *
7e40b8c5 33 * $FreeBSD: src/sys/dev/bge/if_bge.c,v 1.3.2.29 2003/12/01 21:06:59 ambrisko Exp $
b542cd49 34 * $DragonFly: src/sys/dev/netif/bge/if_bge.c,v 1.38 2005/05/31 14:11:42 joerg Exp $
1de703da 35 *
984263bc
MD
36 */
37
38/*
39 * Broadcom BCM570x family gigabit ethernet driver for FreeBSD.
40 *
41 * Written by Bill Paul <wpaul@windriver.com>
42 * Senior Engineer, Wind River Systems
43 */
44
45/*
46 * The Broadcom BCM5700 is based on technology originally developed by
47 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
48 * MAC chips. The BCM5700, sometimes refered to as the Tigon III, has
49 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
50 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
51 * frames, highly configurable RX filtering, and 16 RX and TX queues
52 * (which, along with RX filter rules, can be used for QOS applications).
53 * Other features, such as TCP segmentation, may be available as part
54 * of value-added firmware updates. Unlike the Tigon I and Tigon II,
55 * firmware images can be stored in hardware and need not be compiled
56 * into the driver.
57 *
58 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
59 * function in a 32-bit/64-bit 33/66Mhz bus, or a 64-bit/133Mhz bus.
60 *
61 * The BCM5701 is a single-chip solution incorporating both the BCM5700
62 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
63 * does not support external SSRAM.
64 *
65 * Broadcom also produces a variation of the BCM5700 under the "Altima"
66 * brand name, which is functionally similar but lacks PCI-X support.
67 *
68 * Without external SSRAM, you can only have at most 4 TX rings,
69 * and the use of the mini RX ring is disabled. This seems to imply
70 * that these features are simply not available on the BCM5701. As a
71 * result, this driver does not implement any support for the mini RX
72 * ring.
73 */
74
75#include <sys/param.h>
76#include <sys/systm.h>
77#include <sys/sockio.h>
78#include <sys/mbuf.h>
79#include <sys/malloc.h>
80#include <sys/kernel.h>
81#include <sys/socket.h>
82#include <sys/queue.h>
83
84#include <net/if.h>
936ff230 85#include <net/ifq_var.h>
984263bc
MD
86#include <net/if_arp.h>
87#include <net/ethernet.h>
88#include <net/if_dl.h>
89#include <net/if_media.h>
90
91#include <net/bpf.h>
92
93#include <net/if_types.h>
1f2de5d4 94#include <net/vlan/if_vlan_var.h>
984263bc
MD
95
96#include <netinet/in_systm.h>
97#include <netinet/in.h>
98#include <netinet/ip.h>
99
100#include <vm/vm.h> /* for vtophys */
101#include <vm/pmap.h> /* for vtophys */
984263bc
MD
102#include <machine/resource.h>
103#include <sys/bus.h>
104#include <sys/rman.h>
105
1f2de5d4
MD
106#include <dev/netif/mii_layer/mii.h>
107#include <dev/netif/mii_layer/miivar.h>
108#include <dev/netif/mii_layer/miidevs.h>
109#include <dev/netif/mii_layer/brgphyreg.h>
984263bc 110
f952ab63 111#include <bus/pci/pcidevs.h>
1f2de5d4
MD
112#include <bus/pci/pcireg.h>
113#include <bus/pci/pcivar.h>
984263bc 114
1f2de5d4 115#include "if_bgereg.h"
984263bc
MD
116
117#define BGE_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP)
118
119/* "controller miibus0" required. See GENERIC if you get errors here. */
120#include "miibus_if.h"
121
984263bc
MD
122/*
123 * Various supported device vendors/types and their names. Note: the
124 * spec seems to indicate that the hardware still has Alteon's vendor
125 * ID burned into it, though it will always be overriden by the vendor
126 * ID in the EEPROM. Just to be safe, we cover all possibilities.
127 */
128#define BGE_DEVDESC_MAX 64 /* Maximum device description length */
129
130static struct bge_type bge_devs[] = {
f952ab63 131 { PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_BCM5700,
984263bc 132 "Broadcom BCM5700 Gigabit Ethernet" },
f952ab63 133 { PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_BCM5700,
984263bc 134 "Broadcom BCM5701 Gigabit Ethernet" },
f952ab63 135 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5700,
984263bc 136 "Broadcom BCM5700 Gigabit Ethernet" },
f952ab63 137 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5701,
984263bc 138 "Broadcom BCM5701 Gigabit Ethernet" },
f952ab63 139 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702X,
984263bc 140 "Broadcom BCM5702X Gigabit Ethernet" },
f952ab63
JS
141 { PCI_VENDOR_BROADCOM, BCOM_DEVICEID_BCM5702X,
142 "Broadcom BCM5702X Gigabit Ethernet" },
143 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703X,
144 "Broadcom BCM5703X Gigabit Ethernet" },
145 { PCI_VENDOR_BROADCOM, BCOM_DEVICEID_BCM5703X,
984263bc 146 "Broadcom BCM5703X Gigabit Ethernet" },
f952ab63 147 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704C,
984263bc 148 "Broadcom BCM5704C Dual Gigabit Ethernet" },
f952ab63 149 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704S,
984263bc 150 "Broadcom BCM5704S Dual Gigabit Ethernet" },
f952ab63 151 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705,
7e40b8c5 152 "Broadcom BCM5705 Gigabit Ethernet" },
f952ab63 153 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705M,
7e40b8c5 154 "Broadcom BCM5705M Gigabit Ethernet" },
f952ab63 155 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705_ALT,
7e40b8c5 156 "Broadcom BCM5705M Gigabit Ethernet" },
f952ab63 157 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5782,
7e40b8c5 158 "Broadcom BCM5782 Gigabit Ethernet" },
f952ab63
JS
159 { PCI_VENDOR_BROADCOM, BCOM_DEVICEID_BCM5788,
160 "Broadcom BCM5788 Gigabit Ethernet" },
161 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5901,
162 "Broadcom BCM5901 Fast Ethernet" },
163 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5901A2,
164 "Broadcom BCM5901A2 Fast Ethernet" },
165 { PCI_VENDOR_SCHNEIDERKOCH, PCI_PRODUCT_SCHNEIDERKOCH_SK_9DX1,
984263bc 166 "SysKonnect Gigabit Ethernet" },
f952ab63 167 { PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1000,
984263bc 168 "Altima AC1000 Gigabit Ethernet" },
f952ab63 169 { PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1001,
7e40b8c5 170 "Altima AC1002 Gigabit Ethernet" },
f952ab63 171 { PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC9100,
984263bc
MD
172 "Altima AC9100 Gigabit Ethernet" },
173 { 0, 0, NULL }
174};
175
33c39a69
JS
176static int bge_probe(device_t);
177static int bge_attach(device_t);
178static int bge_detach(device_t);
179static void bge_release_resources(struct bge_softc *);
180static void bge_txeof(struct bge_softc *);
181static void bge_rxeof(struct bge_softc *);
182
183static void bge_tick(void *);
184static void bge_stats_update(struct bge_softc *);
185static void bge_stats_update_regs(struct bge_softc *);
186static int bge_encap(struct bge_softc *, struct mbuf *, uint32_t *);
187
188static void bge_intr(void *);
189static void bge_start(struct ifnet *);
190static int bge_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
191static void bge_init(void *);
192static void bge_stop(struct bge_softc *);
193static void bge_watchdog(struct ifnet *);
194static void bge_shutdown(device_t);
195static int bge_ifmedia_upd(struct ifnet *);
196static void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
197
198static uint8_t bge_eeprom_getbyte(struct bge_softc *, uint32_t, uint8_t *);
199static int bge_read_eeprom(struct bge_softc *, caddr_t, uint32_t, size_t);
200
33c39a69
JS
201static void bge_setmulti(struct bge_softc *);
202
203static void bge_handle_events(struct bge_softc *);
204static int bge_alloc_jumbo_mem(struct bge_softc *);
205static void bge_free_jumbo_mem(struct bge_softc *);
2aa9b12f
JS
206static struct bge_jslot
207 *bge_jalloc(struct bge_softc *);
208static void bge_jfree(void *);
209static void bge_jref(void *);
33c39a69
JS
210static int bge_newbuf_std(struct bge_softc *, int, struct mbuf *);
211static int bge_newbuf_jumbo(struct bge_softc *, int, struct mbuf *);
212static int bge_init_rx_ring_std(struct bge_softc *);
213static void bge_free_rx_ring_std(struct bge_softc *);
214static int bge_init_rx_ring_jumbo(struct bge_softc *);
215static void bge_free_rx_ring_jumbo(struct bge_softc *);
216static void bge_free_tx_ring(struct bge_softc *);
217static int bge_init_tx_ring(struct bge_softc *);
218
219static int bge_chipinit(struct bge_softc *);
220static int bge_blockinit(struct bge_softc *);
984263bc
MD
221
222#ifdef notdef
33c39a69
JS
223static uint8_t bge_vpd_readbyte(struct bge_softc *, uint32_t);
224static void bge_vpd_read_res(struct bge_softc *, struct vpd_res *, uint32_t);
225static void bge_vpd_read(struct bge_softc *);
984263bc
MD
226#endif
227
33c39a69
JS
228static uint32_t bge_readmem_ind(struct bge_softc *, uint32_t);
229static void bge_writemem_ind(struct bge_softc *, uint32_t, uint32_t);
984263bc 230#ifdef notdef
33c39a69 231static uint32_t bge_readreg_ind(struct bge_softc *, uint32_t);
984263bc 232#endif
33c39a69 233static void bge_writereg_ind(struct bge_softc *, uint32_t, uint32_t);
984263bc 234
33c39a69
JS
235static int bge_miibus_readreg(device_t, int, int);
236static int bge_miibus_writereg(device_t, int, int, int);
237static void bge_miibus_statchg(device_t);
984263bc 238
33c39a69 239static void bge_reset(struct bge_softc *);
984263bc
MD
240
/* newbus method dispatch table for the bge(4) driver. */
static device_method_t bge_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, bge_probe),
	DEVMETHOD(device_attach, bge_attach),
	DEVMETHOD(device_detach, bge_detach),
	DEVMETHOD(device_shutdown, bge_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child, bus_generic_print_child),
	DEVMETHOD(bus_driver_added, bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg, bge_miibus_readreg),
	DEVMETHOD(miibus_writereg, bge_miibus_writereg),
	DEVMETHOD(miibus_statchg, bge_miibus_statchg),

	/* terminator entry required by the method-table walkers */
	{ 0, 0 }
};

static DEFINE_CLASS_0(bge, bge_driver, bge_methods, sizeof(struct bge_softc));
static devclass_t bge_devclass;

/* No module event handler is needed; register a dummy module entry. */
DECLARE_DUMMY_MODULE(if_bge);
DRIVER_MODULE(if_bge, pci, bge_driver, bge_devclass, 0, 0);
/* miibus attaches as a child of bge so the PHY probes under us. */
DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0);
266
33c39a69
JS
267static uint32_t
268bge_readmem_ind(struct bge_softc *sc, uint32_t off)
984263bc 269{
33c39a69 270 device_t dev = sc->bge_dev;
984263bc
MD
271
272 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
273 return(pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4));
274}
275
276static void
33c39a69 277bge_writemem_ind(struct bge_softc *sc, uint32_t off, uint32_t val)
984263bc 278{
33c39a69 279 device_t dev = sc->bge_dev;
984263bc
MD
280
281 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
282 pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
984263bc
MD
283}
284
#ifdef notdef
/*
 * Indirect register read through the PCI register window.
 * Fix: the parameter type was misspelled "uin32_t", which would have
 * failed to compile if this notdef block were ever enabled.
 */
static uint32_t
bge_readreg_ind(struct bge_softc *sc, uint32_t off)
{
	device_t dev = sc->bge_dev;

	pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
	return(pci_read_config(dev, BGE_PCI_REG_DATA, 4));
}
#endif
295
296static void
33c39a69 297bge_writereg_ind(struct bge_softc *sc, uint32_t off, uint32_t val)
984263bc 298{
33c39a69 299 device_t dev = sc->bge_dev;
984263bc
MD
300
301 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
302 pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);
984263bc
MD
303}
304
305#ifdef notdef
33c39a69
JS
306static uint8_t
307bge_vpd_readbyte(struct bge_softc *sc, uint32_t addr)
984263bc 308{
33c39a69
JS
309 device_t dev = sc->bge_dev;
310 uint32_t val;
984263bc 311 int i;
984263bc 312
984263bc
MD
313 pci_write_config(dev, BGE_PCI_VPD_ADDR, addr, 2);
314 for (i = 0; i < BGE_TIMEOUT * 10; i++) {
315 DELAY(10);
316 if (pci_read_config(dev, BGE_PCI_VPD_ADDR, 2) & BGE_VPD_FLAG)
317 break;
318 }
319
320 if (i == BGE_TIMEOUT) {
c6fd6f3b 321 device_printf(sc->bge_dev, "VPD read timed out\n");
984263bc
MD
322 return(0);
323 }
324
325 val = pci_read_config(dev, BGE_PCI_VPD_DATA, 4);
326
327 return((val >> ((addr % 4) * 8)) & 0xFF);
328}
329
330static void
33c39a69 331bge_vpd_read_res(struct bge_softc *sc, struct vpd_res *res, uint32_t addr)
984263bc 332{
33c39a69
JS
333 size_t i;
334 uint8_t *ptr;
984263bc 335
33c39a69 336 ptr = (uint8_t *)res;
984263bc
MD
337 for (i = 0; i < sizeof(struct vpd_res); i++)
338 ptr[i] = bge_vpd_readbyte(sc, i + addr);
339
340 return;
341}
342
343static void
33c39a69 344bge_vpd_read(struct bge_softc *sc)
984263bc
MD
345{
346 int pos = 0, i;
347 struct vpd_res res;
348
349 if (sc->bge_vpd_prodname != NULL)
350 free(sc->bge_vpd_prodname, M_DEVBUF);
351 if (sc->bge_vpd_readonly != NULL)
352 free(sc->bge_vpd_readonly, M_DEVBUF);
353 sc->bge_vpd_prodname = NULL;
354 sc->bge_vpd_readonly = NULL;
355
356 bge_vpd_read_res(sc, &res, pos);
357
358 if (res.vr_id != VPD_RES_ID) {
c6fd6f3b
JS
359 device_printf(sc->bge_dev,
360 "bad VPD resource id: expected %x got %x\n",
361 VPD_RES_ID, res.vr_id);
984263bc
MD
362 return;
363 }
364
365 pos += sizeof(res);
c5541aee 366 sc->bge_vpd_prodname = malloc(res.vr_len + 1, M_DEVBUF, M_INTWAIT);
984263bc
MD
367 for (i = 0; i < res.vr_len; i++)
368 sc->bge_vpd_prodname[i] = bge_vpd_readbyte(sc, i + pos);
369 sc->bge_vpd_prodname[i] = '\0';
370 pos += i;
371
372 bge_vpd_read_res(sc, &res, pos);
373
374 if (res.vr_id != VPD_RES_READ) {
c6fd6f3b
JS
375 device_printf(sc->bge_dev,
376 "bad VPD resource id: expected %x got %x\n",
377 VPD_RES_READ, res.vr_id);
984263bc
MD
378 return;
379 }
380
381 pos += sizeof(res);
c5541aee 382 sc->bge_vpd_readonly = malloc(res.vr_len, M_DEVBUF, M_INTWAIT);
984263bc
MD
383 for (i = 0; i < res.vr_len + 1; i++)
384 sc->bge_vpd_readonly[i] = bge_vpd_readbyte(sc, i + pos);
984263bc
MD
385}
386#endif
387
388/*
389 * Read a byte of data stored in the EEPROM at address 'addr.' The
390 * BCM570x supports both the traditional bitbang interface and an
391 * auto access interface for reading the EEPROM. We use the auto
392 * access method.
393 */
33c39a69
JS
394static uint8_t
395bge_eeprom_getbyte(struct bge_softc *sc, uint32_t addr, uint8_t *dest)
984263bc
MD
396{
397 int i;
33c39a69 398 uint32_t byte = 0;
984263bc
MD
399
400 /*
401 * Enable use of auto EEPROM access so we can avoid
402 * having to use the bitbang method.
403 */
404 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);
405
406 /* Reset the EEPROM, load the clock period. */
407 CSR_WRITE_4(sc, BGE_EE_ADDR,
408 BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
409 DELAY(20);
410
411 /* Issue the read EEPROM command. */
412 CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);
413
414 /* Wait for completion */
415 for(i = 0; i < BGE_TIMEOUT * 10; i++) {
416 DELAY(10);
417 if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
418 break;
419 }
420
421 if (i == BGE_TIMEOUT) {
c6fd6f3b 422 if_printf(&sc->arpcom.ac_if, "eeprom read timed out\n");
984263bc
MD
423 return(0);
424 }
425
426 /* Get result. */
427 byte = CSR_READ_4(sc, BGE_EE_DATA);
428
429 *dest = (byte >> ((addr % 4) * 8)) & 0xFF;
430
431 return(0);
432}
433
434/*
435 * Read a sequence of bytes from the EEPROM.
436 */
437static int
33c39a69 438bge_read_eeprom(struct bge_softc *sc, caddr_t dest, uint32_t off, size_t len)
984263bc 439{
33c39a69
JS
440 size_t i;
441 int err;
442 uint8_t byte;
984263bc 443
33c39a69 444 for (byte = 0, err = 0, i = 0; i < len; i++) {
984263bc
MD
445 err = bge_eeprom_getbyte(sc, off + i, &byte);
446 if (err)
447 break;
448 *(dest + i) = byte;
449 }
450
451 return(err ? 1 : 0);
452}
453
/*
 * MII read callback: fetch PHY register 'reg' of PHY 'phy' through
 * the chip's MI communication interface.  Returns the 16-bit register
 * value, or 0 on timeout/read failure.
 */
static int
bge_miibus_readreg(device_t dev, int phy, int reg)
{
	struct bge_softc *sc;
	struct ifnet *ifp;
	uint32_t val, autopoll;
	int i;

	sc = device_get_softc(dev);
	ifp = &sc->arpcom.ac_if;

	/*
	 * Broadcom's own driver always assumes the internal
	 * PHY is at GMII address 1. On some chips, the PHY responds
	 * to accesses at all addresses, which could cause us to
	 * bogusly attach the PHY 32 times at probe type. Always
	 * restricting the lookup to address 1 is simpler than
	 * trying to figure out which chips revisions should be
	 * special-cased.
	 */
	if (phy != 1)
		return(0);

	/* Reading with autopolling on may trigger PCI errors */
	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	/* Start the read, then poll until the BUSY bit clears. */
	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY|
	    BGE_MIPHY(phy)|BGE_MIREG(reg));

	for (i = 0; i < BGE_TIMEOUT; i++) {
		val = CSR_READ_4(sc, BGE_MI_COMM);
		if (!(val & BGE_MICOMM_BUSY))
			break;
	}

	if (i == BGE_TIMEOUT) {
		if_printf(ifp, "PHY read timed out\n");
		val = 0;
		goto done;
	}

	val = CSR_READ_4(sc, BGE_MI_COMM);

done:
	/* Restore autopolling if we had to turn it off above. */
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	if (val & BGE_MICOMM_READFAIL)
		return(0);

	return(val & 0xFFFF);
}
512
513static int
33c39a69 514bge_miibus_writereg(device_t dev, int phy, int reg, int val)
984263bc
MD
515{
516 struct bge_softc *sc;
33c39a69 517 uint32_t autopoll;
984263bc
MD
518 int i;
519
520 sc = device_get_softc(dev);
521
522 /* Reading with autopolling on may trigger PCI errors */
523 autopoll = CSR_READ_4(sc, BGE_MI_MODE);
524 if (autopoll & BGE_MIMODE_AUTOPOLL) {
525 BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
526 DELAY(40);
527 }
528
529 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY|
530 BGE_MIPHY(phy)|BGE_MIREG(reg)|val);
531
532 for (i = 0; i < BGE_TIMEOUT; i++) {
533 if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY))
534 break;
535 }
536
537 if (autopoll & BGE_MIMODE_AUTOPOLL) {
538 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
539 DELAY(40);
540 }
541
542 if (i == BGE_TIMEOUT) {
c6fd6f3b 543 if_printf(&sc->arpcom.ac_if, "PHY read timed out\n");
984263bc
MD
544 return(0);
545 }
546
547 return(0);
548}
549
550static void
33c39a69 551bge_miibus_statchg(device_t dev)
984263bc
MD
552{
553 struct bge_softc *sc;
554 struct mii_data *mii;
555
556 sc = device_get_softc(dev);
557 mii = device_get_softc(sc->bge_miibus);
558
559 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
7f259627 560 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
984263bc
MD
561 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
562 } else {
563 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
564 }
565
566 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
567 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
568 } else {
569 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
570 }
984263bc
MD
571}
572
/*
 * Handle events that have triggered interrupts.
 * Currently an intentional no-op placeholder: all interrupt work is
 * done directly in bge_intr().
 */
static void
bge_handle_events(struct bge_softc *sc)
{
}
580
/*
 * Memory management for jumbo frames.
 */

/*
 * Allocate one physically-contiguous arena (BGE_JMEM bytes) and carve
 * it into BGE_JSLOTS buffers of BGE_JLEN bytes, threading them onto
 * the free list.  Returns 0 on success, ENOBUFS if the contiguous
 * allocation fails.
 */
static int
bge_alloc_jumbo_mem(struct bge_softc *sc)
{
	struct bge_jslot *entry;
	caddr_t ptr;
	int i;

	/* Grab a big chunk o' storage. */
	sc->bge_cdata.bge_jumbo_buf = contigmalloc(BGE_JMEM, M_DEVBUF,
	    M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);

	if (sc->bge_cdata.bge_jumbo_buf == NULL) {
		if_printf(&sc->arpcom.ac_if, "no memory for jumbo buffers!\n");
		return(ENOBUFS);
	}

	SLIST_INIT(&sc->bge_jfree_listhead);

	/*
	 * Now divide it up into 9K pieces and save the addresses
	 * in an array.  Each jslot records the softc back-pointer,
	 * because bge_jfree()/bge_jref() are called back by the mbuf
	 * code with only the slot pointer as context.
	 * (Note: the softc lives in the bge_jslot bookkeeping struct,
	 * not in the buffer bytes themselves.)
	 */
	ptr = sc->bge_cdata.bge_jumbo_buf;
	for (i = 0; i < BGE_JSLOTS; i++) {
		entry = &sc->bge_cdata.bge_jslots[i];
		entry->bge_sc = sc;
		entry->bge_buf = ptr;
		entry->bge_inuse = 0;
		entry->bge_slot = i;
		SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, entry, jslot_link);
		ptr += BGE_JLEN;
	}

	return(0);
}
623
/*
 * Release the contiguous jumbo-buffer arena allocated by
 * bge_alloc_jumbo_mem().
 */
static void
bge_free_jumbo_mem(struct bge_softc *sc)
{
	contigfree(sc->bge_cdata.bge_jumbo_buf, BGE_JMEM, M_DEVBUF);
}
629
630/*
631 * Allocate a jumbo buffer.
632 */
2aa9b12f 633static struct bge_jslot *
33c39a69 634bge_jalloc(struct bge_softc *sc)
984263bc 635{
2aa9b12f 636 struct bge_jslot *entry;
33c39a69 637
984263bc 638 entry = SLIST_FIRST(&sc->bge_jfree_listhead);
33c39a69 639
984263bc 640 if (entry == NULL) {
c6fd6f3b 641 if_printf(&sc->arpcom.ac_if, "no free jumbo buffers\n");
984263bc
MD
642 return(NULL);
643 }
644
2aa9b12f
JS
645 SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jslot_link);
646 entry->bge_inuse = 1;
647 return(entry);
984263bc
MD
648}
649
650/*
651 * Adjust usage count on a jumbo buffer.
652 */
653static void
2aa9b12f 654bge_jref(void *arg)
984263bc 655{
2aa9b12f
JS
656 struct bge_jslot *entry = (struct bge_jslot *)arg;
657 struct bge_softc *sc = entry->bge_sc;
984263bc
MD
658
659 if (sc == NULL)
660 panic("bge_jref: can't find softc pointer!");
661
2aa9b12f 662 if (&sc->bge_cdata.bge_jslots[entry->bge_slot] != entry)
984263bc
MD
663 panic("bge_jref: asked to reference buffer "
664 "that we don't manage!");
2aa9b12f 665 else if (entry->bge_inuse == 0)
984263bc
MD
666 panic("bge_jref: buffer already free!");
667 else
2aa9b12f 668 entry->bge_inuse++;
984263bc
MD
669}
670
671/*
672 * Release a jumbo buffer.
673 */
674static void
2aa9b12f 675bge_jfree(void *arg)
984263bc 676{
2aa9b12f
JS
677 struct bge_jslot *entry = (struct bge_jslot *)arg;
678 struct bge_softc *sc = entry->bge_sc;
984263bc
MD
679
680 if (sc == NULL)
681 panic("bge_jfree: can't find softc pointer!");
682
2aa9b12f 683 if (&sc->bge_cdata.bge_jslots[entry->bge_slot] != entry)
984263bc 684 panic("bge_jfree: asked to free buffer that we don't manage!");
2aa9b12f 685 else if (entry->bge_inuse == 0)
984263bc 686 panic("bge_jfree: buffer already free!");
2aa9b12f
JS
687 else if (--entry->bge_inuse == 0)
688 SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, entry, jslot_link);
984263bc
MD
689}
690
691
692/*
693 * Intialize a standard receive ring descriptor.
694 */
695static int
33c39a69 696bge_newbuf_std(struct bge_softc *sc, int i, struct mbuf *m)
984263bc 697{
33c39a69
JS
698 struct mbuf *m_new = NULL;
699 struct bge_rx_bd *r;
984263bc
MD
700
701 if (m == NULL) {
d5086f2b 702 m_new = m_getcl(MB_DONTWAIT, MT_DATA, M_PKTHDR);
33c39a69 703 if (m_new == NULL)
d5086f2b 704 return (ENOBUFS);
984263bc
MD
705 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
706 } else {
707 m_new = m;
708 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
709 m_new->m_data = m_new->m_ext.ext_buf;
710 }
711
712 if (!sc->bge_rx_alignment_bug)
713 m_adj(m_new, ETHER_ALIGN);
714 sc->bge_cdata.bge_rx_std_chain[i] = m_new;
715 r = &sc->bge_rdata->bge_rx_std_ring[i];
7e40b8c5 716 BGE_HOSTADDR(r->bge_addr, vtophys(mtod(m_new, caddr_t)));
984263bc
MD
717 r->bge_flags = BGE_RXBDFLAG_END;
718 r->bge_len = m_new->m_len;
719 r->bge_idx = i;
720
721 return(0);
722}
723
/*
 * Initialize a jumbo receive ring descriptor. This allocates
 * a jumbo buffer from the pool managed internally by the driver.
 */
static int
bge_newbuf_jumbo(struct bge_softc *sc, int i, struct mbuf *m)
{
	struct mbuf *m_new = NULL;
	struct bge_rx_bd *r;

	if (m == NULL) {
		struct bge_jslot *buf;

		/* Allocate the mbuf. */
		MGETHDR(m_new, MB_DONTWAIT, MT_DATA);
		if (m_new == NULL)
			return(ENOBUFS);

		/* Allocate the jumbo buffer */
		buf = bge_jalloc(sc);
		if (buf == NULL) {
			m_freem(m_new);
			if_printf(&sc->arpcom.ac_if, "jumbo allocation failed "
			    "-- packet dropped!\n");
			return(ENOBUFS);
		}

		/*
		 * Attach the buffer to the mbuf as external storage;
		 * bge_jfree/bge_jref receive ext_arg (the jslot) when the
		 * mbuf system releases or clones the mbuf.
		 */
		m_new->m_ext.ext_arg = buf;
		m_new->m_ext.ext_buf = buf->bge_buf;
		m_new->m_ext.ext_free = bge_jfree;
		m_new->m_ext.ext_ref = bge_jref;
		m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN;

		m_new->m_data = m_new->m_ext.ext_buf;
		m_new->m_flags |= M_EXT;
		m_new->m_len = m_new->m_pkthdr.len = m_new->m_ext.ext_size;
	} else {
		/* Recycle the caller's jumbo mbuf in place. */
		m_new = m;
		m_new->m_data = m_new->m_ext.ext_buf;
		m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN;
	}

	if (!sc->bge_rx_alignment_bug)
		m_adj(m_new, ETHER_ALIGN);
	/* Set up the descriptor. */
	r = &sc->bge_rdata->bge_rx_jumbo_ring[i];
	sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new;
	BGE_HOSTADDR(r->bge_addr, vtophys(mtod(m_new, caddr_t)));
	r->bge_flags = BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING;
	r->bge_len = m_new->m_len;
	r->bge_idx = i;

	return(0);
}
779
780/*
781 * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
782 * that's 1MB or memory, which is a lot. For now, we fill only the first
783 * 256 ring entries and hope that our CPU is fast enough to keep up with
784 * the NIC.
785 */
786static int
33c39a69 787bge_init_rx_ring_std(struct bge_softc *sc)
984263bc
MD
788{
789 int i;
790
791 for (i = 0; i < BGE_SSLOTS; i++) {
792 if (bge_newbuf_std(sc, i, NULL) == ENOBUFS)
793 return(ENOBUFS);
794 };
795
796 sc->bge_std = i - 1;
797 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
798
799 return(0);
800}
801
802static void
33c39a69 803bge_free_rx_ring_std(struct bge_softc *sc)
984263bc
MD
804{
805 int i;
806
807 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
808 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
809 m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
810 sc->bge_cdata.bge_rx_std_chain[i] = NULL;
811 }
33c39a69 812 bzero(&sc->bge_rdata->bge_rx_std_ring[i],
984263bc
MD
813 sizeof(struct bge_rx_bd));
814 }
984263bc
MD
815}
816
817static int
33c39a69 818bge_init_rx_ring_jumbo(struct bge_softc *sc)
984263bc
MD
819{
820 int i;
821 struct bge_rcb *rcb;
822
823 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
824 if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
825 return(ENOBUFS);
826 };
827
828 sc->bge_jumbo = i - 1;
829
830 rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
831 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 0);
832 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
833
834 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
835
836 return(0);
837}
838
839static void
33c39a69 840bge_free_rx_ring_jumbo(struct bge_softc *sc)
984263bc
MD
841{
842 int i;
843
844 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
845 if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
846 m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
847 sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
848 }
33c39a69 849 bzero(&sc->bge_rdata->bge_rx_jumbo_ring[i],
984263bc
MD
850 sizeof(struct bge_rx_bd));
851 }
984263bc
MD
852}
853
854static void
33c39a69 855bge_free_tx_ring(struct bge_softc *sc)
984263bc
MD
856{
857 int i;
858
859 if (sc->bge_rdata->bge_tx_ring == NULL)
860 return;
861
862 for (i = 0; i < BGE_TX_RING_CNT; i++) {
863 if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
864 m_freem(sc->bge_cdata.bge_tx_chain[i]);
865 sc->bge_cdata.bge_tx_chain[i] = NULL;
866 }
33c39a69 867 bzero(&sc->bge_rdata->bge_tx_ring[i],
984263bc
MD
868 sizeof(struct bge_tx_bd));
869 }
984263bc
MD
870}
871
/*
 * Reset the TX ring bookkeeping and zero both producer-index
 * mailboxes.  Always returns 0.
 */
static int
bge_init_tx_ring(struct bge_softc *sc)
{
	sc->bge_txcnt = 0;
	sc->bge_tx_saved_considx = 0;

	CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0);
	/* 5700 b2 errata: the mailbox write must be issued twice */
	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
		CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0);

	CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
	/* 5700 b2 errata */
	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
		CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);

	return(0);
}
890
984263bc 891static void
33c39a69 892bge_setmulti(struct bge_softc *sc)
984263bc
MD
893{
894 struct ifnet *ifp;
895 struct ifmultiaddr *ifma;
33c39a69 896 uint32_t hashes[4] = { 0, 0, 0, 0 };
984263bc
MD
897 int h, i;
898
899 ifp = &sc->arpcom.ac_if;
900
901 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
902 for (i = 0; i < 4; i++)
903 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
904 return;
905 }
906
907 /* First, zot all the existing filters. */
908 for (i = 0; i < 4; i++)
909 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);
910
911 /* Now program new ones. */
33c39a69 912 LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
984263bc
MD
913 if (ifma->ifma_addr->sa_family != AF_LINK)
914 continue;
3b4ec5b8
JS
915 h = ether_crc32_le(
916 LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
917 ETHER_ADDR_LEN) & 0x7f;
984263bc
MD
918 hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
919 }
920
921 for (i = 0; i < 4; i++)
922 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
984263bc
MD
923}
924
/*
 * Do endian, PCI and DMA initialization. Also check the on-board ROM
 * self-test results.
 *
 * Returns 0 on success or ENODEV if the RX CPU's ROM self-test failed.
 * Must run before any non-PCI register access, since it programs the
 * chip's byte-swapping mode first.
 */
static int
bge_chipinit(struct bge_softc *sc)
{
	int i;
	uint32_t dma_rw_ctl;

	/* Set endianness before we access any non-PCI registers. */
#if BYTE_ORDER == BIG_ENDIAN
	pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL,
	    BGE_BIGENDIAN_INIT, 4);
#else
	pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL,
	    BGE_LITTLEENDIAN_INIT, 4);
#endif

	/*
	 * Check the 'ROM failed' bit on the RX CPU to see if
	 * self-tests passed.
	 */
	if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL) {
		if_printf(&sc->arpcom.ac_if,
			  "RX CPU self-diagnostics failed!\n");
		return(ENODEV);
	}

	/* Clear the MAC control register */
	CSR_WRITE_4(sc, BGE_MAC_MODE, 0);

	/*
	 * Clear the MAC statistics block in the NIC's
	 * internal memory.
	 */
	for (i = BGE_STATS_BLOCK;
	    i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t))
		BGE_MEMWIN_WRITE(sc, i, 0);

	/* Likewise for the status block. */
	for (i = BGE_STATUS_BLOCK;
	    i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t))
		BGE_MEMWIN_WRITE(sc, i, 0);

	/*
	 * Set up the PCI DMA control register; the read/write watermark
	 * encoding differs between conventional PCI and PCI-X, and
	 * between ASIC revisions.
	 */
	if (pci_read_config(sc->bge_dev, BGE_PCI_PCISTATE, 4) &
	    BGE_PCISTATE_PCI_BUSMODE) {
		/* Conventional PCI bus */
		dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
		    (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
		    (0x7 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
		    (0x0F);
	} else {
		/* PCI-X bus */
		/*
		 * The 5704 uses a different encoding of read/write
		 * watermarks.
		 */
		if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
			dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
			    (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
			    (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
		else
			dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
			    (0x3 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
			    (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
			    (0x0F);

		/*
		 * 5703 and 5704 need ONEDMA_AT_ONCE as a workaround
		 * for hardware bugs.
		 */
		if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
		    sc->bge_asicrev == BGE_ASICREV_BCM5704) {
			uint32_t tmp;

			tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1f;
			if (tmp == 0x6 || tmp == 0x7)
				dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE;
		}
	}

	/* 5703/5704/5705 must not use the minimum-DMA heuristic. */
	if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5704 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5705)
		dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
	pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);

	/*
	 * Set up general mode register.
	 */
	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_MODECTL_WORDSWAP_NONFRAME|
	    BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA|
	    BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS|
	    BGE_MODECTL_TX_NO_PHDR_CSUM|BGE_MODECTL_RX_NO_PHDR_CSUM);

	/*
	 * Disable memory write invalidate. Apparently it is not supported
	 * properly by these devices.
	 */
	PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD, PCIM_CMD_MWIEN, 4);

	/* Set the timer prescaler (always 66Mhz) */
	CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/);

	return(0);
}
1032
/*
 * Program the NIC's internal memory blocks, ring control blocks (RCBs)
 * and the many on-chip state machines, then enable them in the order
 * the hardware requires.  Called from bge_init() after bge_chipinit().
 *
 * Returns 0 on success or ENXIO if the buffer manager, flow-through
 * queues or host coalescing engine fail to respond in time.
 *
 * NOTE(review): the register write ordering below follows the
 * manufacturer's init sequence — do not reorder.
 */
static int
bge_blockinit(struct bge_softc *sc)
{
	struct bge_rcb *rcb;
	volatile struct bge_rcb *vrcb;
	int i;

	/*
	 * Initialize the memory window pointer register so that
	 * we can access the first 32K of internal NIC RAM. This will
	 * allow us to set up the TX send ring RCBs and the RX return
	 * ring RCBs, plus other things which live in NIC memory.
	 */
	CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);

	/* Note: the BCM5704 has a smaller mbuf space than other chips. */

	if (sc->bge_asicrev != BGE_ASICREV_BCM5705) {
		/* Configure mbuf memory pool */
		if (sc->bge_extram) {
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
			    BGE_EXT_SSRAM);
			if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
				CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
			else
				CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
		} else {
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
			    BGE_BUFFPOOL_1);
			if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
				CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
			else
				CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
		}

		/* Configure DMA resource pool */
		CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
		    BGE_DMA_DESCRIPTORS);
		CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
	}

	/* Configure mbuf pool watermarks (5705 uses different values) */
	if (sc->bge_asicrev == BGE_ASICREV_BCM5705) {
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
	} else {
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
	}
	CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);

	/* Configure DMA resource watermarks */
	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);

	/* Enable buffer manager (no separate buffer manager on the 5705) */
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705) {
		CSR_WRITE_4(sc, BGE_BMAN_MODE,
		    BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN);

		/* Poll for buffer manager start indication */
		for (i = 0; i < BGE_TIMEOUT; i++) {
			if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
				break;
			DELAY(10);
		}

		if (i == BGE_TIMEOUT) {
			if_printf(&sc->arpcom.ac_if,
				  "buffer manager failed to start\n");
			return(ENXIO);
		}
	}

	/* Enable flow-through queues */
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);

	/* Wait until queue initialization is complete */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
			break;
		DELAY(10);
	}

	if (i == BGE_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if,
			  "flow-through queue init failed\n");
		return(ENXIO);
	}

	/* Initialize the standard RX ring control block */
	rcb = &sc->bge_rdata->bge_info.bge_std_rx_rcb;
	BGE_HOSTADDR(rcb->bge_hostaddr,
	    vtophys(&sc->bge_rdata->bge_rx_std_ring));
	if (sc->bge_asicrev == BGE_ASICREV_BCM5705)
		rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
	else
		rcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
	if (sc->bge_extram)
		rcb->bge_nicaddr = BGE_EXT_STD_RX_RINGS;
	else
		rcb->bge_nicaddr = BGE_STD_RX_RINGS;
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);

	/*
	 * Initialize the jumbo RX ring control block
	 * We set the 'ring disabled' bit in the flags
	 * field until we're actually ready to start
	 * using this ring (i.e. once we set the MTU
	 * high enough to require it).
	 */
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705) {
		rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
		BGE_HOSTADDR(rcb->bge_hostaddr,
		    vtophys(&sc->bge_rdata->bge_rx_jumbo_ring));
		rcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN,
		    BGE_RCB_FLAG_RING_DISABLED);
		if (sc->bge_extram)
			rcb->bge_nicaddr = BGE_EXT_JUMBO_RX_RINGS;
		else
			rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
		    rcb->bge_hostaddr.bge_addr_hi);
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
		    rcb->bge_hostaddr.bge_addr_lo);
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
		    rcb->bge_maxlen_flags);
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);

		/* Set up dummy disabled mini ring RCB */
		rcb = &sc->bge_rdata->bge_info.bge_mini_rx_rcb;
		rcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
		CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
		    rcb->bge_maxlen_flags);
	}

	/*
	 * Set the BD ring replentish thresholds. The recommended
	 * values are 1/8th the number of descriptors allocated to
	 * each ring.
	 */
	CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, BGE_STD_RX_RING_CNT/8);
	CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8);

	/*
	 * Disable all unused send rings by setting the 'ring disabled'
	 * bit in the flags field of all the TX send ring control blocks.
	 * These are located in NIC memory.
	 */
	vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START +
	    BGE_SEND_RING_RCB);
	for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) {
		vrcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
		vrcb->bge_nicaddr = 0;
		vrcb++;
	}

	/* Configure TX RCB 0 (we use only the first ring) */
	vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START +
	    BGE_SEND_RING_RCB);
	vrcb->bge_hostaddr.bge_addr_hi = 0;
	BGE_HOSTADDR(vrcb->bge_hostaddr, vtophys(&sc->bge_rdata->bge_tx_ring));
	vrcb->bge_nicaddr = BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT);
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705)
		vrcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0);

	/* Disable all unused RX return rings */
	vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START +
	    BGE_RX_RETURN_RING_RCB);
	for (i = 0; i < BGE_RX_RINGS_MAX; i++) {
		vrcb->bge_hostaddr.bge_addr_hi = 0;
		vrcb->bge_hostaddr.bge_addr_lo = 0;
		vrcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt,
		    BGE_RCB_FLAG_RING_DISABLED);
		vrcb->bge_nicaddr = 0;
		CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO +
		    (i * (sizeof(uint64_t))), 0);
		vrcb++;
	}

	/* Initialize RX ring indexes */
	CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, 0);
	CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
	CSR_WRITE_4(sc, BGE_MBX_RX_MINI_PROD_LO, 0);

	/*
	 * Set up RX return ring 0
	 * Note that the NIC address for RX return rings is 0x00000000.
	 * The return rings live entirely within the host, so the
	 * nicaddr field in the RCB isn't used.
	 */
	vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START +
	    BGE_RX_RETURN_RING_RCB);
	vrcb->bge_hostaddr.bge_addr_hi = 0;
	BGE_HOSTADDR(vrcb->bge_hostaddr,
	    vtophys(&sc->bge_rdata->bge_rx_return_ring));
	vrcb->bge_nicaddr = 0x00000000;
	vrcb->bge_maxlen_flags =
	    BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0);

	/* Set random backoff seed for TX (derived from the MAC address) */
	CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
	    sc->arpcom.ac_enaddr[0] + sc->arpcom.ac_enaddr[1] +
	    sc->arpcom.ac_enaddr[2] + sc->arpcom.ac_enaddr[3] +
	    sc->arpcom.ac_enaddr[4] + sc->arpcom.ac_enaddr[5] +
	    BGE_TX_BACKOFF_SEED_MASK);

	/* Set inter-packet gap */
	CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);

	/*
	 * Specify which ring to use for packets that don't match
	 * any RX rules.
	 */
	CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);

	/*
	 * Configure number of RX lists. One interrupt distribution
	 * list, sixteen active lists, one bad frames class.
	 */
	CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);

	/* Inialize RX list placement stats mask. */
	CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
	CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);

	/* Disable host coalescing until we get it set up */
	CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);

	/* Poll to make sure it's shut down. */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
			break;
		DELAY(10);
	}

	if (i == BGE_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if,
			  "host coalescing engine failed to idle\n");
		return(ENXIO);
	}

	/* Set up host coalescing defaults */
	CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
	CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705) {
		CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
		CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
	}
	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0);
	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0);

	/* Set up address of statistics block (not present on the 5705) */
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705) {
		CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI, 0);
		CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
		    vtophys(&sc->bge_rdata->bge_info.bge_stats));

		CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
		CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
		CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
	}

	/* Set up address of status block */
	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI, 0);
	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
	    vtophys(&sc->bge_rdata->bge_status_block));

	sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx = 0;
	sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx = 0;

	/* Turn on host coalescing state machine */
	CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);

	/* Turn on RX BD completion state machine and enable attentions */
	CSR_WRITE_4(sc, BGE_RBDC_MODE,
	    BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);

	/* Turn on RX list placement state machine */
	CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);

	/* Turn on RX list selector state machine. */
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705)
		CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);

	/* Turn on DMA, clear stats */
	CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB|
	    BGE_MACMODE_RXDMA_ENB|BGE_MACMODE_RX_STATS_CLEAR|
	    BGE_MACMODE_TX_STATS_CLEAR|BGE_MACMODE_RX_STATS_ENB|
	    BGE_MACMODE_TX_STATS_ENB|BGE_MACMODE_FRMHDR_DMA_ENB|
	    (sc->bge_tbi ? BGE_PORTMODE_TBI : BGE_PORTMODE_MII));

	/* Set misc. local control, enable interrupts on attentions */
	CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);

#ifdef notdef
	/* Assert GPIO pins for PHY reset */
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
	    BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
	    BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
#endif

	/* Turn on DMA completion state machine */
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705)
		CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);

	/* Turn on write DMA state machine */
	CSR_WRITE_4(sc, BGE_WDMA_MODE,
	    BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS);

	/* Turn on read DMA state machine */
	CSR_WRITE_4(sc, BGE_RDMA_MODE,
	    BGE_RDMAMODE_ENABLE|BGE_RDMAMODE_ALL_ATTNS);

	/* Turn on RX data completion state machine */
	CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);

	/* Turn on RX BD initiator state machine */
	CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);

	/* Turn on RX data and RX BD initiator state machine */
	CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);

	/* Turn on Mbuf cluster free state machine */
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705)
		CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);

	/* Turn on send BD completion state machine */
	CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);

	/* Turn on send data completion state machine */
	CSR_WRITE_4(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);

	/* Turn on send data initiator state machine */
	CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);

	/* Turn on send BD initiator state machine */
	CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);

	/* Turn on send BD selector state machine */
	CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);

	CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
	CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
	    BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);

	/* ack/clear link change events */
	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
	    BGE_MACSTAT_LINK_CHANGED);

	/* Enable PHY auto polling (for MII/GMII only) */
	if (sc->bge_tbi) {
		CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
	} else {
		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL|10<<16);
		if (sc->bge_asicrev == BGE_ASICREV_BCM5700)
			CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
			    BGE_EVTENB_MI_INTERRUPT);
	}

	/* Enable link state change attentions. */
	BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);

	return(0);
}
1412
/*
 * Probe for a Broadcom chip. Check the PCI vendor and device IDs
 * against our list and return its name if we find a match. Note
 * that since the Broadcom controller contains VPD support, we
 * can get the device name string from the controller itself instead
 * of the compiled-in string. This is a little slow, but it guarantees
 * we'll always announce the right product name.
 *
 * Returns 0 if the device matches the bge_devs table, ENXIO otherwise.
 */
static int
bge_probe(device_t dev)
{
	struct bge_softc *sc;
	struct bge_type *t;
	char *descbuf;
	uint16_t product, vendor;

	product = pci_get_device(dev);
	vendor = pci_get_vendor(dev);

	/* Scan the supported-device table; entry list is NULL-terminated. */
	for (t = bge_devs; t->bge_name != NULL; t++) {
		if (vendor == t->bge_vid && product == t->bge_did)
			break;
	}

	if (t->bge_name == NULL)
		return(ENXIO);

	sc = device_get_softc(dev);
#ifdef notdef
	sc->bge_dev = dev;

	bge_vpd_read(sc);
	device_set_desc(dev, sc->bge_vpd_prodname);
#endif
	/* Build a description that includes the ASIC revision. */
	descbuf = malloc(BGE_DEVDESC_MAX, M_TEMP, M_WAITOK);
	snprintf(descbuf, BGE_DEVDESC_MAX, "%s, ASIC rev. %#04x", t->bge_name,
	    pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >> 16);
	device_set_desc_copy(dev, descbuf);
	/* Dell-branded boards have no 3-LED attachment. */
	if (pci_get_subvendor(dev) == PCI_VENDOR_DELL)
		sc->bge_no_3_led = 1;
	free(descbuf, M_TEMP);
	return(0);
}
1456
1457static int
33c39a69 1458bge_attach(device_t dev)
984263bc
MD
1459{
1460 int s;
33c39a69 1461 uint32_t command;
984263bc
MD
1462 struct ifnet *ifp;
1463 struct bge_softc *sc;
33c39a69
JS
1464 uint32_t hwcfg = 0;
1465 uint32_t mac_addr = 0;
c6fd6f3b 1466 int error = 0, rid;
0a8b5977 1467 uint8_t ether_addr[ETHER_ADDR_LEN];
984263bc
MD
1468
1469 s = splimp();
1470
1471 sc = device_get_softc(dev);
984263bc 1472 sc->bge_dev = dev;
263489fb 1473 callout_init(&sc->bge_stat_timer);
984263bc
MD
1474
1475 /*
1476 * Map control/status registers.
1477 */
cc8ddf9e
JS
1478 pci_enable_busmaster(dev);
1479 pci_enable_io(dev, SYS_RES_MEMORY);
984263bc
MD
1480 command = pci_read_config(dev, PCIR_COMMAND, 4);
1481
1482 if (!(command & PCIM_CMD_MEMEN)) {
c6fd6f3b 1483 device_printf(dev, "failed to enable memory mapping!\n");
984263bc
MD
1484 error = ENXIO;
1485 goto fail;
1486 }
1487
1488 rid = BGE_PCI_BAR0;
cc8ddf9e
JS
1489 sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
1490 RF_ACTIVE);
984263bc
MD
1491
1492 if (sc->bge_res == NULL) {
c6fd6f3b 1493 device_printf(dev, "couldn't map memory\n");
984263bc
MD
1494 error = ENXIO;
1495 goto fail;
1496 }
1497
1498 sc->bge_btag = rman_get_bustag(sc->bge_res);
1499 sc->bge_bhandle = rman_get_bushandle(sc->bge_res);
1500 sc->bge_vhandle = (vm_offset_t)rman_get_virtual(sc->bge_res);
1501
984263bc
MD
1502 /* Allocate interrupt */
1503 rid = 0;
33c39a69 1504
cc8ddf9e 1505 sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
984263bc
MD
1506 RF_SHAREABLE | RF_ACTIVE);
1507
1508 if (sc->bge_irq == NULL) {
c6fd6f3b 1509 device_printf(dev, "couldn't map interrupt\n");
984263bc
MD
1510 error = ENXIO;
1511 goto fail;
1512 }
1513
1514 error = bus_setup_intr(dev, sc->bge_irq, INTR_TYPE_NET,
e9cb6d99 1515 bge_intr, sc, &sc->bge_intrhand, NULL);
984263bc
MD
1516
1517 if (error) {
1518 bge_release_resources(sc);
c6fd6f3b 1519 device_printf(dev, "couldn't set up irq\n");
984263bc
MD
1520 goto fail;
1521 }
1522
c6fd6f3b
JS
1523 ifp = &sc->arpcom.ac_if;
1524 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
984263bc
MD
1525
1526 /* Try to reset the chip. */
1527 bge_reset(sc);
1528
1529 if (bge_chipinit(sc)) {
c6fd6f3b 1530 device_printf(dev, "chip initialization failed\n");
984263bc
MD
1531 bge_release_resources(sc);
1532 error = ENXIO;
1533 goto fail;
1534 }
1535
1536 /*
1537 * Get station address from the EEPROM.
1538 */
1539 mac_addr = bge_readmem_ind(sc, 0x0c14);
1540 if ((mac_addr >> 16) == 0x484b) {
0a8b5977
JS
1541 ether_addr[0] = (uint8_t)(mac_addr >> 8);
1542 ether_addr[1] = (uint8_t)mac_addr;
984263bc 1543 mac_addr = bge_readmem_ind(sc, 0x0c18);
0a8b5977
JS
1544 ether_addr[2] = (uint8_t)(mac_addr >> 24);
1545 ether_addr[3] = (uint8_t)(mac_addr >> 16);
1546 ether_addr[4] = (uint8_t)(mac_addr >> 8);
1547 ether_addr[5] = (uint8_t)mac_addr;
1548 } else if (bge_read_eeprom(sc, ether_addr,
984263bc 1549 BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) {
c6fd6f3b 1550 device_printf(dev, "failed to read station address\n");
984263bc
MD
1551 bge_release_resources(sc);
1552 error = ENXIO;
1553 goto fail;
1554 }
1555
984263bc
MD
1556 /* Allocate the general information block and ring buffers. */
1557 sc->bge_rdata = contigmalloc(sizeof(struct bge_ring_data), M_DEVBUF,
1558 M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);
1559
1560 if (sc->bge_rdata == NULL) {
1561 bge_release_resources(sc);
1562 error = ENXIO;
c6fd6f3b 1563 device_printf(dev, "no memory for list buffers!\n");
984263bc
MD
1564 goto fail;
1565 }
1566
1567 bzero(sc->bge_rdata, sizeof(struct bge_ring_data));
1568
7e40b8c5
HP
1569 /* Save ASIC rev. */
1570
1571 sc->bge_chipid =
1572 pci_read_config(dev, BGE_PCI_MISC_CTL, 4) &
1573 BGE_PCIMISCCTL_ASICREV;
1574 sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid);
1575 sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid);
1576
1577 /*
1578 * Try to allocate memory for jumbo buffers.
1579 * The 5705 does not appear to support jumbo frames.
1580 */
1581 if (sc->bge_asicrev != BGE_ASICREV_BCM5705) {
1582 if (bge_alloc_jumbo_mem(sc)) {
c6fd6f3b 1583 device_printf(dev, "jumbo buffer allocation failed\n");
7e40b8c5
HP
1584 bge_release_resources(sc);
1585 error = ENXIO;
1586 goto fail;
1587 }
984263bc
MD
1588 }
1589
1590 /* Set default tuneable values. */
1591 sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
1592 sc->bge_rx_coal_ticks = 150;
1593 sc->bge_tx_coal_ticks = 150;
1594 sc->bge_rx_max_coal_bds = 64;
1595 sc->bge_tx_max_coal_bds = 128;
1596
7e40b8c5
HP
1597 /* 5705 limits RX return ring to 512 entries. */
1598 if (sc->bge_asicrev == BGE_ASICREV_BCM5705)
1599 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
1600 else
1601 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
1602
984263bc 1603 /* Set up ifnet structure */
984263bc 1604 ifp->if_softc = sc;
984263bc
MD
1605 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1606 ifp->if_ioctl = bge_ioctl;
984263bc
MD
1607 ifp->if_start = bge_start;
1608 ifp->if_watchdog = bge_watchdog;
1609 ifp->if_init = bge_init;
1610 ifp->if_mtu = ETHERMTU;
936ff230
JS
1611 ifq_set_maxlen(&ifp->if_snd, BGE_TX_RING_CNT - 1);
1612 ifq_set_ready(&ifp->if_snd);
984263bc
MD
1613 ifp->if_hwassist = BGE_CSUM_FEATURES;
1614 ifp->if_capabilities = IFCAP_HWCSUM;
1615 ifp->if_capenable = ifp->if_capabilities;
1616
984263bc
MD
1617 /*
1618 * Figure out what sort of media we have by checking the
1619 * hardware config word in the first 32k of NIC internal memory,
1620 * or fall back to examining the EEPROM if necessary.
1621 * Note: on some BCM5700 cards, this value appears to be unset.
1622 * If that's the case, we have to rely on identifying the NIC
1623 * by its PCI subsystem ID, as we do below for the SysKonnect
1624 * SK-9D41.
1625 */
1626 if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER)
1627 hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
1628 else {
1629 bge_read_eeprom(sc, (caddr_t)&hwcfg,
1630 BGE_EE_HWCFG_OFFSET, sizeof(hwcfg));
1631 hwcfg = ntohl(hwcfg);
1632 }
1633
1634 if ((hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER)
1635 sc->bge_tbi = 1;
1636
1637 /* The SysKonnect SK-9D41 is a 1000baseSX card. */
cc8ddf9e 1638 if (pci_get_subvendor(dev) == PCI_PRODUCT_SCHNEIDERKOCH_SK_9D41)
984263bc
MD
1639 sc->bge_tbi = 1;
1640
1641 if (sc->bge_tbi) {
1642 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK,
1643 bge_ifmedia_upd, bge_ifmedia_sts);
1644 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
1645 ifmedia_add(&sc->bge_ifmedia,
1646 IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL);
1647 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
1648 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO);
1649 } else {
1650 /*
1651 * Do transceiver setup.
1652 */
1653 if (mii_phy_probe(dev, &sc->bge_miibus,
1654 bge_ifmedia_upd, bge_ifmedia_sts)) {
c6fd6f3b 1655 device_printf(dev, "MII without any PHY!\n");
984263bc
MD
1656 bge_release_resources(sc);
1657 bge_free_jumbo_mem(sc);
1658 error = ENXIO;
1659 goto fail;
1660 }
1661 }
1662
1663 /*
1664 * When using the BCM5701 in PCI-X mode, data corruption has
1665 * been observed in the first few bytes of some received packets.
1666 * Aligning the packet buffer in memory eliminates the corruption.
1667 * Unfortunately, this misaligns the packet payloads. On platforms
1668 * which do not support unaligned accesses, we will realign the
1669 * payloads by copying the received packets.
1670 */
1671 switch (sc->bge_chipid) {
1672 case BGE_CHIPID_BCM5701_A0:
1673 case BGE_CHIPID_BCM5701_B0:
1674 case BGE_CHIPID_BCM5701_B2:
1675 case BGE_CHIPID_BCM5701_B5:
1676 /* If in PCI-X mode, work around the alignment bug. */
1677 if ((pci_read_config(dev, BGE_PCI_PCISTATE, 4) &
1678 (BGE_PCISTATE_PCI_BUSMODE | BGE_PCISTATE_PCI_BUSSPEED)) ==
1679 BGE_PCISTATE_PCI_BUSSPEED)
1680 sc->bge_rx_alignment_bug = 1;
1681 break;
1682 }
1683
1684 /*
1685 * Call MI attach routine.
1686 */
0a8b5977 1687 ether_ifattach(ifp, ether_addr);
984263bc
MD
1688
1689fail:
1690 splx(s);
1691
1692 return(error);
1693}
1694
/*
 * Detach routine: unhook from the network stack, stop and reset the
 * hardware, tear down media/PHY state and release all bus resources.
 * Mirrors bge_attach() in reverse order.  Always returns 0.
 */
static int
bge_detach(device_t dev)
{
	struct bge_softc *sc;
	struct ifnet *ifp;
	int s;

	s = splimp();

	sc = device_get_softc(dev);
	ifp = &sc->arpcom.ac_if;

	/* Unregister from the stack before touching the hardware. */
	ether_ifdetach(ifp);
	bge_stop(sc);
	bge_reset(sc);

	if (sc->bge_tbi) {
		/* Fiber: we own the ifmedia state directly. */
		ifmedia_removeall(&sc->bge_ifmedia);
	} else {
		/* Copper: detach the miibus child device. */
		bus_generic_detach(dev);
		device_delete_child(dev, sc->bge_miibus);
	}

	bge_release_resources(sc);
	/* Jumbo memory is only allocated for non-5705 chips in attach. */
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705)
		bge_free_jumbo_mem(sc);

	splx(s);

	return(0);
}
1726
1727static void
33c39a69 1728bge_release_resources(struct bge_softc *sc)
984263bc
MD
1729{
1730 device_t dev;
1731
1732 dev = sc->bge_dev;
1733
1734 if (sc->bge_vpd_prodname != NULL)
1735 free(sc->bge_vpd_prodname, M_DEVBUF);
1736
1737 if (sc->bge_vpd_readonly != NULL)
1738 free(sc->bge_vpd_readonly, M_DEVBUF);
1739
1740 if (sc->bge_intrhand != NULL)
1741 bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand);
1742
1743 if (sc->bge_irq != NULL)
1744 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->bge_irq);
1745
1746 if (sc->bge_res != NULL)
1747 bus_release_resource(dev, SYS_RES_MEMORY,
1748 BGE_PCI_BAR0, sc->bge_res);
1749
1750 if (sc->bge_rdata != NULL)
33c39a69
JS
1751 contigfree(sc->bge_rdata, sizeof(struct bge_ring_data),
1752 M_DEVBUF);
984263bc
MD
1753
1754 return;
1755}
1756
/*
 * Issue a global chip reset, then restore the PCI configuration state
 * that the reset clobbers and wait for the on-board firmware to signal
 * that its initialization is complete.  Returns silently on firmware
 * handshake timeout after logging a warning.
 */
static void
bge_reset(struct bge_softc *sc)
{
	device_t dev;
	uint32_t cachesize, command, pcistate;
	int i, val = 0;

	dev = sc->bge_dev;

	/* Save some important PCI state. */
	cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
	command = pci_read_config(dev, BGE_PCI_CMD, 4);
	pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);

	/* Mask PCI interrupts and enable indirect register access. */
	pci_write_config(dev, BGE_PCI_MISC_CTL,
	    BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
	    BGE_PCIMISCCTL_ENDIAN_WORDSWAP|BGE_PCIMISCCTL_PCISTATE_RW, 4);

	/* Issue global reset */
	bge_writereg_ind(sc, BGE_MISC_CFG,
	    BGE_MISCCFG_RESET_CORE_CLOCKS | (65<<1));

	DELAY(1000);

	/* Reset some of the PCI state that got zapped by reset */
	pci_write_config(dev, BGE_PCI_MISC_CTL,
	    BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
	    BGE_PCIMISCCTL_ENDIAN_WORDSWAP|BGE_PCIMISCCTL_PCISTATE_RW, 4);
	pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
	pci_write_config(dev, BGE_PCI_CMD, command, 4);
	bge_writereg_ind(sc, BGE_MISC_CFG, (65 << 1));

	/*
	 * Prevent PXE restart: write a magic number to the
	 * general communications memory at 0xB50.
	 */
	bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
	/*
	 * Poll the value location we just wrote until
	 * we see the 1's complement of the magic number.
	 * This indicates that the firmware initialization
	 * is complete.
	 */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
		if (val == ~BGE_MAGIC_NUMBER)
			break;
		DELAY(10);
	}

	if (i == BGE_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if, "firmware handshake timed out\n");
		return;
	}

	/*
	 * XXX Wait for the value of the PCISTATE register to
	 * return to its original pre-reset state. This is a
	 * fairly good indicator of reset completion. If we don't
	 * wait for the reset to fully complete, trying to read
	 * from the device's non-PCI registers may yield garbage
	 * results.
	 */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate)
			break;
		DELAY(10);
	}

	/* Enable memory arbiter. */
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705)
		CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);

	/* Fix up byte swapping */
	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_MODECTL_BYTESWAP_NONFRAME|
	    BGE_MODECTL_BYTESWAP_DATA);

	CSR_WRITE_4(sc, BGE_MAC_MODE, 0);

	/* Give the chip time to settle after the reset. */
	DELAY(10000);

	return;
}
1840
/*
 * Frame reception handling. This is called if there's a frame
 * on the receive return list.
 *
 * Note: we have to be able to handle two possibilities here:
 * 1) the frame is from the jumbo recieve ring
 * 2) the frame is from the standard receive ring
 *
 * Drains the RX return ring up to the producer index reported in the
 * status block, replenishing the standard/jumbo rings as it goes, then
 * writes the updated consumer/producer indexes back to the hardware
 * mailboxes.
 */
static void
bge_rxeof(struct bge_softc *sc)
{
	struct ifnet *ifp;
	int stdcnt = 0, jumbocnt = 0;

	ifp = &sc->arpcom.ac_if;

	/* Consume descriptors until we catch up with the hardware. */
	while(sc->bge_rx_saved_considx !=
	    sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx) {
		struct bge_rx_bd *cur_rx;
		uint32_t rxidx;
		struct mbuf *m = NULL;
		uint16_t vlan_tag = 0;
		int have_tag = 0;

		cur_rx =
		    &sc->bge_rdata->bge_rx_return_ring[sc->bge_rx_saved_considx];

		rxidx = cur_rx->bge_idx;
		BGE_INC(sc->bge_rx_saved_considx, sc->bge_return_ring_cnt);

		if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
			have_tag = 1;
			vlan_tag = cur_rx->bge_vlan_tag;
		}

		if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
			BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
			m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
			sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL;
			jumbocnt++;
			/* On error, recycle the old mbuf back into the ring. */
			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
				ifp->if_ierrors++;
				bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
				continue;
			}
			/* If no replacement mbuf is available, recycle too. */
			if (bge_newbuf_jumbo(sc,
			    sc->bge_jumbo, NULL) == ENOBUFS) {
				ifp->if_ierrors++;
				bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
				continue;
			}
		} else {
			BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
			m = sc->bge_cdata.bge_rx_std_chain[rxidx];
			sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL;
			stdcnt++;
			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
				ifp->if_ierrors++;
				bge_newbuf_std(sc, sc->bge_std, m);
				continue;
			}
			if (bge_newbuf_std(sc, sc->bge_std,
			    NULL) == ENOBUFS) {
				ifp->if_ierrors++;
				bge_newbuf_std(sc, sc->bge_std, m);
				continue;
			}
		}

		ifp->if_ipackets++;
#ifndef __i386__
		/*
		 * The i386 allows unaligned accesses, but for other
		 * platforms we must make sure the payload is aligned.
		 */
		if (sc->bge_rx_alignment_bug) {
			bcopy(m->m_data, m->m_data + ETHER_ALIGN,
			    cur_rx->bge_len);
			m->m_data += ETHER_ALIGN;
		}
#endif
		/* Strip the trailing Ethernet CRC from the frame length. */
		m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
		m->m_pkthdr.rcvif = ifp;

#if 0 /* currently broken for some packets, possibly related to TCP options */
		if (ifp->if_hwassist) {
			m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
			if ((cur_rx->bge_ip_csum ^ 0xffff) == 0)
				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
			if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) {
				m->m_pkthdr.csum_data =
				    cur_rx->bge_tcp_udp_csum;
				m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
			}
		}
#endif

		/*
		 * If we received a packet with a vlan tag, pass it
		 * to vlan_input() instead of ether_input().
		 */
		if (have_tag) {
			VLAN_INPUT_TAG(m, vlan_tag);
			have_tag = vlan_tag = 0;
			continue;
		}

		(*ifp->if_input)(ifp, m);
	}

	/* Publish the new indexes; only touch rings we replenished. */
	CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
	if (stdcnt)
		CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
	if (jumbocnt)
		CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
}
1958
/*
 * Reclaim completed transmit descriptors and free their mbuf chains.
 */
static void
bge_txeof(struct bge_softc *sc)
{
	struct bge_tx_bd *cur_tx = NULL;
	struct ifnet *ifp;

	ifp = &sc->arpcom.ac_if;

	/*
	 * Go through our tx ring and free mbufs for those
	 * frames that have been sent.
	 */
	while (sc->bge_tx_saved_considx !=
	    sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx) {
		uint32_t idx = 0;

		idx = sc->bge_tx_saved_considx;
		cur_tx = &sc->bge_rdata->bge_tx_ring[idx];
		/* Only one descriptor per frame carries the END flag. */
		if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
			ifp->if_opackets++;
		/* Free the mbuf chain at the descriptor it is anchored on. */
		if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
			m_freem(sc->bge_cdata.bge_tx_chain[idx]);
			sc->bge_cdata.bge_tx_chain[idx] = NULL;
		}
		sc->bge_txcnt--;
		BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
		/* Progress was made, so cancel the watchdog. */
		ifp->if_timer = 0;
	}

	/* If anything was reclaimed, the ring has room again. */
	if (cur_tx != NULL)
		ifp->if_flags &= ~IFF_OACTIVE;
}
1991
1992static void
33c39a69 1993bge_intr(void *xsc)
984263bc 1994{
bf522c7f 1995 struct bge_softc *sc = xsc;
33c39a69
JS
1996 struct ifnet *ifp = &sc->arpcom.ac_if;
1997 uint32_t status;
984263bc
MD
1998
1999#ifdef notdef
2000 /* Avoid this for now -- checking this register is expensive. */
2001 /* Make sure this is really our interrupt. */
2002 if (!(CSR_READ_4(sc, BGE_MISC_LOCAL_CTL) & BGE_MLC_INTR_STATE))
2003 return;
2004#endif
2005 /* Ack interrupt and stop others from occuring. */
2006 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
2007
2008 /*
2009 * Process link state changes.
2010 * Grrr. The link status word in the status block does
2011 * not work correctly on the BCM5700 rev AX and BX chips,
f952ab63 2012 * according to all available information. Hence, we have
984263bc 2013 * to enable MII interrupts in order to properly obtain
f952ab63 2014 * async link changes. Unfortunately, this also means that
984263bc
MD
2015 * we have to read the MAC status register to detect link
2016 * changes, thereby adding an additional register access to
2017 * the interrupt handler.
2018 */
2019
2020 if (sc->bge_asicrev == BGE_ASICREV_BCM5700) {
984263bc
MD
2021 status = CSR_READ_4(sc, BGE_MAC_STS);
2022 if (status & BGE_MACSTAT_MI_INTERRUPT) {
2023 sc->bge_link = 0;
263489fb 2024 callout_stop(&sc->bge_stat_timer);
984263bc
MD
2025 bge_tick(sc);
2026 /* Clear the interrupt */
2027 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
2028 BGE_EVTENB_MI_INTERRUPT);
2029 bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR);
2030 bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR,
2031 BRGPHY_INTRS);
2032 }
2033 } else {
2034 if ((sc->bge_rdata->bge_status_block.bge_status &
2035 BGE_STATFLAG_UPDATED) &&
2036 (sc->bge_rdata->bge_status_block.bge_status &
2037 BGE_STATFLAG_LINKSTATE_CHANGED)) {
7e40b8c5
HP
2038 sc->bge_rdata->bge_status_block.bge_status &=
2039 ~(BGE_STATFLAG_UPDATED|
2040 BGE_STATFLAG_LINKSTATE_CHANGED);
085ed1a0 2041 /*
f952ab63 2042 * Sometimes PCS encoding errors are detected in
085ed1a0
DR
2043 * TBI mode (on fiber NICs), and for some reason
2044 * the chip will signal them as link changes.
2045 * If we get a link change event, but the 'PCS
2046 * encoding error' bit in the MAC status register
2047 * is set, don't bother doing a link check.
2048 * This avoids spurious "gigabit link up" messages
f952ab63 2049 * that sometimes appear on fiber NICs during
085ed1a0
DR
2050 * periods of heavy traffic. (There should be no
2051 * effect on copper NICs.)
2052 */
2053 status = CSR_READ_4(sc, BGE_MAC_STS);
7e40b8c5
HP
2054 if (!(status & (BGE_MACSTAT_PORT_DECODE_ERROR|
2055 BGE_MACSTAT_MI_COMPLETE))) {
085ed1a0 2056 sc->bge_link = 0;
263489fb 2057 callout_stop(&sc->bge_stat_timer);
085ed1a0
DR
2058 bge_tick(sc);
2059 }
984263bc 2060 sc->bge_link = 0;
263489fb 2061 callout_stop(&sc->bge_stat_timer);
984263bc
MD
2062 bge_tick(sc);
2063 /* Clear the interrupt */
2064 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
7e40b8c5
HP
2065 BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
2066 BGE_MACSTAT_LINK_CHANGED);
984263bc
MD
2067
2068 /* Force flush the status block cached by PCI bridge */
2069 CSR_READ_4(sc, BGE_MBX_IRQ0_LO);
2070 }
2071 }
2072
2073 if (ifp->if_flags & IFF_RUNNING) {
2074 /* Check RX return ring producer/consumer */
2075 bge_rxeof(sc);
2076
2077 /* Check TX ring producer/consumer */
2078 bge_txeof(sc);
2079 }
2080
2081 bge_handle_events(sc);
2082
2083 /* Re-enable interrupts. */
2084 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
2085
936ff230 2086 if ((ifp->if_flags & IFF_RUNNING) && !ifq_is_empty(&ifp->if_snd))
33c39a69 2087 (*ifp->if_start)(ifp);
984263bc
MD
2088}
2089
/*
 * One-second periodic timer: update statistics and, while the link is
 * down, poll for link-up (TBI via MAC registers, copper via miibus).
 */
static void
bge_tick(void *xsc)
{
	struct bge_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii = NULL;
	struct ifmedia *ifm = NULL;
	int s;

	s = splimp();

	/* The 5705 keeps its statistics in registers, not host memory. */
	if (sc->bge_asicrev == BGE_ASICREV_BCM5705)
		bge_stats_update_regs(sc);
	else
		bge_stats_update(sc);
	/* Rearm ourselves for one second from now. */
	callout_reset(&sc->bge_stat_timer, hz, bge_tick, sc);
	/* Nothing more to do while the link is already up. */
	if (sc->bge_link) {
		splx(s);
		return;
	}

	if (sc->bge_tbi) {
		/* Fiber (TBI): check PCS sync straight from the MAC. */
		ifm = &sc->bge_ifmedia;
		if (CSR_READ_4(sc, BGE_MAC_STS) &
		    BGE_MACSTAT_TBI_PCS_SYNCHED) {
			sc->bge_link++;
			CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
			if_printf(ifp, "gigabit link up\n");
			/* Kick the transmitter now that the link is usable. */
			if (!ifq_is_empty(&ifp->if_snd))
				(*ifp->if_start)(ifp);
		}
		splx(s);
		return;
	}

	/* Copper: let the PHY layer do its periodic work. */
	mii = device_get_softc(sc->bge_miibus);
	mii_tick(mii);

	if (!sc->bge_link) {
		mii_pollstat(mii);
		if (mii->mii_media_status & IFM_ACTIVE &&
		    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
			sc->bge_link++;
			if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
			    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
				if_printf(ifp, "gigabit link up\n");
			if (!ifq_is_empty(&ifp->if_snd))
				(*ifp->if_start)(ifp);
		}
	}

	splx(s);
}
2143
7e40b8c5 2144static void
33c39a69 2145bge_stats_update_regs(struct bge_softc *sc)
7e40b8c5 2146{
33c39a69 2147 struct ifnet *ifp = &sc->arpcom.ac_if;
7e40b8c5 2148 struct bge_mac_stats_regs stats;
33c39a69 2149 uint32_t *s;
7e40b8c5
HP
2150 int i;
2151
33c39a69 2152 s = (uint32_t *)&stats;
7e40b8c5
HP
2153 for (i = 0; i < sizeof(struct bge_mac_stats_regs); i += 4) {
2154 *s = CSR_READ_4(sc, BGE_RX_STATS + i);
2155 s++;
2156 }
2157
2158 ifp->if_collisions +=
2159 (stats.dot3StatsSingleCollisionFrames +
2160 stats.dot3StatsMultipleCollisionFrames +
2161 stats.dot3StatsExcessiveCollisions +
2162 stats.dot3StatsLateCollisions) -
2163 ifp->if_collisions;
7e40b8c5
HP
2164}
2165
984263bc 2166static void
33c39a69 2167bge_stats_update(struct bge_softc *sc)
984263bc 2168{
33c39a69 2169 struct ifnet *ifp = &sc->arpcom.ac_if;
984263bc
MD
2170 struct bge_stats *stats;
2171
984263bc
MD
2172 stats = (struct bge_stats *)(sc->bge_vhandle +
2173 BGE_MEMWIN_START + BGE_STATS_BLOCK);
2174
2175 ifp->if_collisions +=
7e40b8c5
HP
2176 (stats->txstats.dot3StatsSingleCollisionFrames.bge_addr_lo +
2177 stats->txstats.dot3StatsMultipleCollisionFrames.bge_addr_lo +
2178 stats->txstats.dot3StatsExcessiveCollisions.bge_addr_lo +
2179 stats->txstats.dot3StatsLateCollisions.bge_addr_lo) -
984263bc
MD
2180 ifp->if_collisions;
2181
2182#ifdef notdef
2183 ifp->if_collisions +=
2184 (sc->bge_rdata->bge_info.bge_stats.dot3StatsSingleCollisionFrames +
2185 sc->bge_rdata->bge_info.bge_stats.dot3StatsMultipleCollisionFrames +
2186 sc->bge_rdata->bge_info.bge_stats.dot3StatsExcessiveCollisions +
2187 sc->bge_rdata->bge_info.bge_stats.dot3StatsLateCollisions) -
2188 ifp->if_collisions;
2189#endif
984263bc
MD
2190}
2191
2192/*
2193 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
2194 * pointers to descriptors.
2195 */
static int
bge_encap(struct bge_softc *sc, struct mbuf *m_head, uint32_t *txidx)
{
	struct bge_tx_bd *f = NULL;
	struct mbuf *m;
	uint32_t frag, cur, cnt = 0;
	uint16_t csum_flags = 0;
	struct ifvlan *ifv = NULL;

	/* If the frame came in via a vlan pseudo-interface, grab its tag. */
	if ((m_head->m_flags & (M_PROTO1|M_PKTHDR)) == (M_PROTO1|M_PKTHDR) &&
	    m_head->m_pkthdr.rcvif != NULL &&
	    m_head->m_pkthdr.rcvif->if_type == IFT_L2VLAN)
		ifv = m_head->m_pkthdr.rcvif->if_softc;

	m = m_head;
	cur = frag = *txidx;

	/* Translate the stack's checksum-offload requests into BD flags. */
	if (m_head->m_pkthdr.csum_flags) {
		if (m_head->m_pkthdr.csum_flags & CSUM_IP)
			csum_flags |= BGE_TXBDFLAG_IP_CSUM;
		if (m_head->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
			csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
		if (m_head->m_flags & M_LASTFRAG)
			csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
		else if (m_head->m_flags & M_FRAG)
			csum_flags |= BGE_TXBDFLAG_IP_FRAG;
	}
	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	for (m = m_head; m != NULL; m = m->m_next) {
		if (m->m_len != 0) {
			f = &sc->bge_rdata->bge_tx_ring[frag];
			/* Descriptor still owned by an in-flight frame? */
			if (sc->bge_cdata.bge_tx_chain[frag] != NULL)
				break;
			BGE_HOSTADDR(f->bge_addr,
			    vtophys(mtod(m, vm_offset_t)));
			f->bge_len = m->m_len;
			f->bge_flags = csum_flags;
			if (ifv != NULL) {
				f->bge_flags |= BGE_TXBDFLAG_VLAN_TAG;
				f->bge_vlan_tag = ifv->ifv_tag;
			} else {
				f->bge_vlan_tag = 0;
			}
			/*
			 * Sanity check: avoid coming within 16 descriptors
			 * of the end of the ring.
			 */
			if ((BGE_TX_RING_CNT - (sc->bge_txcnt + cnt)) < 16)
				return(ENOBUFS);
			cur = frag;
			BGE_INC(frag, BGE_TX_RING_CNT);
			cnt++;
		}
	}

	/* The whole chain did not fit into the ring. */
	if (m != NULL)
		return(ENOBUFS);

	/* Refuse to wrap onto the consumer index. */
	if (frag == sc->bge_tx_saved_considx)
		return(ENOBUFS);

	/* Mark the final descriptor and anchor the mbuf chain on it. */
	sc->bge_rdata->bge_tx_ring[cur].bge_flags |= BGE_TXBDFLAG_END;
	sc->bge_cdata.bge_tx_chain[cur] = m_head;
	sc->bge_txcnt += cnt;

	/* Hand the next free descriptor index back to the caller. */
	*txidx = frag;

	return(0);
}
2269
2270/*
2271 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
2272 * to the mbuf data regions directly in the transmit descriptors.
2273 */
static void
bge_start(struct ifnet *ifp)
{
	struct bge_softc *sc;
	struct mbuf *m_head = NULL;
	uint32_t prodidx = 0;

	sc = ifp->if_softc;

	/* Nothing to do until the link is up. */
	if (!sc->bge_link)
		return;

	prodidx = CSR_READ_4(sc, BGE_MBX_TX_HOST_PROD0_LO);

	while(sc->bge_cdata.bge_tx_chain[prodidx] == NULL) {
		/* Peek first; only dequeue once encapsulation succeeds. */
		m_head = ifq_poll(&ifp->if_snd);
		if (m_head == NULL)
			break;

		/*
		 * XXX
		 * safety overkill. If this is a fragmented packet chain
		 * with delayed TCP/UDP checksums, then only encapsulate
		 * it if we have enough descriptors to handle the entire
		 * chain at once.
		 * (paranoia -- may not actually be needed)
		 */
		if (m_head->m_flags & M_FIRSTFRAG &&
		    m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
			if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
			    m_head->m_pkthdr.csum_data + 16) {
				ifp->if_flags |= IFF_OACTIVE;
				break;
			}
		}

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (bge_encap(sc, m_head, &prodidx)) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		/* Now actually remove the packet we committed above. */
		m_head = ifq_dequeue(&ifp->if_snd);

		BPF_MTAP(ifp, m_head);
	}

	/* Transmit */
	CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
	/* 5700 b2 errata */
	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
		CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
}
2335
/*
 * Bring the interface up: reset and reinitialize the chip, reload the
 * MAC address and filters, set up the RX/TX rings and enable interrupts.
 */
static void
bge_init(void *xsc)
{
	struct bge_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint16_t *m;
	int s;

	s = splimp();

	/* Already running: nothing to do. */
	if (ifp->if_flags & IFF_RUNNING) {
		splx(s);
		return;
	}

	/* Cancel pending I/O and flush buffers. */
	bge_stop(sc);
	bge_reset(sc);
	bge_chipinit(sc);

	/*
	 * Init the various state machines, ring
	 * control blocks and firmware.
	 */
	if (bge_blockinit(sc)) {
		if_printf(ifp, "initialization failure\n");
		splx(s);
		return;
	}

	/* Specify MTU. */
	CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
	    ETHER_HDR_LEN + ETHER_CRC_LEN);

	/* Load our MAC address. */
	m = (uint16_t *)&sc->arpcom.ac_enaddr[0];
	CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
	CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));

	/* Enable or disable promiscuous mode as needed. */
	if (ifp->if_flags & IFF_PROMISC) {
		BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
	} else {
		BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
	}

	/* Program multicast filter. */
	bge_setmulti(sc);

	/* Init RX ring. */
	bge_init_rx_ring_std(sc);

	/*
	 * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
	 * memory to insure that the chip has in fact read the first
	 * entry of the ring.
	 */
	if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
		uint32_t v, i;
		for (i = 0; i < 10; i++) {
			DELAY(20);
			v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
			if (v == (MCLBYTES - ETHER_ALIGN))
				break;
		}
		if (i == 10)
			if_printf(ifp, "5705 A0 chip failed to load RX ring\n");
	}

	/* Init jumbo RX ring. */
	if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
		bge_init_rx_ring_jumbo(sc);

	/* Init our RX return ring index */
	sc->bge_rx_saved_considx = 0;

	/* Init TX ring. */
	bge_init_tx_ring(sc);

	/* Turn on transmitter */
	BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE);

	/* Turn on receiver */
	BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);

	/* Tell firmware we're alive. */
	BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);

	/* Enable host interrupts. */
	BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
	BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);

	bge_ifmedia_upd(ifp);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	splx(s);

	/* Start the one-second statistics/link timer. */
	callout_reset(&sc->bge_stat_timer, hz, bge_tick, sc);
}
2438
2439/*
2440 * Set media options.
2441 */
2442static int
33c39a69 2443bge_ifmedia_upd(struct ifnet *ifp)
984263bc 2444{
33c39a69
JS
2445 struct bge_softc *sc = ifp->if_softc;
2446 struct ifmedia *ifm = &sc->bge_ifmedia;
984263bc 2447 struct mii_data *mii;
984263bc
MD
2448
2449 /* If this is a 1000baseX NIC, enable the TBI port. */
2450 if (sc->bge_tbi) {
2451 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2452 return(EINVAL);
2453 switch(IFM_SUBTYPE(ifm->ifm_media)) {
2454 case IFM_AUTO:
2455 break;
2456 case IFM_1000_SX:
2457 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
2458 BGE_CLRBIT(sc, BGE_MAC_MODE,
2459 BGE_MACMODE_HALF_DUPLEX);
2460 } else {
2461 BGE_SETBIT(sc, BGE_MAC_MODE,
2462 BGE_MACMODE_HALF_DUPLEX);
2463 }
2464 break;
2465 default:
2466 return(EINVAL);
2467 }
2468 return(0);
2469 }
2470
2471 mii = device_get_softc(sc->bge_miibus);
2472 sc->bge_link = 0;
2473 if (mii->mii_instance) {
2474 struct mii_softc *miisc;
2475 for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
2476 miisc = LIST_NEXT(miisc, mii_list))
2477 mii_phy_reset(miisc);
2478 }
2479 mii_mediachg(mii);
2480
2481 return(0);
2482}
2483
2484/*
2485 * Report current media status.
2486 */
2487static void
33c39a69 2488bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
984263bc 2489{
33c39a69 2490 struct bge_softc *sc = ifp->if_softc;
984263bc
MD
2491 struct mii_data *mii;
2492
984263bc
MD
2493 if (sc->bge_tbi) {
2494 ifmr->ifm_status = IFM_AVALID;
2495 ifmr->ifm_active = IFM_ETHER;
2496 if (CSR_READ_4(sc, BGE_MAC_STS) &
2497 BGE_MACSTAT_TBI_PCS_SYNCHED)
2498 ifmr->ifm_status |= IFM_ACTIVE;
2499 ifmr->ifm_active |= IFM_1000_SX;
2500 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
2501 ifmr->ifm_active |= IFM_HDX;
2502 else
2503 ifmr->ifm_active |= IFM_FDX;
2504 return;
2505 }
2506
2507 mii = device_get_softc(sc->bge_miibus);
2508 mii_pollstat(mii);
2509 ifmr->ifm_active = mii->mii_media_active;
2510 ifmr->ifm_status = mii->mii_media_status;
984263bc
MD
2511}
2512
/*
 * Handle interface ioctls: MTU, flags, multicast, media and
 * capability changes.
 */
static int
bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
{
	struct bge_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	int s, mask, error = 0;
	struct mii_data *mii;

	s = splimp();

	switch(command) {
	case SIOCSIFMTU:
		/* Disallow jumbo frames on 5705. */
		if ((sc->bge_asicrev == BGE_ASICREV_BCM5705 &&
		    ifr->ifr_mtu > ETHERMTU) || ifr->ifr_mtu > BGE_JUMBO_MTU)
			error = EINVAL;
		else {
			ifp->if_mtu = ifr->ifr_mtu;
			/* Force a full reinit so the chip sees the new MTU. */
			ifp->if_flags &= ~IFF_RUNNING;
			bge_init(sc);
		}
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the state of the PROMISC flag changed,
			 * then just use the 'set promisc mode' command
			 * instead of reinitializing the entire NIC. Doing
			 * a full re-init means reloading the firmware and
			 * waiting for it to start up, which may take a
			 * second or two.
			 */
			if (ifp->if_flags & IFF_RUNNING &&
			    ifp->if_flags & IFF_PROMISC &&
			    !(sc->bge_if_flags & IFF_PROMISC)) {
				BGE_SETBIT(sc, BGE_RX_MODE,
				    BGE_RXMODE_RX_PROMISC);
			} else if (ifp->if_flags & IFF_RUNNING &&
			    !(ifp->if_flags & IFF_PROMISC) &&
			    sc->bge_if_flags & IFF_PROMISC) {
				BGE_CLRBIT(sc, BGE_RX_MODE,
				    BGE_RXMODE_RX_PROMISC);
			} else
				bge_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING) {
				bge_stop(sc);
			}
		}
		/* Remember the flags so we can detect changes next time. */
		sc->bge_if_flags = ifp->if_flags;
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING) {
			bge_setmulti(sc);
			error = 0;
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		/* Fiber (TBI) media is handled locally; copper via miibus. */
		if (sc->bge_tbi) {
			error = ifmedia_ioctl(ifp, ifr,
			    &sc->bge_ifmedia, command);
		} else {
			mii = device_get_softc(sc->bge_miibus);
			error = ifmedia_ioctl(ifp, ifr,
			    &mii->mii_media, command);
		}
		break;
	case SIOCSIFCAP:
		/* Toggle hardware checksum offload. */
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_HWCSUM) {
			if (IFCAP_HWCSUM & ifp->if_capenable)
				ifp->if_capenable &= ~IFCAP_HWCSUM;
			else
				ifp->if_capenable |= IFCAP_HWCSUM;
		}
		error = 0;
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	splx(s);

	return(error);
}
2602
2603static void
33c39a69 2604bge_watchdog(struct ifnet *ifp)
984263bc 2605{
33c39a69 2606 struct bge_softc *sc = ifp->if_softc;
984263bc 2607
c6fd6f3b 2608 if_printf(ifp, "watchdog timeout -- resetting\n");
984263bc
MD
2609
2610 ifp->if_flags &= ~IFF_RUNNING;
2611 bge_init(sc);
2612
2613 ifp->if_oerrors++;
984263bc
MD
2614}
2615
2616/*
2617 * Stop the adapter and free any mbufs allocated to the
2618 * RX and TX lists.
2619 */
static void
bge_stop(struct bge_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct ifmedia_entry *ifm;
	struct mii_data *mii = NULL;
	int mtmp, itmp;

	/* Only copper NICs have an MII layer to deal with. */
	if (!sc->bge_tbi)
		mii = device_get_softc(sc->bge_miibus);

	/* Stop the periodic statistics/link timer. */
	callout_stop(&sc->bge_stat_timer);

	/*
	 * Disable all of the receiver blocks
	 */
	BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
	/* The 5705 lacks some of these sub-blocks. */
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705)
		BGE_CLRBIT(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);

	/*
	 * Disable all of the transmit blocks
	 */
	BGE_CLRBIT(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705)
		BGE_CLRBIT(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);

	/*
	 * Shut down all of the memory managers and related
	 * state machines.
	 */
	BGE_CLRBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705)
		BGE_CLRBIT(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705) {
		BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
		BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
	}

	/* Disable host interrupts. */
	BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);

	/*
	 * Tell firmware we're shutting down.
	 */
	BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);

	/* Free the RX lists. */
	bge_free_rx_ring_std(sc);

	/* Free jumbo RX list. */
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705)
		bge_free_rx_ring_jumbo(sc);

	/* Free TX buffers. */
	bge_free_tx_ring(sc);

	/*
	 * Isolate/power down the PHY, but leave the media selection
	 * unchanged so that things will be put back to normal when
	 * we bring the interface back up.
	 */
	if (!sc->bge_tbi) {
		itmp = ifp->if_flags;
		ifp->if_flags |= IFF_UP;
		ifm = mii->mii_media.ifm_cur;
		mtmp = ifm->ifm_media;
		ifm->ifm_media = IFM_ETHER|IFM_NONE;
		mii_mediachg(mii);
		ifm->ifm_media = mtmp;
		ifp->if_flags = itmp;
	}

	sc->bge_link = 0;

	/* Mark the TX consumer index invalid until the next init. */
	sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
}
2713
2714/*
2715 * Stop all chip I/O so that the kernel's probe routines don't
2716 * get confused by errant DMAs when rebooting.
2717 */
2718static void
33c39a69 2719bge_shutdown(device_t dev)
984263bc 2720{
33c39a69 2721 struct bge_softc *sc = device_get_softc(dev);
984263bc
MD
2722
2723 bge_stop(sc);
2724 bge_reset(sc);
984263bc 2725}